[a-zA-Z0-9]{64})/$', views.ActiveView.as_view()),\n url(r'^account/(?Plogout)/$', views.AccountView.as_view()),\n url(r'^account/(?Plogin)/$', views.AccountView.as_view()),\n]\n","sub_path":"useless/python/headquaters/registration/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"89630682","text":"# -*- coding: utf-8 -*-\nimport os, sys\n\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\nsys.path.append(os.path.join(os.path.split(os.path.realpath(__file__))[0], '../util'))\nimport loghelper, util, db, config, name_helper\n\n#logger\nloghelper.init_logger(\"patch_user_wxheadimgurl\", stream=True)\nlogger = loghelper.get_logger(\"patch_user_wxheadimgurl\")\n\nconn = None\n\n\ndef main():\n conn = db.connect_torndb()\n users = conn.query(\"select * from user where wxheadimgurl is null\")\n for user in users:\n uw = conn.get(\"select * from user_wechat where userId=%s and headimgurl is not null order by id desc limit 1\", user[\"id\"])\n if uw is not None:\n conn.update(\"update user set wxheadimgurl=%s where id=%s\", uw[\"headimgurl\"], user[\"id\"])\n conn.close()\n\n\ndef main2():\n conn = db.connect_torndb()\n users = conn.query(\"select * from user where username is not null and username=phone\")\n for user in users:\n uw = conn.get(\"select * from user_wechat where userId=%s and headimgurl is not null order by id desc limit 1\", user[\"id\"])\n if uw is not None:\n conn.update(\"update user set username=%s where id=%s\", uw[\"nickname\"], user[\"id\"])\n conn.close()\n\n\nif __name__ == \"__main__\":\n main()\n main2()","sub_path":"data/patch/patch_user_wxheadimgurl.py","file_name":"patch_user_wxheadimgurl.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"188668528","text":"import sys\nimport sparql_query\nimport json\nimport glob\nimport os\nimport networkx as nx\n# import matplotlib.pyplot as plt\nimport matplotlib\nmatplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!\nimport matplotlib.pyplot as plt\n\n\n_TYPE_URI = ''\n\n\ndef get_type(uri, endpoint_url):\n try:\n result = sparql_query.query(\n 'select distinct ?type where {' + uri + ' ' + _TYPE_URI + ' ?type}', endpoint_url)\n except:\n return None\n else:\n json_result = json.loads(result)['results']['bindings']\n if json_result:\n return json_result\n return None\n\n\n# @TODO Escribir la funcion que hace una consulta para sacar los\n# tipos mencionados en el endpoint y devolverlos como un set.\ndef get_types(endpoint):\n json_results = get_type('[]', endpoint)\n types = []\n for result in json_results:\n types.append(result['type']['value'])\n return set(types)\n\n\n# @TODO Escribir la funcion que con una consulta retorne\n# la cantidad de veces que aparece el tipo t en el endpoint.\ndef count_instances(endpoint_url, t):\n try:\n result = sparql_query.query(\n 'select distinct count(?s) where { ?s ' + _TYPE_URI + ' <' + t + '>}', endpoint_url)\n except:\n return None\n else:\n json_result = json.loads(result)['results']['bindings']\n if json_result:\n return int(json_result[0]['callret-0']['value'])\n return None\n\n\ndef generate_type_relation_stats(dataset, count_repetitions):\n\n path = '/media/data/logs/'\n # path = '/Users/cbuil/Proyectos/logs/data/logs_examples/'\n current_path = path + 'datasets/' + dataset + '/logs/'\n type_relation_maps = {}\n file_count = 0\n\n f_write = open(os.getcwd() + '/type_relation_map_graph_' + dataset + '_repeats_' + str(count_repetitions) + '.nt', 'w')\n for day_folder in sorted(glob.glob(current_path + 'relation_maps/*')):\n for user_file in glob.glob(day_folder + '/*.json'):\n with open(user_file, 'r') as f:\n lines = f.readlines()\n for line in lines:\n json_line = json.loads(line)\n 
repetitions = json_line['repetitions']\n type_relation_map = json_line['type_relation_map']\n for elements in type_relation_map:\n if 'openlink' not in elements[0] and 'openlink' not in elements[1] and 'openlink' not in elements[2]:\n f_write.write(str(elements[0]) + ' ' + str(elements[1]) + ' ' + str(elements[2]) + ' . \\n')\n if '?' in elements[0]:\n elements[0] = '_:s' # + str(file_count)\n if '?' in elements[1]:\n elements[1] = '_:p' # + str(file_count)\n if '?' in elements[2]:\n elements[2] = '_:o' # + str(file_count)\n if count_repetitions:\n type_relation_maps[tuple(elements)] = type_relation_maps.get(tuple(elements), 0) + int(repetitions)\n else:\n type_relation_maps[tuple(elements)] = type_relation_maps.get(tuple(elements), 0) + 1\n file_count += 1\n f_write.close()\n\n graph = nx.DiGraph()\n f_write = open('Type_relation_maps_stats_' + dataset + '_repeats_' + str(count_repetitions) + '.txt', 'w')\n with open('type_relation_map_graph_' + dataset + '_repeats_' + str(count_repetitions) + '_distinct.nt', 'w') as f_write_1:\n for type_relation_map in sorted(type_relation_maps, key=lambda type_relation_map: type_relation_maps[type_relation_map], reverse=True):\n if type_relation_maps[type_relation_map] > 10:\n graph.add_node(type_relation_map[0])\n graph.add_node(type_relation_map[2])\n graph.add_edge(type_relation_map[0], type_relation_map[2], weight=type_relation_maps[type_relation_map], label=type_relation_map[1])\n print(json.dumps(type_relation_map) + ' ' + str(type_relation_maps[type_relation_map]))\n f_write_1.write(json.dumps(type_relation_map[0]) + ' ' + json.dumps(type_relation_map[1]) + ' ' + json.dumps(type_relation_map[2]) + ' . 
\\n')\n # for element in type_relation_map:\n f_write.write(str(type_relation_map) + ' ' + str(type_relation_maps[type_relation_map]) + '\\n')\n # print(str(type_relation_map) + ' ' + str(type_relation_maps[type_relation_map]))\n f_write.close()\n nx.draw(graph)\n plt.savefig(\"graph.png\")\n\n\nif __name__ == '__main__':\n\n # Para ver que esta funcionando usamos un endpoint.\n # Si todo anda bien basta con eliminar el [:1] y dejarlo corriendo\n if len(sys.argv) == 1:\n print('usage: python type_stats.py ')\n else:\n generate_type_relation_stats(sys.argv[1], False)\n","sub_path":"src/statistics/type_relation_map_stats.py","file_name":"type_relation_map_stats.py","file_ext":"py","file_size_in_byte":4981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"632467250","text":"from click.testing import CliRunner\nfrom unittest import mock\nimport json\n\nimport pytest\n\nfrom mapbox_tilesets.scripts.cli import (\n add_source,\n view_source,\n delete_source,\n validate_source,\n list_sources,\n)\n\n\n@pytest.mark.usefixtures(\"token_environ\")\n@mock.patch(\"requests.post\")\ndef test_cli_add_source(mock_request_post, MockResponse):\n okay_response = {\"id\": \"mapbox://tileset-source/test-user/hello-world\"}\n mock_request_post.return_value = MockResponse(okay_response, status_code=200)\n runner = CliRunner()\n validated_result = runner.invoke(\n add_source, [\"test-user\", \"hello-world\", \"tests/fixtures/valid.ldgeojson\"]\n )\n assert validated_result.exit_code == 0\n\n assert (\n validated_result.output\n == \"\"\"{\"id\": \"mapbox://tileset-source/test-user/hello-world\"}\\n\"\"\"\n )\n\n\n@pytest.mark.usefixtures(\"token_environ\")\n@mock.patch(\"requests.post\")\ndef test_cli_add_source_no_validation(mock_request_post, MockResponse):\n error_response = {\n \"message\": \"Invalid file format. Only GeoJSON features are allowed.\"\n }\n mock_request_post.return_value = MockResponse(error_response, status_code=400)\n runner = CliRunner()\n no_validation_result = runner.invoke(\n add_source,\n [\n \"test-user\",\n \"hello-again\",\n \"tests/fixtures/invalid.ldgeojson\",\n \"--no-validation\",\n ],\n )\n assert no_validation_result.exit_code == 1\n\n assert (\n no_validation_result.exception.message\n == '{\"message\": \"Invalid file format. 
Only GeoJSON features are allowed.\"}'\n )\n\n\n@pytest.mark.usefixtures(\"token_environ\")\n@mock.patch(\"requests.get\")\ndef test_cli_view_source(mock_request_get, MockResponse):\n message = {\"id\": \"mapbox://tileset-source/test-user/hello-world\"}\n mock_request_get.return_value = MockResponse(message, status_code=200)\n runner = CliRunner()\n result = runner.invoke(view_source, [\"test-user\", \"hello-world\"])\n\n assert result.exit_code == 0\n assert json.loads(result.output) == message\n\n\n@pytest.mark.usefixtures(\"token_environ\")\n@mock.patch(\"requests.delete\")\ndef test_cli_delete_source(mock_request_delete, MockResponse):\n mock_request_delete.return_value = MockResponse(\"\", status_code=204)\n runner = CliRunner()\n result = runner.invoke(delete_source, [\"test-user\", \"hello-world\"], input=\"y\")\n assert result.exit_code == 0\n assert (\n result.output\n == \"Are you sure you want to delete test-user hello-world? [y/N]: y\\nSource deleted.\\n\"\n )\n force_result = runner.invoke(delete_source, [\"test-user\", \"hello-world\", \"--force\"])\n assert force_result.exit_code == 0\n assert force_result.output == \"Source deleted.\\n\"\n\n\n@pytest.mark.usefixtures(\"token_environ\")\n@mock.patch(\"requests.delete\")\ndef test_cli_delete_source_aborted(mock_request_delete, MockResponse):\n mock_request_delete.return_value = MockResponse(\"\", status_code=201)\n runner = CliRunner()\n result = runner.invoke(delete_source, [\"test-user\", \"hello-world\"], input=\"n\")\n assert result.exit_code == 1\n assert (\n result.output\n == \"Are you sure you want to delete test-user hello-world? 
[y/N]: n\\nAborted!\\n\"\n )\n\n\n@pytest.mark.usefixtures(\"token_environ\")\n@mock.patch(\"requests.get\")\ndef test_cli_view_source_2(mock_request_get, MockResponse):\n message = [\n {\"id\": \"mapbox://tileset-source/test-user/hello-world\"},\n {\"id\": \"mapbox://tileset-source/test-user/hola-mundo\"},\n ]\n mock_request_get.return_value = MockResponse(message, status_code=200)\n runner = CliRunner()\n result = runner.invoke(list_sources, [\"test-user\"])\n\n assert result.exit_code == 0\n assert (\n result.output\n == \"mapbox://tileset-source/test-user/hello-world\\nmapbox://tileset-source/test-user/hola-mundo\\n\"\n )\n\n\n@pytest.mark.usefixtures(\"token_environ\")\ndef test_cli_validate_source():\n runner = CliRunner()\n result = runner.invoke(validate_source, [\"tests/fixtures/valid.ldgeojson\"])\n assert result.exit_code == 0\n assert result.output == \"Validating features\\n✔ valid\\n\"\n","sub_path":"tests/test_cli_sources.py","file_name":"test_cli_sources.py","file_ext":"py","file_size_in_byte":4124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"34117549","text":"from setuptools import setup\n\nns = \"eregs_ns.parser\" # The namespace for regulations-parser extensions.\nfs = \"atf_regparser\" # The directory name for the package.\nentry_points = {\n \"%s.preprocessors\" % ns: [\n \"USCode = %s.preprocs:USCode\" % fs\n ],\n \"%s.test_suite\" % ns: [\n \"testsuite = %s.tests\" % fs\n ]\n}\n\nsetup(\n name=fs,\n version=\"1.0.0\",\n packages=[fs],\n classifiers=[\n 'License :: Public Domain',\n 'License :: CC0 1.0 Universal (CC0 1.0) Public Domain Dedication'\n ],\n entry_points=entry_points\n)\n","sub_path":"eregs_extensions/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"207304972","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom jinja2 import Template\nfrom sqlalchemy.orm import sessionmaker\nfrom models import DB_ENGINE, Products\n# from googletrans import Translator\n\n# t = Translator()\nsess = sessionmaker(DB_ENGINE)\nsession = sess()\nprice = open('template.html').read()\ntemplate = Template(price)\n\n\ndef create_category_price(categories=[]):\n for cat in categories:\n arr = cat.split(\"|\")\n cat_name = arr[-1]\n products = session.query(Products).order_by(Products.name).filter(Products.category==cat).filter(Products.instock==True).limit(1000).all()\n if len(products)>20:\n print(cat)\n data = dict(cat_name=cat_name, products=products)\n out = template.render(data=data)\n with open(\"{}.html\".format(cat.replace('/','_').replace('|','_')), \"w\") as html:\n print(out, file=html)\n\ndef get_categories_list():\n cats = session.query(Products.category).distinct(Products.category).filter(Products.instock == True).all()\n cat_list = []\n for i in cats:\n cat_list.append(i[0])\n return cat_list\n\n\nif __name__ == '__main__':\n\n cats = get_categories_list()\n create_category_price(categories=cats)\n","sub_path":"price.py","file_name":"price.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"578558592","text":"#create services here\nfrom educationApp.models import Datas, Index\nfrom difflib import SequenceMatcher\nimport os\nimport educationApp.utils as utils\nfrom educationApp.tokenizer import Analyzer\nfrom educationApp.gensimUtils import Similar\nclass DataService:\n def __init__(self):\n pass\n\n def getDatas(self, name):\n datas = self.getDatasByIndex(name)\n return datas\n \n def getAllIndex(self, name):\n matcherService = NameMatcherService()\n allIndex = matcherService.getAllIndex(name)\n return allIndex\n\n def getDatasByIndex(self, index):\n dataName = index\n fileDatas = utils.readJsonFromTxtFile('/Users/liug/Documents/dataSearch/files/' + dataName + '.txt')\n dataNodes = fileDatas['returndata']['datanodes']\n nodesNames = [{'cname':cname['cname'], 'code':cname['code']} for cname in fileDatas['returndata']['wdnodes'][0]['nodes']]\n colName = []\n col2018 = []\n col2017 = []\n col2016 = []\n col2015 = []\n col2014 = []\n col2013 = []\n col2012 = []\n col2011 = []\n col2010 = []\n col2009 = []\n for nodeName in nodesNames:\n cname = nodeName['cname']\n code = nodeName['code']\n colName.append(cname)\n for dataNode in dataNodes:\n if(dataNode['code'] == 'zb.' + code + '_sj.2018'):\n col2018.append(dataNode['data']['data'])\n if(dataNode['code'] == 'zb.' + code + '_sj.2017'):\n col2017.append(dataNode['data']['data'])\n if(dataNode['code'] == 'zb.' + code + '_sj.2016'):\n col2016.append(dataNode['data']['data'])\n if(dataNode['code'] == 'zb.' + code + '_sj.2015'):\n col2015.append(dataNode['data']['data'])\n if(dataNode['code'] == 'zb.' + code + '_sj.2014'):\n col2014.append(dataNode['data']['data'])\n if(dataNode['code'] == 'zb.' + code + '_sj.2013'):\n col2013.append(dataNode['data']['data'])\n if(dataNode['code'] == 'zb.' + code + '_sj.2012'):\n col2012.append(dataNode['data']['data'])\n if(dataNode['code'] == 'zb.' + code + '_sj.2011'):\n col2011.append(dataNode['data']['data'])\n if(dataNode['code'] == 'zb.' 
+ code + '_sj.2010'):\n col2010.append(dataNode['data']['data'])\n if(dataNode['code'] == 'zb.' + code + '_sj.2009'):\n col2009.append(dataNode['data']['data']) \n \n tableData = []\n row = 0\n for col in colName:\n data = {\"name\": col, \"2018\":col2018[row], \"2017\":col2017[row], \"2016\":col2016[row],\n \"2015\":col2015[row], \"2014\":col2014[row], \"2013\":col2013[row], \"2012\":col2012[row],\n \"2011\":col2011[row], \"2010\":col2010[row], \"2009\":col2009[row]}\n tableData.append(data)\n row = row + 1\n '''\n tableDatas = {'指标':colName,\n '2018': col2018,'2017':col2017,'2016':col2016,'2015':col2015,'2014':col2014,'2013':col2013,'2012':col2012,'2011':col2011,'2010':col2010,'2009':col2009}\n '''\n datas = Datas('', index)\n datas.setDatas(tableData)\n return datas\n \nclass NameMatcherService:\n def __init__(self):\n pass\n \n def createWordsCorpora(self):\n corpora = []\n for filename in os.listdir('/Users/liug/Documents/dataSearch/files'):\n if filename.endswith(\".txt\") :\n corpora.append(filename[:-4])\n return corpora\n \n def getIndex(self, nameForMatch):\n analyzer = Analyzer()\n segWords = analyzer.cutWords(nameForMatch)\n wordCorpora = self.createWordsCorpora()\n sim = Similar(Similar.EDUCATION, wordCorpora)\n similaries = sim.similary(nameForMatch)\n index = ''\n for documentNumber, score in sorted(enumerate(similaries), key=lambda x: x[1], reverse=True):\n index = wordCorpora[documentNumber]\n break\n return index\n \n def getAllIndex(self, nameForMatch):\n analyzer = Analyzer()\n segWords = analyzer.cutWords(nameForMatch)\n wordCorpora = self.createWordsCorpora()\n sim = Similar(Similar.EDUCATION, wordCorpora)\n similaries = sim.similary(nameForMatch)\n allIndex = []\n for documentNumber, score in sorted(enumerate(similaries), key=lambda x: x[1], reverse=True):\n index = wordCorpora[documentNumber]\n allIndex.append(index)\n return allIndex\n \n \n 
","sub_path":"educationApp/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":4667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"637628843","text":"\"\"\"\ninstabot example\n\nworkflow:\n mention [@user] in comment section\n\"\"\"\nimport os\nimport sys\n\nsys.path.append(os.path.join(sys.path[0], '../'))\nfrom instabot import Bot\n\nparser = argparse.ArgumentParser(add_help=True)\nparser.add_argument('-u', type=str, help=\"username\")\nparser.add_argument('-p', type=str, help=\"password\")\nparser.add_argument('-proxy', type=str, help=\"proxy\")\nparser.add_argument('user', type=str, help='user')\nparser.add_argument('nfollowers', type=int, help='nfollowers')\nargs = parser.parse_args()\n\nbot = Bot()\nbot.login()\n\nuserID = bot.get_user_id_from_username(args.user)\nsomeones_followers = bot.api.get_total_followers_or_followings(userID,\n amount=args.nfollowers,\n filter_private=False,\n filter_business=False,\n filter_verified=False,\n usernames=True,)\n\nmedias = bot.get_your_medias()\nmedia_to_comment = medias[0]\n\nfor usr in someones_followers:\n comment = '@' + usr\n bot.api.comment(media_to_comment, comment)\n bot.console_print('{} media commented with text: {}'.format(media_to_comment, comment), 'green')\n bot.total['comments'] += 1\n bot.delay('comment')\n\n'''\n# get a list of users to mention and store in a text file\ninput(colored(\"what user followers do you want to scrape ? 
: \", 'red')) # scrape users followers\nwith open('someones_followers_scrape.txt', 'w') as file:\n file.write(someones_followers)\npages_to_scrape = bot.read_list_from_file(\"someones_followers_scrape.txt\") # reading passed \"someones followers to scrape\"\nf = open(\"scrappedFOLLOWERS.txt\", \"w\") # stored list of \"Someone's Followers\"\nfor follower in pages_to_scrape:\n users = bot.get_user_followers(follower,30)\nfor userfollowers in users:\n f.write(userfollowers + \"\\n\")\nprint(colored(\"\\n\" + \"successfully written Someone's Followers , to textfile scrappedFOLLOWERS.txt\", 'green'))\nf.close()\n\n# convert passed scrapped followers to usernames\n\n\nprint(colored(\"Converting scrappedFOLLOWERS.txt to usernames, MIGHT TAKE AWHILE!!!!\", 'red'))\nwusers = bot.read_list_from_file(\"scrappedFOLLOWERS.txt\")\nwith open(\"usernamelist.txt\", 'w+') as f:\n\tfor list in wusers:\n\t\tusernames=bot.get_username_from_user_id(list) + '\\n'\n\t\tf.write(usernames)\n\tprint(colored(\"succesfully converted \" + str(wusers), 'green'))\n\n# append '@' to scrapped list\n\n\nprint(\"adding '@' to usernames\")\nappendText = '@'\nfollowlist = open(\"usernamelist.txt\", 'r')\nupdatedList = open(\"mentionlist.txt\", 'w')\nfor name in followlist:\n updatedList.write(appendText + name.rstrip() + '\\n')\nupdatedList.close()\nprint(colored(\"succesfully appended '@' to usernames\", 'green'))\n\n\n# comment @users on last media post\nmedias=bot.get_your_medias()\nwhile True:\n\tbot.comment_medias([medias[0]])\n'''","sub_path":"mention.py","file_name":"mention.py","file_ext":"py","file_size_in_byte":3000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"117104369","text":"import os\ncount = 0\ntry:\n os.remove(\"C:\\\\Users\\\\dell\\\\Documents\\\\UiPath\\\\BEML_Workflow1set\\\\Python_Script\\\\CSV Files\\\\beml.csv\")\nexcept:\n pass\nwith open(\"C:\\\\Users\\\\dell\\\\Documents\\\\UiPath\\\\BEML_Workflow1set\\\\Python_Script\\\\Input text_files\\\\Capture.txt\", \"r\") as fp:\n Lines = fp.readlines()\n title = \"MED_ITEM_ID,PARTICULARS,QTY,PRICE,NET_AMT\"\n with open(\"C:\\\\Users\\\\dell\\\\Documents\\\\UiPath\\\\BEML_Workflow1set\\\\Python_Script\\\\CSV Files\\\\beml.csv\", \"a+\") as ff:\n title = title.replace(\" \",\"\")\n ff.write(title + \"\\n\")\n nextLineNewRecord = False\n startRecording = False\n headerIsMedicineOrMaterial = False\n rows = []\n row = \"\"\n csv = \"\"\n temp = \"\"\n for line in Lines:\n if (startRecording):\n line_s = line.strip().replace(\"\\\\''\",\"\")\n print(line_s)\n if line_s:\n if line_s.startswith(\"Sub Total\"):\n nextLineNewRecord = True\n headerIsMedicineOrMaterial = False\n else:\n nextLineNewRecord = False\n if nextLineNewRecord:\n rows.append(row)\n row = \"\"\n else:\n if row:\n if not headerIsMedicineOrMaterial:\n row = row + \" \" + line_s\n else:\n data = line_s.split()\n if not line_s.__contains__(\"Packed:\"):\n temp = temp + \" \" + line_s\n try:\n a = float(data[len(data) - 1])\n a = float(data[len(data) - 2])\n a = float(data[len(data) - 3])\n row = row + \"[\" + temp.replace(\"Charged :\",\"\")\n temp = \"\"\n except Exception as e:\n pass\n else:\n row = line_s\n if line_s.__contains__(\"Medicines\") or line_s.__contains__(\"Materials\"):\n headerIsMedicineOrMaterial = True\n else:\n if line.strip().startswith(\"Ref\"):\n startRecording = True\n if row:\n rows.append(row)\n\n for data in rows:\n dataSub = data.strip('\"').split(\"[\")\n header = dataSub[0].split()\n if header or len(dataSub) > 1:\n if header:\n SrNo = header[0]\n headerText = header[1:]\n with 
open(\"C:\\\\Users\\\\dell\\\\Documents\\\\UiPath\\\\BEML_Workflow1set\\\\Python_Script\\\\CSV Files\\\\beml.csv\",\"a+\") as ff:\n ff.write(SrNo + \",\" + \" \".join(headerText) + \"\\n\")\n for dd in dataSub[1:]:\n data_s = dd.strip('\"').split()\n l = len(data_s)\n particulars = \"\"\n quantity = \"\"\n price = \"\"\n amount = \"\"\n for i, d in enumerate(data_s):\n if (i < l - 3) or l < 3:\n if particulars:\n particulars = particulars + \" \" + d\n else:\n particulars = d\n elif i == l - 3:\n quantity = d\n elif i == l - 2:\n price = d\n elif i == l - 1:\n amount = d\n particulars=particulars.replace(\"'\",\"''\")\n\n if particulars.__contains__(\"]\"):\n csv = ',\"[' + particulars.replace('\"','\"\"') + '\",' + quantity + ',' + price + ',' + amount\n else:\n csv = ',\"' + particulars.replace('\"','\"\"') + '\",' + quantity + ',' + price + ',' + amount\n with open(\"C:\\\\Users\\\\dell\\\\Documents\\\\UiPath\\\\BEML_Workflow1set\\\\Python_Script\\\\CSV Files\\\\beml.csv\",\"a+\") as ff:\n ff.write(csv + \"\\n\")\n","sub_path":"BEML_Workflow1set/Python_Script/Detailed_Bill.py","file_name":"Detailed_Bill.py","file_ext":"py","file_size_in_byte":4047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"61273494","text":"def test_visit_google_com_returns_page_with_Google_in_title(browser):\n b = browser\n b.visit(\"https://www.google.com/\")\n assert \"Google\" in b.title\n\n\ndef test_headless_visit_google_com_returns_page_with_Google_in_title(headless_browser):\n b = headless_browser\n b.visit(\"https://www.google.com/\")\n assert \"Google\" in b.title\n\n\ndef test_chrome_fill_github_in_google_search_box_returns_github_website(chrome_browser):\n b = chrome_browser\n b.visit(\"https://www.google.com/\")\n b.fill(\"q\", \"github\")\n search_button = b.find_by_name(\"btnK\")\n b.wait_for(search_button.is_displayed, timeout=1.5)\n search_button.click()\n b.wait(1)\n assert b.find_by_text(\"github.com\")\n","sub_path":"tests/test_example.py","file_name":"test_example.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"85599959","text":"# astrologers stars\nprint(\"give the no. of rows\")\nrows = int(input())\nprint(\"write true(1) or false(2) \")\nx = int(input())\nif x == 1:\n for i in range (1,rows+1):\n print(i*\"*\")\nelif x == 2:\n for i in range (1, rows + 1):\n i1 = rows+1 - i\n print(i1*\"*\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"23543341","text":"from itertools import (tee, islice)\n\ndef pairwise(iterable):\n '''s -> (s0,s1), (s1,s2), (s2, s3), ...\n from https://docs.python.org/3/library/itertools.html#itertools-recipes\n '''\n a, b = tee(iterable)\n next(b, None)\n return zip(a, b)\n\ndef in_chunks(iterable, size):\n '''s, 3 -> [s0,s1,s2], [s3,s4,s5], ...'''\n while True:\n chunk = list(islice(iterable, size))\n if not chunk:\n raise StopIteration\n yield chunk\n\ndef add_items(dictionary, items):\n result = dictionary.copy()\n result.update(items)\n return result\n","sub_path":"Homework2/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"575030808","text":"from django.urls import path\n\nfrom . import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('success_game/', views.succ_game, name='success_game'),\n path('ind_player//', views.player_view, name='ind_player')\n\n]\n","sub_path":"elo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"183325646","text":"\"\"\"\r\nAuthor: Hrishee Shastri\r\nMay 2019\r\nGenetic Algorithm for optimization of scalar functions with vector input. \r\n\"\"\"\r\n\r\nfrom chromosome import *\r\nimport os\r\nimport math\r\n\r\ndef GA_SEARCH(mutrate, crossrate, popsize, gens, rep, file, fn, interval, key=min):\r\n \"\"\"\r\n Executes a genetic algorithm to optimize a mathematical function fn. Returns a pair (X,y) where X is an input vector and y is the optimized fn(X)\r\n mutrate -- mutation rate, between 0 and 1 inclusive\r\n crossrate -- crossover rate, between 0 and 1 inclusive\r\n popsize -- positive even integer population size to be maintained throughout iteration\r\n gens -- a number greater than 0 that specifies the number of generations to iterate through\r\n rep -- representation function to be used (instance of Representation class). Maps from bitstrings to real numbers in the given interval\r\n Pass the function object (e.g. GRAY_CODE)\r\n file -- text file name to write output to (not the same as console output -- file output writes every generation, while\r\n console output only writes when an improvement has been made)\r\n fn -- the real valued mathematical function to be optimized, wrapped in a TestFn object. fn : R^n --> R (i.e. vector valued inputs, scalar valued outputs).\r\n interval -- A 3-tuple (start, end, step) inclusive that constrains the search space for fn. In other words, each entry x_i in the input vector \r\n is constrained by x_i \\in [start,end] with step increments. Make sure fn is continuous along every point in the interval (e.g. 
no ZeroDivisionErrors).\r\n W -- scaling window = 1\r\n S -- selection strategy = E \r\n key -- min for function minimization and max for function maximization \r\n \"\"\"\r\n\r\n assert popsize > 0, \"popsize is not positive\"\r\n assert 0 <= mutrate and mutrate <= 1, \"invalid mutation rate\"\r\n assert 0 <= crossrate and crossrate <= 1, \"invalid crossover rate\"\r\n assert gens > 0, \"num of generations not positive\"\r\n\r\n# print(\"Initializing...\")\r\n\r\n # Initialize representation \r\n REP = rep(interval)\r\n\r\n# print(key.__name__.upper() + \"IMIZING \" + str(fn).upper() + \" (\" + REP.get_name() + \")\")\r\n\r\n\r\n f = open(os.path.join(\"caruana_data\", file + \".txt\"), 'w')\r\n #g = open(os.path.join(\"caruana_data\", file + \"best_sol\" + \".txt\"), 'w')\r\n\r\n # Initialize random population\r\n EVAL_LIMIT = 5000\r\n EVALS = 0\r\n curr_gen = 1\r\n POP = []\r\n dim = fn.get_input_dimension()\r\n\r\n for i in range(0, popsize):\r\n vec = \"\"\r\n for n in range(dim):\r\n vec += REP.get_random_bitstr()\r\n chrom = Chromosome(REP, vec)\r\n POP.append(chrom)\r\n\r\n assert len(POP) == popsize, \"POP has incorrect number of elements\"\r\n\r\n\r\n # evaluate population \r\n #print(\"Evolving...\")\r\n # Fitness map is not performance value. It is just the evaluation of the objective function to be minimized.\r\n FITNESS_MAP = {chrom:chrom.eval_fitness(fn) for chrom in POP}\r\n\r\n # scaling window of 1\r\n if key == min:\r\n best = math.inf\r\n f_prime = max(FITNESS_MAP.values())\r\n else:\r\n best = -math.inf\r\n f_prime = min(FITNESS_MAP.values())\r\n\r\n for k in POP:\r\n # f.write(str(k.performance_value(FITNESS_MAP, f_prime, key)))\r\n # f.write(\"\\t\")\r\n f.write(str(FITNESS_MAP[k]))\r\n f.write(\"\\n\")\r\n EVALS += 1\r\n\r\n #g.write(str(key(FITNESS_MAP.values())) + \"\\n\")\r\n # Evolve\r\n while EVALS < EVAL_LIMIT:\r\n curr_gen += 1\r\n child_POP = []\r\n new_children = [] # new individuals not from previous generation. 
Child_pop is the entire population that will replace POP.\r\n # new_children keeps track of the individuals that are not from previous generation\r\n for i in range(popsize//2):\r\n #parent1, parent2 = wheel_selection(POP, FITNESS_MAP, f_prime, key)\r\n #parent1, parent2 = stochastic_universal_sampling(POP, FITNESS_MAP, f_prime, key)\r\n parent1, parent2 = rank_selection(POP, FITNESS_MAP, f_prime, key)\r\n \r\n if random.uniform(0,1) <= crossrate:\r\n child1, child2 = parent1.crossover(parent2)\r\n else:\r\n child1, child2 = parent1, parent2\r\n\r\n child1 = child1.mutate(mutrate)\r\n child2 = child2.mutate(mutrate)\r\n\r\n if child1 != parent1 and child1 != parent2:\r\n new_children.append(child1)\r\n if child2 != parent1 and child2 != parent2:\r\n new_children.append(child2)\r\n\r\n\r\n child_POP.append(child1)\r\n child_POP.append(child2)\r\n\r\n # elitist replacement\r\n best_chrom = key(FITNESS_MAP, key = FITNESS_MAP.get)\r\n if best_chrom not in child_POP:\r\n child_POP.append(best_chrom)\r\n\r\n POP = child_POP.copy()\r\n\r\n assert len(POP) == popsize or len(POP) == popsize + 1, \"popsize not maintained after next generation\"\r\n FITNESS_MAP = {chrom:chrom.eval_fitness(fn) for chrom in POP}\r\n\r\n # scaling window of 1, so recompute f_prime every generation\r\n if key == min:\r\n f_prime = max(FITNESS_MAP.values())\r\n else:\r\n f_prime = min(FITNESS_MAP.values())\r\n\r\n for new in new_children:\r\n # f.write(str(new.performance_value(FITNESS_MAP, f_prime, key)))\r\n # f.write(\"\\t\")\r\n f.write(str(FITNESS_MAP[new]))\r\n f.write(\"\\n\")\r\n EVALS += 1\r\n if EVALS == EVAL_LIMIT:\r\n break \r\n\r\n #g.write(str(key(FITNESS_MAP.values())) + \"\\n\")\r\n\r\n# print(\"All \" + str(EVALS) + \" fitness evals completed\")\r\n","sub_path":"comparison-GA/optimizationGA.py","file_name":"optimizationGA.py","file_ext":"py","file_size_in_byte":5671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"409240288","text":"class Hamster:\n def __init__(self, n, v, k):\n self.navn = n\n self.vekt = v\n self.kjonn = k\n \n self.over = None\n self.under = None\n self.venstre = None\n self.hoyre = None\n \n def __eq__(self, hamster):\n if (self.navn == hamster.navn and self.kjonn == hamster.kjonn):\n return True\n else:\n return False\n \n def hent_info(self):\n tekst = \"\"\n tekst += \"\\nNavn: \" + self.navn\n tekst += \"\\nVekt: \" + self.vekt\n tekst += \"\\nKjonn: \" + self.kjonn\n return tekst\n \n def __str__(self):\n return self.navn\n ","sub_path":"uke11/hamster.py","file_name":"hamster.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"599738811","text":"class MoveError(BaseException):\n pass\n\n\nclass World(object):\n \n def __init__(self, size=100):\n self.size = size\n self.directions = ['n', 'e', 's', 'w']\n self.obstacles = {}\n \n def get(self, loc):\n return self.obstacles.get('{}_{}'.format(loc[0], loc[1]))\n \n def put(self, loc, obj):\n self.obstacles['{}_{}'.format(loc[0], loc[1])] = obj;\n\n\nclass Rover(object):\n\n def __init__(self, world, x=0, y=0, direction='n'):\n self.world = world\n self.location = [x, y]\n self.dirAngle = self.world.directions.index(direction)\n\n def command(self, cmds):\n for cmd in cmds:\n if cmd is 'f':\n self.move(1)\n elif cmd is 'r':\n self.turn(1)\n elif cmd is 'l':\n self.turn(-1)\n elif cmd is 'b':\n self.move(-1)\n else:\n raise TypeError()\n\n @property\n def direction(self):\n return self.world.directions[self.dirAngle]\n\n def move(self, distance):\n axis = (self.dirAngle+1) % 2\n headingNE = 1 if self.dirAngle < 2 else -1\n new_loc = list(self.location)\n new_loc[axis] = (new_loc[axis] + headingNE * distance) % self.world.size\n if self.world.get(new_loc):\n raise MoveError()\n self.location = new_loc\n \n def turn(self, angle):\n self.dirAngle = (self.dirAngle + angle) % len(self.world.directions)","sub_path":"jpospychala/rover.py","file_name":"rover.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"381407994","text":"# Build a standalone function to remove strings of even lengths from a given array. Given ['Nope!', 'Its', 'kris', 'strating', 'with', 'K!', '(instead', 'of', 'Chris', 'with', 'C)', '.'], change it to ['Nope!', 'Its', 'Chris']\n\ndef removeEven(strArr):\n i = 0\n while i < len(strArr):\n if (len(strArr[i]) % 2 == 0):\n strArr.remove(strArr[i])\n else:\n i += 1\n\nmyStrArr = ['Nope!', 'Its', 'kris', 'starting', 'with', 'K!', '(instead', 'of', 'Chris', 'with', 'C)', '.']\nprint(\"The original array is {}\").format(myStrArr)\nremoveEven(myStrArr)\nprint(\"The changed array is {}\").format(myStrArr)\n","sub_path":"Chapter-04-Strings-AssociativeArrays/Remove-Even-Length-Strings/Remove-Even-Length-Strings.py","file_name":"Remove-Even-Length-Strings.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"137456818","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom pylab import *\r\nimport os.path\r\n\r\ncol_names = ['host', 'space1', 'space2', 'timestamp', 'timezone', 'request', 'code', 'bytes']\r\ndf1 = pd.read_csv('log.csv', delim_whitespace=True, names=col_names, header=None,\r\n error_bad_lines=False); \r\n# read csv data and place into dataframe\r\n\r\ndf1['freq'] = df1.groupby('code')['code'].transform('count');\r\n\r\ndf2 = df1[df1.code != 200]\r\n\r\n\r\n\r\n\r\n\r\ncount = 0\r\nb = 0\r\nresult = [] \r\nfor a in df2.index:\r\n for d in a:\r\n if abs(d - c) <= 2:\r\n count = count + 1\r\n b = b + 1\r\n result.append(str(c)+','+str(count))\r\n count = 0\r\n b = 0 \r\n \r\n\r\n\r\n\r\n''' \r\na = [200,4,5,7,8,11]\r\ncount = 0\r\nb = 0\r\nresult = []\r\nfor c in a:\r\n for d in a:\r\n if abs(d - c) <= 2:\r\n count = count + 1\r\n b = b + 1\r\n result.append(str(c)+','+str(count))\r\n count = 0\r\n b = 0 \r\n \r\n'''\r\n\r\n ","sub_path":"insight_testsuite/tests/test_features/log_input/feature4.py","file_name":"feature4.py","file_ext":"py","file_size_in_byte":1041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"427866","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"Tests for the shared functionality for Elasticsearch output modules.\"\"\"\n\nimport unittest\n\ntry:\n from mock import MagicMock\nexcept ImportError:\n from unittest.mock import MagicMock\n\nfrom dfvfs.path import fake_path_spec\n\nfrom plaso.containers import events\nfrom plaso.lib import definitions\nfrom plaso.output import shared_elastic\n\nfrom tests.containers import test_lib as containers_test_lib\nfrom tests.output import test_lib\n\n\nclass TestElasticsearchOutputModule(\n shared_elastic.SharedElasticsearchOutputModule):\n \"\"\"Elasticsearch output module for testing.\"\"\"\n\n def _Connect(self):\n \"\"\"Connects to an Elasticsearch server.\"\"\"\n self._client = MagicMock()\n\n\n@unittest.skipIf(shared_elastic.elasticsearch is None, 'missing elasticsearch')\nclass SharedElasticsearchOutputModuleTest(test_lib.OutputModuleTestCase):\n \"\"\"Tests the shared functionality for Elasticsearch output modules.\"\"\"\n\n # pylint: disable=protected-access\n\n _TEST_EVENTS = [\n {'a_binary_field': b'binary',\n 'data_type': 'syslog:line',\n 'filename': 'log/syslog.1',\n 'hostname': 'ubuntu',\n 'my_number': 123,\n 'some_additional_foo': True,\n 'path_spec': fake_path_spec.FakePathSpec(\n location='log/syslog.1'),\n 'text': (\n 'Reporter PID: 8442 (pam_unix(cron:session): session\\n '\n 'closed for user root)'),\n 'timestamp': '2012-06-27 18:17:01+00:00',\n 'timestamp_desc': definitions.TIME_DESCRIPTION_WRITTEN}]\n\n def testConnect(self):\n \"\"\"Tests the _Connect function.\"\"\"\n output_mediator = self._CreateOutputMediator()\n output_module = TestElasticsearchOutputModule(output_mediator)\n\n self.assertIsNone(output_module._client)\n\n output_module._Connect()\n\n self.assertIsNotNone(output_module._client)\n\n def testCreateIndexIfNotExists(self):\n \"\"\"Tests the _CreateIndexIfNotExists function.\"\"\"\n output_mediator = self._CreateOutputMediator()\n output_module = 
TestElasticsearchOutputModule(output_mediator)\n\n output_module._Connect()\n output_module._CreateIndexIfNotExists('test', {})\n\n def testFlushEvents(self):\n \"\"\"Tests the _FlushEvents function.\"\"\"\n output_mediator = self._CreateOutputMediator()\n\n formatters_directory_path = self._GetDataFilePath(['formatters'])\n output_mediator.ReadMessageFormattersFromDirectory(\n formatters_directory_path)\n\n output_module = TestElasticsearchOutputModule(output_mediator)\n\n output_module._Connect()\n output_module._CreateIndexIfNotExists('test', {})\n\n event, event_data, event_data_stream = (\n containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))\n output_module._InsertEvent(event, event_data, event_data_stream, None)\n\n self.assertEqual(len(output_module._event_documents), 2)\n self.assertEqual(output_module._number_of_buffered_events, 1)\n\n output_module._FlushEvents()\n\n self.assertEqual(len(output_module._event_documents), 0)\n self.assertEqual(output_module._number_of_buffered_events, 0)\n\n def testGetSanitizedEventValues(self):\n \"\"\"Tests the _GetSanitizedEventValues function.\"\"\"\n output_mediator = self._CreateOutputMediator()\n\n formatters_directory_path = self._GetDataFilePath(['formatters'])\n output_mediator.ReadMessageFormattersFromDirectory(\n formatters_directory_path)\n\n output_module = TestElasticsearchOutputModule(output_mediator)\n\n event, event_data, event_data_stream = (\n containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))\n\n event_tag = events.EventTag()\n event_tag.AddLabel('Test')\n\n event_values = output_module._GetSanitizedEventValues(\n event, event_data, event_data_stream, event_tag)\n\n expected_event_values = {\n 'a_binary_field': 'binary',\n 'data_type': 'syslog:line',\n 'datetime': '2012-06-27T18:17:01.000000Z',\n 'display_name': 'FAKE:log/syslog.1',\n 'filename': 'log/syslog.1',\n 'hostname': 'ubuntu',\n 'message': '[',\n 'my_number': 123,\n 'path_spec': (\n '{\"__type__\": \"PathSpec\", 
\"location\": \"log/syslog.1\", '\n '\"type_indicator\": \"FAKE\"}'),\n 'some_additional_foo': True,\n 'source_long': 'Log File',\n 'source_short': 'LOG',\n 'tag': ['Test'],\n 'text': ('Reporter PID: 8442 (pam_unix(cron:session): '\n 'session\\n closed for user root)'),\n 'timestamp': 1340821021000000,\n 'timestamp_desc': 'Content Modification Time',\n }\n\n self.assertIsInstance(event_values, dict)\n self.assertEqual(event_values, expected_event_values)\n\n def testInsertEvent(self):\n \"\"\"Tests the _InsertEvent function.\"\"\"\n event, event_data, event_data_stream = (\n containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))\n\n output_mediator = self._CreateOutputMediator()\n\n formatters_directory_path = self._GetDataFilePath(['formatters'])\n output_mediator.ReadMessageFormattersFromDirectory(\n formatters_directory_path)\n\n output_module = TestElasticsearchOutputModule(output_mediator)\n\n output_module._Connect()\n output_module._CreateIndexIfNotExists('test', {})\n\n self.assertEqual(len(output_module._event_documents), 0)\n self.assertEqual(output_module._number_of_buffered_events, 0)\n\n output_module._InsertEvent(event, event_data, event_data_stream, None)\n\n self.assertEqual(len(output_module._event_documents), 2)\n self.assertEqual(output_module._number_of_buffered_events, 1)\n\n output_module._InsertEvent(event, event_data, event_data_stream, None)\n\n self.assertEqual(len(output_module._event_documents), 4)\n self.assertEqual(output_module._number_of_buffered_events, 2)\n\n output_module._FlushEvents()\n\n self.assertEqual(len(output_module._event_documents), 0)\n self.assertEqual(output_module._number_of_buffered_events, 0)\n\n def testClose(self):\n \"\"\"Tests the Close function.\"\"\"\n output_mediator = self._CreateOutputMediator()\n output_module = TestElasticsearchOutputModule(output_mediator)\n\n output_module._Connect()\n\n self.assertIsNotNone(output_module._client)\n\n output_module.Close()\n\n 
self.assertIsNone(output_module._client)\n\n def testSetFlushInterval(self):\n \"\"\"Tests the SetFlushInterval function.\"\"\"\n output_mediator = self._CreateOutputMediator()\n output_module = TestElasticsearchOutputModule(output_mediator)\n\n self.assertEqual(\n output_module._flush_interval, output_module._DEFAULT_FLUSH_INTERVAL)\n\n output_module.SetFlushInterval(1234)\n\n self.assertEqual(output_module._flush_interval, 1234)\n\n def testSetIndexName(self):\n \"\"\"Tests the SetIndexName function.\"\"\"\n output_mediator = self._CreateOutputMediator()\n output_module = TestElasticsearchOutputModule(output_mediator)\n\n self.assertIsNone(output_module._index_name)\n\n output_module.SetIndexName('test_index')\n\n self.assertEqual(output_module._index_name, 'test_index')\n\n def testSetPassword(self):\n \"\"\"Tests the SetPassword function.\"\"\"\n output_mediator = self._CreateOutputMediator()\n output_module = TestElasticsearchOutputModule(output_mediator)\n\n self.assertIsNone(output_module._password)\n\n output_module.SetPassword('test_password')\n\n self.assertEqual(output_module._password, 'test_password')\n\n def testSetServerInformation(self):\n \"\"\"Tests the SetServerInformation function.\"\"\"\n output_mediator = self._CreateOutputMediator()\n output_module = TestElasticsearchOutputModule(output_mediator)\n\n self.assertIsNone(output_module._host)\n self.assertIsNone(output_module._port)\n\n output_module.SetServerInformation('127.0.0.1', 1234)\n\n self.assertEqual(output_module._host, '127.0.0.1')\n self.assertEqual(output_module._port, 1234)\n\n def testSetUsername(self):\n \"\"\"Tests the SetUsername function.\"\"\"\n output_mediator = self._CreateOutputMediator()\n output_module = TestElasticsearchOutputModule(output_mediator)\n\n self.assertIsNone(output_module._username)\n\n output_module.SetUsername('test_username')\n\n self.assertEqual(output_module._username, 'test_username')\n\n def testWriteEventBody(self):\n \"\"\"Tests the WriteEventBody 
function.\"\"\"\n output_mediator = self._CreateOutputMediator()\n\n formatters_directory_path = self._GetDataFilePath(['formatters'])\n output_mediator.ReadMessageFormattersFromDirectory(\n formatters_directory_path)\n\n output_module = TestElasticsearchOutputModule(output_mediator)\n\n output_module._Connect()\n output_module._CreateIndexIfNotExists('test', {})\n\n self.assertEqual(len(output_module._event_documents), 0)\n self.assertEqual(output_module._number_of_buffered_events, 0)\n\n event, event_data, event_data_stream = (\n containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))\n output_module.WriteEventBody(event, event_data, event_data_stream, None)\n\n self.assertEqual(len(output_module._event_documents), 2)\n self.assertEqual(output_module._number_of_buffered_events, 1)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/output/shared_elastic.py","file_name":"shared_elastic.py","file_ext":"py","file_size_in_byte":9081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"521912252","text":"'''\r\nThis scripts is to caculate the frequency of diffrent length miRNA in five files.\r\n'''\r\n\r\nimport re\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nimport csv\r\nimport os\r\n\r\n\r\ndef CountNumbers(file):\r\n \"count the number of different lenth of the sequences.\"\r\n with open(r\"{}\".format(file), \"r\") as f: # open files\r\n reads = f.read()\r\n\r\n pattern = re.compile(\"[A-Z]+\")\r\n match_list = re.findall(pattern, reads)\r\n\r\n count = dict()\r\n for i in range(17, 36):\r\n count[i] = 0\r\n for read in match_list:\r\n L = len(read)\r\n count[L] += 1\r\n\r\n ### a more pythonic method to caculate the number of various length of miRNA in the fasta file\r\n # from collections import Counter\r\n # length = [len(seq) for seq in match_list]\r\n # count = Counter(length)\r\n\r\n count = sorted(count.items(), key=lambda d: d[0], reverse=False)\r\n return count\r\n\r\n\r\ndef LengthToNumbers(file):\r\n \"return a tuple, in which every element contains length and numbers.\"\r\n cunt = CountNumbers(file)\r\n length = []\r\n numbers = []\r\n for i in range(len(cunt)):\r\n length.append(cunt[i][0])\r\n numbers.append(cunt[i][1])\r\n return length, numbers\r\n\r\n\r\nnumberlist = []\r\nlengthlist = [i for i in range(17, 36)]\r\nos.chdir(r\"D:/miRNA\")\r\nfilename = os.listdir()\r\n\r\nfor file in filename:\r\n length, numbers = LengthToNumbers(file)\r\n numberlist.append(numbers)\r\n\r\nwith open(r\"D:/PY sumbline coding/count.csv\", \"a\", encoding=\"utf-8\") as cf:\r\n csvfile = csv.writer(cf)\r\n csvfile.writerow(lengthlist)\r\n csvfile.writerows(numberlist)\r\n\r\n\r\n# visualize these data\r\ndata=pd.read_csv('count.csv').T\r\ndata.columns=[\"F1\",\"F2\",\"F3\",\"F4\",\"F5\"]\r\ndata.plot.bar(rot=0)\r\nplt.xlabel=(\"Length of miRNA\")\r\nplt.ylabel=(\"Frequency of Diffrent miRNA\")\r\nplt.title(\"the Number of Various Length of 
miRNA\")\r\nplt.show()\r\nplt.savefig('countFrequency.png',dpi=400,bbox_inches='tight')","sub_path":"count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"156937616","text":"from itertools import combinations\nimport numpy as np\nimport copy\n\n#素数を数え上げる\nprime_list = []\nfor i in range(2, 100):\n flag = 0\n for j in range(2, i//2+1):\n if(i % j == 0):\n flag = 1\n break\n if flag == 0:\n prime_list.append(i)\n\nnum = len(prime_list)\nsub = sum_all = sum(prime_list)\n#print(sum_all)\n\nsa_list = []\nsb_list = []\n\n#組み合わせ全探索\nfor k in range(num+1):\n for comb in combinations(prime_list, k):\n #閾値より小さかったら残す\n if sub > abs((sum_all-sum(comb))-sum(comb)):\n sa_list.clear()\n for p in comb:\n sa_list.append(p)\n sub = abs((sum_all-sum(comb))-sum(comb))\n\nprint(\"SA>{}\".format(sa_list))\nfor t in prime_list:\n if t not in sa_list:\n sb_list.append(t)\nprint(\"SB>{}\".format(sb_list))\n","sub_path":"0527/2210104053/problem7_1.py","file_name":"problem7_1.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"269244504","text":"import copy\n\nmsr_training_utf8 = open('msr_training.utf8.ic', 'r', encoding='utf-8')\nmsr_str = msr_training_utf8.read()\nmsr_training_utf8.close()\nmsr_list = msr_str.split('\\n')\n\nhead_dict = {\n 'B':0,\n 'M':1,\n 'E':2,\n 'S':3,\n}\nreverse_head_dict ={\n 0:'B',\n 1:'M',\n 2:'E',\n 3:'S',\n}\n\nmsr_pro_dict_txt = open('msr_pro_dict.txt', 'r', encoding='utf-8')\nmsr_pro_dict = eval(msr_pro_dict_txt.read())\nmsr_pro_dict_txt.close()\n\nlast_word_pro_dict_txt = open('last_word_pro_dict.txt', 'r', encoding='utf-8')\nlast_word_pro_dict = eval(last_word_pro_dict_txt.read())\nlast_word_pro_dict_txt.close()\n\nnext_word_pro_dict_txt = open('next_word_pro_dict.txt', 'r', encoding='utf-8')\nnext_word_pro_dict = eval(next_word_pro_dict_txt.read())\nnext_word_pro_dict_txt.close()\n\nmsr_double_array_pro_dict_txt = open('msr_double_array_pro_dict.txt', 'r', encoding='utf-8')\nmsr_double_array_pro_dict = eval(msr_double_array_pro_dict_txt.read())\nmsr_double_array_pro_dict_txt.close()\n\nstr_test = input('输入一句话:')\nsegmentation_list = []\npath_list = []\n\nfor i in str_test:\n segmentation_list.append([i, 0, 0, 0, 0])\n\npath_list = copy.deepcopy(segmentation_list)\n\nfor i in range(len(segmentation_list)):\n if i == 0:\n # 代表第一个字,首字需要注意没有上一个字,只需要计算W后和R\n W_before = 0\n R = 0\n for j in range(4):\n string_last_word = segmentation_list[i][0] # 希\n string_current_word = segmentation_list[i+1][0] # 腊\n if string_last_word+reverse_head_dict[j]+string_current_word in next_word_pro_dict:\n W_before = next_word_pro_dict[string_last_word+reverse_head_dict[j]+string_current_word]\n if string_last_word in msr_pro_dict:\n R = msr_pro_dict[string_last_word][j]\n segmentation_list[i][j + 1] = W_before + R\n continue\n\n if i < len(segmentation_list) - 1:\n # 中间的所有字都有上下\n string_last_word = segmentation_list[i - 1][0]\n string_current_word = segmentation_list[i][0]\n string_next_word = segmentation_list[i + 1][0]\n\n for a in range(4): # 遍历当前文字的四种状态\n 
last_word_pro = 0\n next_word_pro = 0\n R = 0\n P = 0\n # 判断上面一个字的哪个状态导致的这个字的某个状态\n\n max_index = 0\n for b in range(1, 4): # 求上一个字的四种状态对应的式子最大值和他的index\n if string_last_word in msr_double_array_pro_dict:\n if msr_double_array_pro_dict[string_last_word][max_index][a] * segmentation_list[i-1][max_index+1] < \\\n msr_double_array_pro_dict[string_last_word][b][a] * segmentation_list[i-1][b+1]:\n max_index = b\n\n if string_last_word+string_current_word+reverse_head_dict[a] in last_word_pro_dict:\n last_word_pro = last_word_pro_dict[string_last_word+string_current_word+reverse_head_dict[a]]\n if string_current_word+reverse_head_dict[a]+string_next_word in next_word_pro_dict:\n next_word_pro = next_word_pro_dict[string_current_word+reverse_head_dict[a]+string_next_word]\n if string_current_word in msr_pro_dict:\n R = msr_pro_dict[string_current_word][a]\n if string_last_word in msr_double_array_pro_dict:\n P = msr_double_array_pro_dict[string_last_word][max_index][a]\n segmentation_list[i][a+1] = P * segmentation_list[i-1][max_index + 1] + \\\n last_word_pro + next_word_pro + R\n path_list[i][a+1] = max_index\n\n if i == len(segmentation_list) - 1:\n string_last_word = segmentation_list[len(segmentation_list) - 2][0]\n string_current_word = segmentation_list[len(segmentation_list) - 1][0]\n for a in range(4):\n last_word_pro = 0\n P = 0\n R = 0\n max_index = 0\n for b in range(1, 4): # 求上一个字的四种状态对应的式子最大值和他的index\n if string_current_word in msr_double_array_pro_dict:\n if msr_double_array_pro_dict[string_current_word][max_index][a] * segmentation_list[i - 1][max_index + 1] < \\\n msr_double_array_pro_dict[string_current_word][b][a] * segmentation_list[i - 1][b + 1]:\n max_index = b\n if string_last_word+string_current_word+reverse_head_dict[a] in last_word_pro_dict:\n last_word_pro = last_word_pro_dict[string_last_word+string_current_word+reverse_head_dict[a]]\n if string_current_word in msr_double_array_pro_dict:\n P = 
msr_double_array_pro_dict[string_current_word][max_index][a]\n if string_current_word in msr_pro_dict:\n R = last_word_pro + msr_pro_dict[string_current_word][a]\n segmentation_list[len(segmentation_list) - 1][a+1] = \\\n P * segmentation_list[i - 1][max_index + 1] + R\n path_list[len(segmentation_list) - 1][a+1] = max_index\n\n# for i in segmentation_list:\n# print(i)\n# for i in path_list:\n# print(i)\n\nmax_index = 0\nfor i in range(1, 4):\n if segmentation_list[-1][i+1] > segmentation_list[-1][max_index+1]:\n max_index = i\nstr_sentence = ''\nindex = max_index\nstr_sentence = segmentation_list[-1][0] + reverse_head_dict[max_index] + str_sentence\nfor i in range(0, len(segmentation_list)-1):\n index = path_list[-1-i][index + 1]\n str_sentence = segmentation_list[-2-i][0] + reverse_head_dict[index] + str_sentence\n\nfor i in segmentation_list:\n print(i)\nfor i in path_list:\n print(i)\nprint(str_sentence)\n\nstring_sentence = ''\nfor s in str_sentence:\n if s == 'E' or s == 'S':\n string_sentence += ' '\n elif s == 'B' or s == 'M':\n continue\n else:\n string_sentence += s\nprint('分词结果:' + string_sentence)","sub_path":"First/word_segmentation.py","file_name":"word_segmentation.py","file_ext":"py","file_size_in_byte":5996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"107244530","text":"import requests\nimport urllib.request\nimport image2image as im\n\nurl = 'https://api.github.com/repos/pgrimaud/pgrimaud/stargazers'\n\n\ndef get_stargazers_from_api(api_url, start_page):\n # Without an API key in the params, you are limited to 50 API calls per hour\n params = {'format': 'json', 'page': start_page, 'per_page': 100}\n response = requests.get(url=api_url, params=params)\n return response.json()\n\n\ndef get_all_users():\n page = 1\n avatars = []\n\n has_results = get_stargazers_from_api(url, page)\n while has_results:\n for result in has_results:\n avatars.append(result['avatar_url'] + '&s=30')\n print('Got page ' + str(page) + ' of stargazers')\n page += 1\n has_results = get_stargazers_from_api(url, page)\n\n return avatars\n\n\ndef download_avatars():\n counter = 1\n for avatar in get_all_users():\n urllib.request.urlretrieve(avatar, './avatars/avt' + str(counter) + '.png')\n counter += 1\n\n\nif __name__ == '__main__':\n print('Starting script')\n download_avatars()\n print('Avatars have been downloaded')\n im.main(im.get_args())\n print('Output has been created')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"180472712","text":"import logging\nfrom gi.repository import Gtk\n\nclass StreamblankToolbarController(object):\n\t\"\"\" Manages Accelerators and Clicks on the Composition Toolbar-Buttons \"\"\"\n\n\tdef __init__(self, drawing_area, win, uibuilder, warning_overlay):\n\t\tself.log = logging.getLogger('StreamblankToolbarController')\n\n\t\tself.warning_overlay = warning_overlay\n\n\t\tblank_sources = ['pause', 'nostream']\n\n\n\t\tlivebtn = uibuilder.find_widget_recursive(drawing_area, 'stream_live')\n\t\tblankbtn = uibuilder.find_widget_recursive(drawing_area, 'stream_blank')\n\n\t\tblankbtn_pos = drawing_area.get_item_index(blankbtn)\n\n\t\tlivebtn.connect('toggled', self.on_btn_toggled)\n\t\tlivebtn.set_name('live')\n\n\t\tfor idx, name in enumerate(blank_sources):\n\t\t\tif idx == 0:\n\t\t\t\tnew_btn = blankbtn\n\t\t\telse:\n\t\t\t\tnew_icon = Gtk.Image.new_from_pixbuf(blankbtn.get_icon_widget().get_pixbuf())\n\t\t\t\tnew_btn = Gtk.RadioToolButton(group=livebtn)\n\t\t\t\tnew_btn.set_icon_widget(new_icon)\n\t\t\t\tdrawing_area.insert(new_btn, blankbtn_pos+1)\n\n\t\t\tnew_btn.set_label(\"Stream %s\" % name)\n\t\t\tnew_btn.connect('toggled', self.on_btn_toggled)\n\t\t\tnew_btn.set_name(name)\n\n\tdef on_btn_toggled(self, btn):\n\t\tif not btn.get_active():\n\t\t\treturn\n\n\t\tself.log.info(\"on_btn_toggled: %s\", btn.get_name())\n\t\tif btn.get_name() == 'live':\n\t\t\tself.warning_overlay.disable()\n\n\t\telse:\n\t\t\tself.warning_overlay.enable(btn.get_name())\n","sub_path":"voctogui/lib/toolbar/streamblank.py","file_name":"streamblank.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"399494427","text":"# list (mutable)\ngrades = [77, 80, 30, 80]\n# print(sum(grades) / len(grades))\n\n# tuplets (immutable)\ngrades_tuplets = (77, 213, 23)\n\n# sets - collection of unique & unordered\ngrades_set = {214, 77, 33}\n\n# grades_tuplets = grades_tuplets + (2,)\n# print(grades_set)\n\n\n# set operations\nlottery_numbers = {1,2,3,4}\nwinner_numbers = {1,2,3,7}\n\n# print(lottery_numbers.intersection(winner_numbers)) # 1,2,3\n# print(lottery_numbers.union(winner_numbers)) # 1,2,3,4,7\n# print(lottery_numbers.difference(winner_numbers)) # 4","sub_path":"section2/lists_tuplets_sets.py","file_name":"lists_tuplets_sets.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"448744054","text":"import os\n\nimport numpy as np\nimport segmentation_models_pytorch as smp\nimport pydensecrf.densecrf as dcrf\nimport torch\n\nfrom lib import unet\n\ndef load_model(name, pretrained=False):\n '''\n Loads model into memory\n\n Arguements:\n name (string) -- codename of the model. Avalible models:\n * pure_unet - Simple U-Net model\n * unet_resnet18 - U-Net with pretrained ResNet18 encoder\n * unet_vgg11 - U-Net with pretrained VGG11 encoder\n * unet_seresnext50 - U-Net with pretrained SE-ResNeXt50 encoder\n pretrained (boolean) -- load pretrained weights. \n If True, model loads in evaluation mode.\n ''' \n if not pretrained:\n if name==\"pure_unet\":\n model = unet.unet_model.UNet(3, 1)\n elif name==\"unet_resnet18\":\n model = smp.Unet(\"resnet18\", \"imagenet\")\n elif name==\"unet_vgg11\":\n model = smp.Unet(\"vgg11\", \"imagenet\")\n elif name==\"se_resnext50\":\n model = smp.Unet(\"se_resnext50_32x4d\", \"imagenet\")\n else:\n if name==\"pure_unet\":\n model = torch.load('../models/best_model_unet.pth')\n elif name==\"unet_resnet18\":\n model = torch.load('../models/best_model_unet_reanet18_aug.pth')\n elif name==\"unet_vgg11\":\n model = torch.load('../models/best_model_unet_vgg11_aug.pth')\n elif name==\"se_resnext50\":\n model = torch.load('../models/best_model_seresnext50_aug.pth')\n model = model.eval()\n return model\n\nclass CRFModel:\n '''\n Wrapper of DenseCRF postprocessor on PyTorch segmentation models\n\n Arguements:\n base_model (torch.Module) -- wrapped PyTorch model\n device (string) -- device where model is held\n\n Methods:\n __call__(tensor) -- interface for base_model.forward(tensor)\n get_mask(image) -- apply model with DenseCRF postprocessing\n to an input image\n '''\n def __init__(self, base_model, device=None):\n self.base = base_model\n self.device = device\n if device is not None:\n self.base = self.base.to(device)\n\n def __call__(self, input):\n return self.base(input)\n\n def get_mask(self, image, 
no_crf=False):\n # transform image to uint8 array for pydencecrf\n if image.dtype in {np.float16, np.float32, np.float64}:\n image = (image*255).astype(np.np.uint8)\n # get tensor for model\n tensor = self.__img_to_torch(image)\n mask = self.base(tensor)\n mask = self.__mask_to_numpy(mask)\n if no_crf:\n return (mask>0.5).astype(np.uint8)\n # Apply DenseCRF\n mask = self.__dense_crf(image, mask)\n return mask\n\n def __img_to_torch(self, image):\n '''\n Tranforms image to PyTorch format\n\n Eg. image(240,320,3) -> tensor(1,3,240,320)\n '''\n image = image.astype(np.float32)#/255\n image = np.moveaxis(image, 2, 0)\n tensor = torch.tensor(image[np.newaxis, :, :, :])\n if self.device is not None:\n tensor = tensor.to(self.device)\n return tensor\n\n def __mask_to_numpy(self, tensor):\n '''\n Transforms model output to numpy array\n '''\n tensor = tensor.cpu().detach()\n if torch.min(tensor)<0 or torch.max(tensor)>1:\n tensor = torch.sigmoid(tensor)\n return tensor.numpy()[0,0]\n\n def __dense_crf(self, img, output_probs):\n # code from: https://github.com/milesial/Pytorch-UNet/blob/master/utils/crf.py\n h = output_probs.shape[0]\n w = output_probs.shape[1]\n\n output_probs = np.expand_dims(output_probs, 0)\n output_probs = np.append(1 - output_probs, output_probs, axis=0)\n\n d = dcrf.DenseCRF2D(w, h, 2)\n U = -np.log(output_probs)\n U = U.reshape((2, -1))\n U = np.ascontiguousarray(U)\n img = np.ascontiguousarray(img)\n\n d.setUnaryEnergy(U)\n\n d.addPairwiseGaussian(sxy=20, compat=3)\n d.addPairwiseBilateral(sxy=30, srgb=20, rgbim=img, compat=10)\n\n Q = d.inference(5)\n Q = np.argmax(np.array(Q), axis=0).reshape((h, w))\n\n return Q\n","sub_path":"human_segmentation/lib/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"614229891","text":"#!/usr/bin/env python\n\nfrom blimpy.guppi import GuppiRaw\nimport h5py\ntry:\n import bitshuffle.h5\n HAS_BITSHUFFLE = True\nexcept ImportError:\n HAS_BITSHUFFLE = False\n \nimport time\nimport os\nimport glob\nimport numpy as np\n\ndef cmd_tool(args=None):\n \"\"\" Command line tool for converting guppi raw into HDF5 versions of guppi raw \"\"\"\n from argparse import ArgumentParser\n\n if not HAS_BITSHUFFLE:\n print(\"Error: the bitshuffle library is required to run this script.\")\n exit()\n\n parser = ArgumentParser(description=\"Command line utility for creating HDF5 Raw files.\")\n parser.add_argument('filename', type=str, help='Name of filename to read')\n args = parser.parse_args()\n\n fileroot = args.filename.split('.0000.raw')[0]\n\n filelist = glob.glob(fileroot + '*.raw')\n filelist = sorted(filelist)\n\n\n # Read first file\n r = GuppiRaw(filelist[0])\n header, data = r.read_next_data_block()\n dshape = data.shape #r.read_next_data_block_shape()\n print(dshape)\n\n n_blocks_total = 0\n for filename in filelist:\n print(filename)\n r = GuppiRaw(filename)\n n_blocks_total += r.n_blocks\n print(n_blocks_total)\n\n full_dshape = np.concatenate(((n_blocks_total,), dshape))\n\n\n # Create h5py file\n h5 = h5py.File(fileroot + '.h5', 'w')\n h5.attrs['CLASS'] = 'GUPPIRAW'\n block_size = 0 # This is chunk block size\n dset = h5.create_dataset('data',\n shape=full_dshape,\n #compression=bitshuffle.h5.H5FILTER,\n #compression_opts=(block_size, bitshuffle.h5.H5_COMPRESS_LZ4),\n dtype=data.dtype) \n\n h5_idx = 0\n for filename in filelist:\n print(\"\\nReading %s header...\" % filename)\n r = GuppiRaw(filename)\n h5 = h5py.File(filename + '.h5', 'w')\n \n header, data = r.read_next_data_block()\n \n for ii in range(0, r.n_blocks):\n t0 = time.time()\n print(\"Reading block %i of %i\" % (h5_idx+1, full_dshape[0]))\n header, data = r.read_next_data_block()\n t1 = time.time()\n \n t2 = time.time()\n print(\"Writing block %i of %i\" % 
(h5_idx+1, full_dshape[0]))\n dset[h5_idx, :] = data\n t3 = time.time()\n print(\"Read: %2.2fs, Write %2.2fs\" % ((t1-t0), (t3-t2)))\n \n h5_idx += 1\n\n # Copy over header information as attributes\n for key, value in header.items():\n dset.attrs[key] = value\n\n h5.close()\n\n t1 = time.time()\n print(\"Conversion time: %2.2fs\" % (t1- t0))\n\nif __name__ == \"__main__\":\n cmd_tool()","sub_path":"blimpy/deprecated/gup2hdf.py","file_name":"gup2hdf.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"16383458","text":"# encoding:utf-8\nimport os\nimport pyhfss.HfssGeneral as pg\nimport pyhfss.Hfss3DModeler as pd\ndef op():\n\n testscript = pg.HfssGeneral()\n testscript.HfssNewProject()\n testscript.HfssInsertDesign()\n testmodel = pd.Hfss3DModeler(testscript.fid)\n testmodel.HfssDrawBox('box2',['0','1','1'],['1','2','3'],'pec','False')\n testscript.HfssClosefid()\n\n os.system(r\"D:\\Software\\AnsysEM\\AnsysEM19.3\\Win64\\ansysedt.exe /RunScript D:\\code\\hfss\\tmp.py\")\n\n\nif __name__ == '__main__':\n op()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"559050622","text":"import numpy as num\n# from print_stats import print_test_stats, build_full_flag\n# import cresthh.anuga\nimport sys\nsys.path.append('/home/ZhiLi/CRESTHH')\nimport cresthh.anuga\nfrom cresthh import anuga\nfrom cresthh.anuga import Domain\nimport pandas as pd\n# from anuga import Transmissive_boundary, Refelective_boundary\nimport numpy as np\nimport os\n\nfrom cresthh.anuga import distribute, myid, numprocs, finalize, barrier\nimport geopandas as gpd\nfrom pyproj import Proj, CRS, transform\n\n\nmyProj= Proj(\"+proj=utm +zone=15, +north +ellps=WGS84 +datum=WGS84 +units=m +no_defs\")\nstart='20170825000000'\nend= '20170901000000'\ninterval= '2M'\nif myid==0:\n\n \n yieldstep= pd.Timedelta(interval).total_seconds() \n topo_file= '/home/ZhiLi/CRESTHH/data/dem/DEM_sub.tif'\n study_area= gpd.read_file('/home/ZhiLi/CRESTHH/Examples/excessive_rain/68500_sub/68500_basin.shp')\n interior_area= gpd.read_file('/home/ZhiLi/CRESTHH/data/buffered_mainstream_new/mainstream_buffer.shp')\n base_resolution = 1000000 #1km\n interior_resolution= 1000 #10 m2 \n \n myProj = Proj(\"+proj=utm +zone=15, +south +ellps=WGS84 +datum=WGS84 +units=m +no_defs\")\n \n lons= np.array(study_area.exterior[0].coords)[:,0]; lats=np.array(study_area.exterior[0].coords)[:,1]\n utm_coords_ext= [myProj(lon,lat) for (lon, lat) in zip(lons, lats)]\n lons= np.array(interior_area.exterior[0].coords)[:,0]; lats=np.array(interior_area.exterior[0].coords)[:,1]\n utm_coords_int= [myProj(lon,lat) for (lon, lat) in zip(lons, lats)] \n if os.path.exists('1km_082500.msh'):\n DOMAIN= anuga.create_domain_from_file('1km_082500.msh')\n else:\n DOMAIN= anuga.create_domain_from_regions(\n utm_coords_ext,\n boundary_tags={'bottom': [0]},\n maximum_triangle_area=1000000,\n interior_regions=[[utm_coords_int, interior_resolution]],\n mesh_filename='1km_082500.msh') \n # domain= anuga.create_domain_from_regions(bounding_polygon, boundary_tags={'bottom':[0],}, 
maximum_triangle_area=0.001,verbose=True)\n DOMAIN.set_name('Aug_Sep_coupled_refined_channel')\n DOMAIN.set_proj(\"+proj=utm +zone=15, +north +ellps=WGS84 +datum=WGS84 +units=m +no_defs\")\n DOMAIN.set_quantity('elevation', filename=topo_file, location='centroids') # Use function for elevation\n DOMAIN.set_quantity('friction', filename='/home/ZhiLi/CRESTHH/data/Texas_friction/manningn.tif', location='centroids') # Constant friction \n DOMAIN.set_quantity('stage', expression='elevation', location='centroids') \n DOMAIN.set_quantity('SM', 0.012, location='centroids')\n DOMAIN.set_quantity('Ksat', filename='/hydros/MengyuChen/ef5_param/crest_params/ksat_usa.tif', location='centroids')\n DOMAIN.quantities['Ksat'].centroid_values[:]*= 289.0\n DOMAIN.set_quantity('WM', filename='/hydros/MengyuChen/ef5_param/crest_params/wm_usa.tif', location='centroids')\n DOMAIN.quantities['WM'].centroid_values[:]*= 871.0\n DOMAIN.set_quantity('B', filename='/hydros/MengyuChen/ef5_param/crest_params/b_usa.tif', location='centroids')\n DOMAIN.quantities['B'].centroid_values[:]*= 5e-10\n DOMAIN.set_quantity('IM', filename='/hydros/MengyuChen/ef5_param/crest_params/im_usa.tif', location='centroids')\n DOMAIN.quantities['IM'].centroid_values[:]*= 0.06\n DOMAIN.set_quantity('KE', 0.415853, location='centroids')\n \n Br = anuga.Reflective_boundary(DOMAIN)\n Bt = anuga.Transmissive_boundary(DOMAIN)\n Bi = anuga.Dirichlet_boundary([0, 0, 0]) \n\n DOMAIN.set_boundary({'bottom': Bt,\n 'exterior': Br})\nelse:\n DOMAIN=None\n\nbarrier()\n\nDOMAIN= distribute(DOMAIN)\nDOMAIN.set_proj(\"+proj=utm +zone=15, +north +ellps=WGS84 +datum=WGS84 +units=m +no_defs\")\nDOMAIN.set_coupled(True)\n\n#domain.set_evap_dir('/hydros/MengyuChen/pet', pattern='cov_et17%m%d.asc', freq='D')\n#domain.set_precip_dir('/home/ZhiLi/CRESTHH/data/precip',pattern='imerg%Y%m%dS%H%M%S.tif', freq='H')\n#domain.set_timestamp('20170825180000', 
format='%Y%m%d%H%M%S')\n#domain.set_time_interval('1H')\n\nDOMAIN.set_evap_dir('/home/ZhiLi/CRESTHH/data/evap', pattern='cov_et17%m%d.asc.tif', freq='1D')\n# domain.set_precip_dir('/home/ZhiLi/CRESTHH/data/precip',pattern='nimerg%Y%m%dS%H%M%S.tif', freq='H')\nDOMAIN.set_precip_dir('/hydros/MengyuChen/mrmsPrecRate',pattern='PrecipRate_00.00_%Y%m%d-%H%M00.grib2-var0-z0.tif', freq=interval)\nDOMAIN.set_timestamp(start, format='%Y%m%d%H%M%S')\nDOMAIN.set_time_interval(interval)\ntotal_seconds= (pd.to_datetime(end) - pd.to_datetime(start)).total_seconds()\n\n\nfor t in DOMAIN.evolve(yieldstep=120, duration=total_seconds):\n if myid==0:\n DOMAIN.write_time()\n\nDOMAIN.sww_merge(verbose=True)\n","sub_path":"Examples/excessive_rain/.ipynb_checkpoints/parallel_job-checkpoint.py","file_name":"parallel_job-checkpoint.py","file_ext":"py","file_size_in_byte":4688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"310641821","text":"from ..funcs import load_images_from_spritesheet\nimport pygame, json\n\nclass Image:\n def __init__(self, editor, j, i, x, y, offset, data=None, selection=None):\n self.editor = editor\n self.i = i\n self.j = j\n self.position = [x,y]\n self.offset = offset\n\n if data:\n self.id = data['id']\n self.filepath = data['filepath']\n self.group_name = data['group_name']\n self.image = data['image']\n self.index = data['index']\n self.scale = data['scale']\n elif selection:\n self.id = selection.id\n self.filepath = selection.filepath\n self.group_name = selection.group_name\n self.image = selection.image\n self.index = selection.index\n self.scale = selection.scale\n\n try:\n if selection:\n self.autotile_config = json.load(open(selection.autotile_config_path, 'r'))\n return\n\n self.autotile_config = json.load(open(data['autotile_config_path'], 'r'))\n except:\n self.autotile_config = None\n\n def show(self, surface=None):\n if not surface:\n surface = self.editor.screen\n #Renders the image according to the self.editor.world.scroll\n surface.blit(self.image, [self.position[0]+self.offset[0]-self.editor.world.scroll[0], self.position[1]+self.offset[1]-self.editor.world.scroll[1]])\n\n def fill(self, images, selection, depth=950):\n if depth == 0:\n return\n\n for dir in [(0,-1), (1,0), (0,1), (-1,0)]:\n i, j = self.i+dir[1], self.j+dir[0]\n\n if i-self.editor.world.scroll[1]//self.editor.res >= 0 and i-self.editor.world.scroll[1]//self.editor.res < self.editor.screen.get_height()//self.editor.res+1 and j-self.editor.world.scroll[0]//self.editor.res >= 0 and j-self.editor.world.scroll[0]//self.editor.res < self.editor.screen.get_width()//self.editor.res+1:\n neighbor = self.get_image_with_index(i, j, images)\n\n #If the neighbor is not yet defined, the neighbor becomes an image object and is put into the images list\n if not neighbor:\n neighbor = Image(self.editor, j, i, j*self.editor.res, i*self.editor.res, self.offset, 
selection=selection)\n images.append(neighbor)\n neighbor.fill(images, selection, depth-1)\n\n def autotile(self, images, selector_panel_images):\n if self.autotile_config:\n neighbors = self.get_neighbors(images)\n\n binary = '0000'\n\n #Sets binary according to the neighbors around the image\n for neighbor in neighbors:\n if neighbor and neighbor.id == self.id:\n binary += '1'\n else:\n binary += '0'\n\n #Gets the image according to the binary and the configuration file\n try:\n key = str(int(binary, 2))\n index = self.autotile_config[key]\n\n images = load_images_from_spritesheet(f'data/graphics/spritesheet/{self.filepath}.png')\n image = images[index]\n\n self.image = pygame.transform.scale(image, (image.get_width()*self.scale, image.get_height()*self.scale))\n self.index = index\n\n try:\n offset_data = json.load(open(f'data/configs/offsets/{self.id}_offset.json', 'r'))\n offset = offset_data[str(self.index)]\n offset[0] *= self.scale\n offset[1] *= self.scale\n except Exception as e:\n # print(e)\n offset = [0,0]\n\n self.offset = offset\n\n except Exception as e:\n print('AUTOTILE ERROR: ', e)\n\n def get_neighbors(self, images):\n #Returns neighbor images\n neighbors = []\n\n for dir in [(0,-1), (1,0), (0,1), (-1,0)]:\n i, j = self.i+dir[1], self.j+dir[0]\n if i-self.editor.world.scroll[1]//self.editor.res >= 0 and i-self.editor.world.scroll[1]//self.editor.res < self.editor.screen.get_height()//self.editor.res+1 and j-self.editor.world.scroll[0]//self.editor.res >= 0 and j-self.editor.world.scroll[0]//self.editor.res < self.editor.screen.get_width()//self.editor.res+1:\n neighbor = self.get_image_with_index(i, j, images)\n neighbors.append(neighbor)\n\n return neighbors\n\n def get_image_with_index(self, i, j, images):\n #Returns image with the same given index (i, j)\n for image in images:\n if image.i == i and image.j == j:\n return image\n\n return None\n\n def within(self, starting, ending):\n #Returns image if it is within the rectangle dimension\n 
sx, sy = starting[0]+self.editor.world.scroll[0], starting[1]+self.editor.world.scroll[1]\n ex, ey = ending[0]+self.editor.world.scroll[0], ending[1]+self.editor.world.scroll[1]\n\n return (\n self.position[0] > sx and\n self.position[1] > sy and\n self.position[0]+self.get_width() < ex and\n self.position[1]+self.get_height() < ey\n )\n\n def get_width(self):\n #Returns image width\n return self.image.get_width()\n\n def get_height(self):\n #Returns image height\n return self.image.get_height()\n","sub_path":"Level_Editor/scripts/world/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":5433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"523023805","text":"import json\nimport logging\nfrom django.http.response import HttpResponse\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.contrib import auth\nfrom topicos.models import Topico\nfrom topicos.models import Mensagem\nfrom topicos.decorators import ajax_login_required\nfrom django.contrib.auth.models import User\n\nlogger = logging.getLogger(__name__)\n\n\ndef login(request):\n username = request.POST['username']\n password = request.POST['password']\n user = auth.authenticate(username=username, password=password)\n user_dict = None\n\n if user is not None:\n if user.is_active:\n auth.login(request, user)\n user_dict = _user2dict(user)\n return HttpResponse(json.dumps(user_dict), content_type='application/json')\n\n\ndef cadastro(request):\n username = request.POST['username']\n password = request.POST['password']\n email = request.POST['email']\n first_name = request.POST['first_name']\n is_superuser = json.loads(request.POST['is_superuser'])\n\n user_dict = adiciona_usuario(username, password, email,\n first_name, is_superuser)\n\n return HttpResponse(json.dumps(user_dict), content_type='application/json')\n\n\ndef adiciona_usuario(username, password, email, first_name, is_superuser):\n user_dict = None\n\n if username and password and email and first_name:\n\n try:\n user_exist = User.objects.get(username=username)\n except:\n user_exist = None\n\n if user_exist is None:\n user = User.objects.create_user(username, password, email)\n\n user.first_name = first_name\n user.set_password(password)\n user.is_superuser = is_superuser\n user.save()\n user_dict = _user2dict(user)\n\n return user_dict\n\n\ndef logout(request):\n auth.logout(request)\n return HttpResponse('{}', content_type='application/json')\n\n\ndef _whoami(request):\n if request.user.is_authenticated():\n i_am = {\n 'user': _user2dict(request.user),\n 'authenticated': True,\n }\n else:\n i_am = {'authenticated': False}\n\n return i_am\n\n\ndef 
whoami(request):\n i_am = _whoami(request)\n return HttpResponse(json.dumps(i_am), content_type='application/json')\n\n\ndef get_user_details(request):\n username = request.GET['username']\n user = auth.get_user_model().objects.get(username=username)\n user_dict = _user2dict(user)\n return HttpResponse(json.dumps(user_dict), content_type='application/json')\n\n\n@ajax_login_required\ndef list_topicos(request):\n filters = json.loads(request.GET.get('filters', '{}'))\n topicos_dic = pega_topicos()\n\n return HttpResponse(json.dumps(topicos_dic), content_type='application/json')\n\n\ndef pega_topicos():\n topicos = Topico.objects.all()\n topicos_dic = [t.to_dict_json() for t in topicos]\n for t in topicos_dic:\n user = t['id_usuario']\n t['id_usuario'] = user.id\n\n return topicos_dic\n\n\n@ajax_login_required\ndef edita_topico(request):\n topico_id = json.loads(request.GET.get('topico_id'))\n topico_editado = request.GET.get('topico_editado')\n i_am = _whoami(request)['user']\n\n resposta = update_topico(topico_id, topico_editado, i_am['username'])\n\n return HttpResponse(json.dumps(resposta), content_type='application/json')\n\n\ndef update_topico(topico_id, topico_editado, username):\n if topico_editado:\n try:\n topico_exists = Topico.objects.get(name=topico_editado)\n return 'Esse nome eh invaliado'\n except Topico.DoesNotExist:\n try:\n topico = Topico.objects.get(id=topico_id)\n except:\n return 'Topico Invalido'\n\n try:\n i_am = User.objects.get(username=username)\n except:\n return 'Usuario Invalido'\n\n user = topico.id_usuario\n\n if user.id == i_am.id or (i_am.is_superuser\n and user.is_superuser == False):\n\n try:\n topico.name = topico_editado\n topico.save()\n return \"Topico salvo com sucesso\"\n except:\n return \"Topico nao pode ser salvo\"\n else:\n return 'Voce nao pode modificar esse topico'\n else:\n return 'O nome do topico nao pode ficar em branco'\n\n\n@ajax_login_required\ndef list_mensagens(request):\n topico_id = 
request.GET.get('topico_id')\n\n if topico_id is None:\n logger.error('Nenhum topico encontrado, retornando []')\n return HttpResponse('[]', content_type='application/json')\n\n topico_id = json.loads(topico_id)\n\n mensagens_dict = get_mensagens(topico_id)\n\n return HttpResponse(json.dumps(mensagens_dict), content_type='application/json')\n\n\ndef get_mensagens(topico_id):\n\n if topico_id:\n try:\n thread = Topico.objects.get(id=topico_id)\n except:\n return []\n\n try:\n todas_mensagens = Mensagem.objects.filter(id_topico=thread)\n except:\n return []\n\n mensagens_dict = [m.to_dict_json() for m in todas_mensagens]\n for m in mensagens_dict:\n user = m['id_usuario']\n m['usuario_nome'] = user.username\n m['data'] = str(m['data'])\n m['data'] = m['data'].split('+')[0]\n m['data'] = m['data'].split('.')[0]\n m['id_topico'] = ''\n m['id_usuario'] = user.id\n else:\n mensagens_dict = []\n\n return mensagens_dict\n\n\n@ajax_login_required\ndef deleta_mensagem(request):\n usuario_id = request.GET.get('usuario_id')\n mensagem_id = request.GET.get('mensagem_id')\n\n if usuario_id and mensagem_id:\n usuario_id = json.loads(usuario_id)\n mensagem_id = json.loads(mensagem_id)\n\n i_am = _whoami(request)['user']\n\n mensagem_dict = deletar_mensagembd(usuario_id, mensagem_id, i_am['username'])\n\n return HttpResponse(json.dumps(mensagem_dict), content_type='application/json')\n\n\ndef deletar_mensagembd(usuario_id, mensagem_id, username):\n mensagem_dict = 'Mansagem nao encontrada'\n\n try:\n user = User.objects.get(id=usuario_id)\n except:\n return 'Usuario Invalido'\n\n try:\n i_am = User.objects.get(username=username)\n except:\n return 'Voce precisa logar para ter acesso a essa funcionalidade'\n\n if i_am.id == user.id:\n try:\n mensagem_dict = _deletar(mensagem_id)\n except:\n mensagem_dict = 'Mensagem nao existe'\n elif i_am.is_superuser and user.is_superuser == False:\n try:\n mensagem_dict = _deletar(mensagem_id)\n except:\n mensagem_dict = 'Mensagem nao existe'\n 
else:\n mensagem_dict = 'Mensagem nao pode ser deletada por você'\n\n return mensagem_dict\n\n\ndef _deletar(mensagem_id):\n try:\n mensagem = Mensagem.objects.filter(id=mensagem_id)\n mensagem.delete()\n return 'Mensagem deletada com sucesso'\n except:\n return 'Mensagem id invalido'\n\n\n@ajax_login_required\ndef registra_mensagem(request):\n topico_id = json.loads(request.GET.get('topico_id'))\n mensagem = request.GET.get('mensagem')\n i_am = _whoami(request)['user']\n\n mensagem_dict = salva_mensagem(topico_id, mensagem, i_am['username'])\n\n return HttpResponse(json.dumps(mensagem_dict), content_type='application/json')\n\n\ndef salva_mensagem(topico_id, mensagem, username):\n\n try:\n usuario = User.objects.get(username=username)\n except:\n return 'Usuario Invalido'\n\n try:\n thread = Topico.objects.get(id=topico_id)\n except:\n return 'Topico Invalido'\n\n try:\n m = Mensagem(id_usuario=usuario, id_topico=thread, conteudo=mensagem)\n m.save()\n return \"Mensagem salva com sucesso\"\n except:\n return \"Mensagem nao pode ser salva no momento\"\n\n\n@ajax_login_required\ndef registra_topico(request):\n topico = request.GET.get('topico')\n mensagem = request.GET.get('mensagem')\n i_am = _whoami(request)['user']\n\n mensagem_topico = salva_topico(topico, mensagem, i_am['username'])\n\n return HttpResponse(json.dumps(mensagem_topico), content_type='application/json')\n\n\ndef salva_topico(topico, mensagem, username):\n if topico and mensagem:\n try:\n usuario = User.objects.get(username=username)\n except:\n return 'Esse usuario nao existe'\n\n try:\n topico_existe = Topico.objects.get(name=topico)\n return 'Topico ja existe'\n except:\n thread = Topico(name=topico, id_usuario=usuario)\n thread.save()\n\n m = Mensagem(id_usuario=usuario, id_topico=thread, conteudo=mensagem)\n m.save()\n return 'Topico adicionado com sucesso'\n else:\n return 'Existem campos em branco'\n\n\n@ajax_login_required\ndef deleta_topico(request):\n topico_id = topico = 
request.GET.get('topico_id')\n\n if topico_id:\n topico_id = json.loads(topico_id)\n i_am = _whoami(request)['user']\n\n mensagem_delete = deletar_topico(topico_id, i_am['username'])\n else:\n mensagem_delete = 'Topico nao existe'\n\n return HttpResponse(json.dumps(mensagem_delete), content_type='application/json')\n\n\ndef deletar_topico(topico_id, username):\n try:\n topico = Topico.objects.get(id=topico_id)\n except:\n return 'Topico invalido'\n\n try:\n i_am = User.objects.get(username=username)\n except:\n return 'Usuario Invalido'\n\n user = topico.id_usuario\n\n if i_am.id == user.id or (i_am.is_superuser and user.is_superuser == False):\n mensagens_deletadas = Mensagem.objects.filter(id_topico=topico.id)\n try:\n for m in mensagens_deletadas:\n Mensagem.objects.filter(id=m.id).delete()\n topico.delete()\n return 'Topico deletado com sucesso'\n except:\n return 'O topico nao pode ser deletado'\n else:\n return 'Topico nao pode ser deletada por você'\n\n\ndef _user2dict(user):\n return {\n 'username': user.username,\n 'name': user.first_name,\n 'permissions':{\n 'ADMIN': user.is_superuser,\n 'STAFF': user.is_staff,\n }\n }\n","sub_path":"topicos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"588906441","text":"import pandas as pd\nimport numpy as np\nimport requests, json\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import train_test_split\nfrom preprocessing import load_stop_words, lemmatize_remove_stop_words, preprocess_columns\n\ndata = pd.read_csv('data/dataset2.csv')\ncolumns_names = list(pd.read_csv('data/dataset2.csv', nrows=1).columns)\nstop_words = load_stop_words()\ntrain_dataset, test_dataset = ([] for i in range(2))\n\nchunk_size_for_test = 50\ntotal_predicted, good_predicted = (0,)*2\n\nfor x in range(len(data.columns)):\n dataset = data[data.columns[x]].dropna().apply(preprocess_columns).tolist()\n train, test = train_test_split(dataset, test_size=0.2)\n column_as_string = ' '.join(str(v) for v in train).replace('\"', '').lower()\n preprocessed_train_data = lemmatize_remove_stop_words(column_as_string, stop_words)\n train_dataset.append(preprocessed_train_data)\n #rozdel testovacie data (list) na mensie listy o velkosti \"chunk_size_for_test\" + v kazdom tomto liste sprav z elementov jeden string:\n test_chunks = [' '.join(str(v) for v in test[x:x+chunk_size_for_test]).replace('\"', '').lower() for x in range(0, len(test), chunk_size_for_test)]\n test_dataset.append(test_chunks)\n\ntext_clf_svm = Pipeline([\n ('vect', CountVectorizer()),\n ('tfidf', TfidfTransformer()),\n ('clf-svm', SGDClassifier(loss='hinge', penalty='l2', alpha=1e-3, max_iter=5, random_state=42)),]).fit(train_dataset, columns_names)\n\nfor x in range(len(data.columns)):\n preprocessed_test_chunk_data = [lemmatize_remove_stop_words(chunk_from_column, stop_words) for chunk_from_column in test_dataset[x]]\n predicted_svm = text_clf_svm.predict(preprocessed_test_chunk_data)\n\n print(\"\\n\"+columns_names[x] + \"\\nCelkovy pocet: \" + str(len(predicted_svm)) + \"\\nSpravne: \"+ str(np.count_nonzero(predicted_svm 
== columns_names[x])) + \"\\nNespravne urcene: \")\n bad_predicted = [elem for elem in predicted_svm if elem != columns_names[x]]\n print({i:bad_predicted.count(i) for i in bad_predicted})\n total_predicted += len(predicted_svm)\n good_predicted += np.count_nonzero(predicted_svm == columns_names[x])\n\nprint(\"\\n\\nCelkovy pocet: \"+ str(total_predicted) + \", spravne: \" + str(good_predicted))\nprint((good_predicted/total_predicted)*100)\n","sub_path":"classifier.py","file_name":"classifier.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"166604943","text":"\n\n\n# Give some coins of different value and their quantity. Find how many values which are in range 1 ~ n can these coins be combined.\n\n# 多重背包变种\n\n\n\nclass Solution:\n \"\"\"\n @param n: the value from 1 - n\n @param value: the value of coins\n @param amount: the number of coins\n @return: how many different value\n \"\"\"\n def backPackVIII(self, n, value, amount):\n # write your code here\n # 2D-DP\n m = len(value) # number of items\n dp = [False] * (n + 1) # dp[i][j]: if can first i coins can combine j value\n dp[0] = True\n res = 0\n for i in range(m): # traverse on item\n cnt = [0] * (n + 1) # count how many times i-th item are used in combing the values from value[i] to n.\n for j in range(value[i], n + 1): # 多次放入背包,正序\n if dp[j] == False and dp[j - value[i]] and cnt[j - value[i]] < amount[i]: # j is not visited but can be combined from j - value[i] and the count does not exceed amount[i]\n dp[j] = True\n res += 1\n cnt[j] = cnt[j - value[i]] + 1\n \n return res","sub_path":"Backpack VIII.py","file_name":"Backpack VIII.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"185514218","text":"def fn():\r\n s = input().strip()\r\n\r\n def priority(c):\r\n if(c == '+' or c == '-'):\r\n return 1\r\n elif(c == '*' or c == '/'):\r\n return 2\r\n else:\r\n return 3\r\n\r\n ans = \"\"\r\n stack = []\r\n\r\n for i in s:\r\n if i.isalpha():\r\n ans += i\r\n elif(i == '('):\r\n stack.append(i)\r\n elif(i == ')'):\r\n while(stack[-1] != '('):\r\n ans+=stack.pop()\r\n stack.pop()\r\n else:\r\n while(len(stack) != 0 and stack[-1] != '(' and priority(stack[-1])>=priority(i)):\r\n ans += stack.pop()\r\n stack.append(i)\r\n\r\n while(len(stack)!=0):\r\n ans += stack.pop()\r\n \r\n print(ans)\r\n\r\n\r\n \r\nfor _ in range(int(input().strip())):\r\n fn()","sub_path":"python/Infix_to_Postfix.py","file_name":"Infix_to_Postfix.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"257749855","text":"# -*- coding: cp936 -*-\n#屏幕圆球的移动\n#主要的目的是如何确定小球能确定出边界\n\n\n#加载模块,使用SimpleGUICS2Pygame 代替 simplegui\nimport SimpleGUICS2Pygame.simpleguics2pygame as simplegui\nimport random\n#初始化\n\nwidth =800\n\nheight = 480\n\nball_radius = 20\n\ninit_pos =[width/2,height/2]\n\npositionx =random.randrange(2,10,3)\n\npositiony =random.randrange(2,10,5)\n\nvel=[positionx,positiony]\ntime = 0\n\n#事件函数\n\ndef draw(canvas):\n ball_pos = init_pos\n ball_pos[0] = init_pos[0] + 2 * vel[0]\n ball_pos[1] = init_pos[1] + 3 * vel[1]\n if ball_pos[1] > height-1-ball_radius:\n vel[1]=-vel[1]\n if ball_pos[1] < ball_radius:\n vel[1]=-vel[1]\n if ball_pos[0] < ball_radius:\n vel[0] =-vel[0]\n if ball_pos[0] > width-1-ball_radius:\n vel[0] =- vel[0]\n canvas.draw_circle(ball_pos,ball_radius,2,\"black\",\"red\")\n\n\ndef tick():\n global time\n time = time+1\n#注册事件\n\nf = simplegui.create_frame(\"motion\",width,height)\nf.set_draw_handler(draw)\nt = simplegui.create_timer(100,tick)\n\nt.start()\nf.start()\n","sub_path":"Collisions.py","file_name":"Collisions.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"447544502","text":"class URL:\n\tdef __init__(self, url):\n\t\tprorocol_link = url.split('://')\n\t\tif len(prorocol_link) == 1:\n\t\t\tprorocol_link = ['', url]\n\t\t\n\t\tprotocol, link = prorocol_link\n\t\thost, *path = link.split('/')\n\t\tpath = '/'.join(path)\n\n\t\tpath_query = path.split('?')\n\t\tif len(path_query) == 1:\n\t\t\tpath_query = [path, '']\n\n\t\tpath, query = path_query\n\t\tdomain = '.'.join(host.split('.')[-2:]).split(':')[0]\n\t\t\n\t\tself.protocol = protocol\n\t\tself.path = path\n\t\tself.domain = domain\n\t\tself.host = host\n\t\tself.query = query\n\n\t\tself.params = None\n\t\tself.__get_params()\n\n\tdef __get_params(self):\n\t\tquery = self.query\n\t\tif not query: return\n\n\t\tparams = {}\n\t\tquery = query.split('&')\n\n\t\tfor q in query:\n\t\t\tname, val = q.split('=')\n\t\t\tparams[name] = val\n\n\t\tself.params = params\n\n","sub_path":"repl-set-data/scraper/URL.py","file_name":"URL.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"28665296","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='CategoryVideo',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=100, verbose_name=b'Nome')),\n ('created_at', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Data do Cadastro', blank=True)),\n ('user', models.ForeignKey(verbose_name=b'Usu\\xc3\\xa1rio', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'Categoria de V\\xeddeo',\n 'verbose_name_plural': 'Categorias de V\\xeddeos',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Magazine',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('number', models.IntegerField(verbose_name=b'N\\xc3\\xbamero')),\n ('file', models.FileField(upload_to=b'', verbose_name=b'Arquivo da revista')),\n ('status', models.BooleanField(default=True)),\n ('created_at', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Data do Cadastro', blank=True)),\n ('user', models.ForeignKey(verbose_name=b'Usu\\xc3\\xa1rio', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'Revista',\n 'verbose_name_plural': 'Revistas',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Video',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('url', models.URLField()),\n ('embed_code', models.CharField(max_length=100)),\n ('type', models.CharField(max_length=2, verbose_name=b'Tipo', choices=[(b'0', b'Vimeo'), (b'1', b'Youtube')])),\n ('status', 
models.BooleanField(default=True)),\n ('created_at', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Data do Cadastro', blank=True)),\n ('category', models.ForeignKey(verbose_name=b'Categoria', to='multimidia.CategoryVideo')),\n ('user', models.ForeignKey(verbose_name=b'Usu\\xc3\\xa1rio', to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'V\\xeddeo',\n 'verbose_name_plural': 'V\\xeddeos',\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"apps/multimidia/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"13934844","text":"import torch\nfrom .. import SimpleLogger\nfrom . import ae\nfrom ..utils import kl_divergence\n\nimport os\nimport pickle\n\n\nclass Model(ae.Model):\n def __init__(\n self,\n net,\n opt,\n n_epochs,\n gpu_ids,\n save_dir,\n data_provider,\n crit_recon,\n save_state_iter=1,\n save_progress_iter=1,\n beta=1,\n beta_start=1000,\n beta_iters_max=12500,\n c_max=500,\n c_iters_max=80000,\n gamma=500,\n objective=\"H\",\n kld_avg=False,\n ):\n\n super(Model, self).__init__(\n net,\n opt,\n data_provider,\n crit_recon,\n gpu_ids,\n save_dir,\n n_epochs,\n save_state_iter,\n save_progress_iter,\n )\n\n self.beta = beta\n self.beta_start = beta_start\n self.beta_iters_max = beta_iters_max\n self.kld_avg = kld_avg\n self.objective = objective\n\n logger_path = \"{}/logger.pkl\".format(save_dir)\n\n if os.path.exists(logger_path):\n self.logger = pickle.load(open(logger_path, \"rb\"))\n else:\n print_str = \"[{epoch:%d}][{iter:%d}] reconLoss: {recon_loss:%.6f} kld: {kld_loss:%.6f} total: {total_loss:%.6f} time: {time:%.2f}\"\n\n self.logger = SimpleLogger(print_str)\n\n def iteration(self):\n\n torch.cuda.empty_cache()\n\n gpu_id = self.gpu_ids[0]\n\n net = self.net\n opt = self.opt\n crit_recon = self.crit_recon\n\n # do this just incase anything upstream changes these values\n net.train(True)\n\n opt.zero_grad()\n\n x = self.data_provider.next()\n x = x.cuda(gpu_id)\n\n #####################\n # train autoencoder\n #####################\n\n # Forward passes\n x_hat, z = net(x)\n\n recon_loss = crit_recon(x_hat, x)\n\n kld, _, _ = kl_divergence(z[0], z[1])\n if self.objective == \"H\":\n beta_vae_loss = recon_loss + self.beta * kld\n elif self.objective == \"H_eps\":\n beta_vae_loss = recon_loss + torch.abs((self.beta * kld) - x.shape[0] * 0.1)\n elif self.objective == \"B\":\n C = torch.clamp(\n torch.Tensor(\n [self.c_max / self.c_iters_max * len(self.logger)]\n ).type_as(x),\n 0,\n self.c_max,\n )\n beta_vae_loss = recon_loss + self.gamma * 
(kld - C).abs()\n\n beta_vae_loss.backward(retain_graph=True)\n opt.step()\n\n log = {\n \"recon_loss\": recon_loss.item(),\n \"kld_loss\": kld.item(),\n \"total_loss\": beta_vae_loss.item(),\n \"z\": [e.cpu().numpy() for e in z],\n }\n\n return log\n\n def save_progress(self):\n # gpu_id = self.gpu_ids[0]\n # epoch = self.get_current_epoch()\n\n # data_provider = self.data_provider\n # net = self.net\n pass\n","sub_path":"geneselection/solvers/bvae.py","file_name":"bvae.py","file_ext":"py","file_size_in_byte":3001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"164284373","text":"import shutil\nimport random\nimport glob\nimport datetime\nimport os\n\ndef combine_img(path, org_dir_name_1, org_dir_name_2, new_dir_name):\n \"\"\"\n 複数ディレクトリ内の画像を一つのディレクトリに移動する\n \"\"\"\n filepathP = path + org_dir_name_1\n filepathN = path + org_dir_name_2\n fileListP = sorted(glob.glob(filepathP + \"/*.jpg\"))\n fileListN = sorted(glob.glob(filepathN + \"/*.jpg\"))\n\n now = datetime.datetime.now()\n time_seed = now.timestamp()\n random.seed(time_seed)\n\n if not os.path.exists(path + new_dir_name):\n os.makedirs(path + new_dir_name)\n for p in fileListP :\n\n shutil.move(p, path + new_dir_name)\n\n for n in fileListN :\n shutil.move(n, path + new_dir_name)\n\n","sub_path":"edit_image/combine_img.py","file_name":"combine_img.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"200217799","text":"from collections import deque\n\nn = int(input())\nisland = [list(map(int, input().split())) for _ in range(n)]\n\ncnt = 0\n\nQ = deque()\n\ndy = [0, 0, 1, -1, 1, -1, 1, -1]\ndx = [1, -1, 0, 0, 1, 1, -1, -1]\n\nfor i in range(n):\n for j in range(n):\n if island[i][j] == 1:\n island[i][j] = 0\n Q.append((i,j))\n while Q:\n tmp = Q.popleft()\n for k in range(8):\n x = tmp[0] + dx[k]\n y = tmp[1] + dy[k]\n if 0 <= x < n and 0 <= y < n and island[x][y] == 1:\n island[x][y] = 0\n Q.append((x,y))\n cnt += 1\nprint(cnt)","sub_path":"sec07-dfs-and-bfs/13_섬나라_아일랜드(BFS).py","file_name":"13_섬나라_아일랜드(BFS).py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"35112817","text":"\"\"\"\nAuthor:goblinM\nDate:2020-01-15\nDescribe:yaml数据解析\n\"\"\"\nimport os\n\nimport yaml\n# current_path = os.path.abspath(\".\")\ncurrent_path = os.path.dirname(os.path.abspath(__file__))\nmultiple_yaml_path = os.path.join(current_path, \"config.yaml\")\nsingle_yaml_path = os.path.join(current_path, \"single_config.yaml\")\n\n\nclass YamlMethods(object):\n\n def open_read_file(self, path=single_yaml_path):\n \"\"\"打开文件\"\"\"\n with open(path, 'r', encoding='utf-8') as f:\n file_data = f.read()\n return file_data\n\n def open_write_file(self, path):\n \"\"\"写入文件\"\"\"\n f = open(path, 'w', encoding='utf-8')\n return f\n\n def single_yaml_load(self):\n \"\"\"解析单个yaml文档\"\"\"\n file_data = self.open_read_file(single_yaml_path)\n data = yaml.safe_load(file_data)\n print(\"type:\", type(data))\n print(data)\n return data\n\n def multiple_yaml_load(self):\n \"\"\"解析多个yaml文档\"\"\"\n file_data = self.open_read_file(multiple_yaml_path)\n data_list = yaml.safe_load_all(file_data)\n print(\"type:\", type(data_list))\n for data in data_list:\n print(data)\n return data_list\n\n def non_standard_yaml_dump(self):\n \"\"\"dump生成不一定标准的yaml文件 safe_dump 生成标准文档\"\"\"\n py_object = {'school': 'zhang',\n 'students': ['a', 'b']}\n file = self.open_write_file(os.path.join(current_path, \"output_non_standard_config.yaml\"))\n yaml.safe_dump(py_object, file)\n\n def standard_yaml_dump(self):\n \"\"\"ruamel 生成标准的yaml文件\"\"\"\n from ruamel import yaml\n py_object = {'school': 'zhang',\n 'students': ['a', 'b']\n }\n file = self.open_write_file(os.path.join(current_path, \"output_standard_config.yaml\"))\n yaml.dump(py_object, file, Dumper=yaml.RoundTripDumper)\n\n\nif __name__ == '__main__':\n y = YamlMethods()\n # y.non_standard_yaml_dump()\n # y.standard_yaml_dump()\n y.single_yaml_load()\n # 
y.multiple_yaml_load()\n\n","sub_path":"test_page_object/data_layer/analysis_data.py","file_name":"analysis_data.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"489623038","text":"import tweepy\n\nkeys = []\n\nwith open(\"keys.gitignore\", \"r\") as f:\n for line in f:\n keys.append(line[:-1])\n f.close()\n\nconsumer_key = keys[0]\nconsumer_secret = keys[1]\naccess_key = keys[2]\naccess_secret = keys[3]\n\ndef authorize_account(consumer_key = consumer_key, consumer_secret = consumer_secret,\n access_key = access_key, access_secret = access_secret):\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_key, access_secret)\n return tweepy.API(auth)\n \nif __name__ == \"__main__\":\n twitter_account = authorize_account()\n twitter_account.update_status(\"Teste teste test.\")\n","sub_path":"auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"108554439","text":"import os\nfrom .common import *\n\n# Debug settings\nDEBUG = False\n\n### Add any site-specific Kiwi settings below this line\n# for more information about available settings see\n# http://kiwitcms.readthedocs.io/en/latest/configuration.html\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 'change-me'\n\n\n# Administrators error report email settings\nADMINS = [\n # ('Your Name', 'your_email@example.com'),\n]\n\n\n### DO NOT CHANGE THE SETTINGS BELOW\n\n# provides filename versioning\nSTATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'\n\n# indicate that this is the Enterprise Edition version\nKIWI_VERSION = \"%s-ee\" % KIWI_VERSION\n","sub_path":"product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"236494183","text":"\r\n#load Dataset\r\nimport pandas as pd\r\nfrom wordcloud import WordCloud\r\nimport matplotlib.pyplot as pl\r\n\r\ndef load_data():\r\n data_=pd.read_csv('post_vaccination_tweets_final.csv')\r\n return data_\r\n\r\ndata__=load_data()\r\ndf_twitter= pd.DataFrame(data__)\r\n\r\ndef wordcloud_plot(wordcloud):\r\n pl.figure(figsize=(20,20))\r\n pl.imshow(wordcloud)\r\n pl.axis('off')\r\nmy_string=[]\r\nfor text in df_twitter['text']:\r\n my_string.append(text)\r\n \r\n \r\n \r\n \r\nmy_string=pd.Series(my_string).str.cat(sep=' ') \r\nwordcloud=WordCloud(width=1000,height=500).generate(my_string)\r\nwordcloud_plot(wordcloud)","sub_path":"word cloud visulization.py","file_name":"word cloud visulization.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"527784358","text":"import tkinter as tk\nfrom test import *\nfrom tkinter import *\nimport os\n\nclass Redirect():\n\n def __init__(self, widget):\n self.widget = widget\n\n def write(self, text):\n self.widget.insert('end', text)\n\n # some widget may need it\n #def flush(self):\n # pass\n\ntext = tk.Text(root)\ntext.pack()\n\n# keep original stdout\nold_stdout = sys.stdout \n\n# assing Redirect with widget Text \nsys.stdout = Redirect(text)\n\nroot.mainloop()\n\n# assign back original stdout (if you need it)\nsys.stdout = old_stdout\n","sub_path":"Python_GUI/teste/4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"205099832","text":"import os\n\ndef file_input(x, y):\n os.system(\"mkdir a%03d_%03d\"%(x,y))\n os.system(\"cp * ./a%03d_%03d\"%(x,y))\n\ndef change_itp(x, y):\n os.chdir(\"./a%03d_%03d\"%(x,y))\n f = open(\"or.itp\", 'r')\n o = open(\"ethanol.itp\", 'w')\n \n a = 1\n\n for i in f:\n if a == 8:\n sigma = float(i[45:52])\n epsilin = float(i[53:64])\n o.write(i[:45]+'%7.4f'%(sigma*x/100)+'%12.4f'%(epsilin*y/100)+'\\n')\n #if a == 11:\n # sigma = float(i[45:52])\n # epsilin = float(i[53:64])\n # o.write(i[:45]+'%7.4f'%(sigma*x/100)+'%12.4f'%(epsilin*y/100)+'\\n')\n elif a == 12:\n sigma = float(i[45:52])\n epsilin = float(i[53:64])\n o.write(i[:45]+'%7.4f'%(sigma*x/100)+'%12.4f'%(epsilin*y/100)+'\\n')\n else:\n o.write(i)\n a += 1\n os.chdir(\"../\")\n\nfor a in range(95,106):\n for b in range(95,106):\n file_input( a , b )\n change_itp(a , b)\n","sub_path":"arc/linux_tools/scan/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"617897126","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport csv\nimport sys\n\nargs = sys.argv\n\ndef main():\n # 元データの数dataNと,0を付け足したデータ数N\n dataN = 2500\n N = 4096\n\n # サンプリング間隔(s)\n dt = 0.01\n\n #時間軸と周波数軸 周波数は単位を[kHz]にするため1000で割る\n t = np.arange(0, N*dt, dt)\n freq = np.linspace(0, 1.0/dt, N)/1000\n\n #csvデータ読み込み用配列\n data = []\n\n csv_file = args[1]\n\n with open(csv_file) as f:\n reader = csv.reader(f)\n data = []\n for row in reader:\n data = data + [row]\n #print('DATA NUM ='+str(len(data)))\n\n # データ末尾に0を付け足す\n for i in range(N-dataN):\n data.append(['0','0'])\n #print('CHANGED DATA NUM ='+str(len(data)))\n\n #文字列を実数に直して,numpy配列に入れ,転置する\n f = np.array([[float(s2) for s2 in s] for s in data])\n f2 = f.T\n\n # 高速フーリエ変換\n Fx = np.fft.fft(f2[0])\n Fy = np.fft.fft(f2[1])\n #print(Fx.size)\n\n # 振幅スペクトルを計算\n Ampx = np.abs(Fx) / dataN/10 * 2\n Ampy = np.abs(Fy) / dataN/10 * 2\n\n for i in range(Fx.size):\n print(freq[i],',',Ampx[i],',',Ampy[i])\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"vol2deg_FFT/FFT.py","file_name":"FFT.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"639788369","text":"import os, sys\nimport wx\nimport wx.lib.agw.multidirdialog as MDD\nimport pandas as pd\nfrom geopy import Point\nfrom geopy.distance import vincenty\nfrom shapely.geometry.multipolygon import MultiPolygon\nfrom shapely import wkt\nfrom shapely.ops import cascaded_union\nfrom itertools import combinations\nfrom shapely.geometry import Polygon\nimport numpy as np\nimport pyproj \nimport shapely\nimport shapely.ops as ops\nfrom shapely.geometry.polygon import Polygon\nfrom functools import partial\nfrom math import sin, cos, asin, sqrt, degrees, radians\nfrom shapely.geometry.polygon import LinearRing\nfrom matplotlib import pyplot as fig\n\n#Global Variable Declaration\n#directory1 --> \"Upload opr1\" ; directory2 --> \"Upload opr1\"; directory3 --> \"Upload opr2\"; directory4 --> \"Upload opr2\"\ndirectory1=\"\"\ndirectory2=\"\"\ndirectory3=\"\"\ndirectory4=\"\"\n\n#filex stores the file name selected coresponding to directoryx\nfile1=\"\"\nfile2=\"\"\nfile3=\"\"\nfile4=\"\"\n\n#sectorName is string being Typed in textBox of Sector name\nsectorName=\"\"\n\n#list used for generating multiplygon Data\nlstii=[]\nlstww=[]\nmaxlst=[]\ninitialv=0\nfinalv=0\n\n#dfpd --> \"Physical Data VF\"; dfv1 --->\"UE-2.1 Voda LTE time Mean Data\"; dfv2 --->\"UE-5 Voda LTE time Mean Data\"\n#df--> \"Mapped Data of LTE time Data + Physical Data\";\n#dfIND--> \"Individual Sector Analysis of the entered Sector Name\"\n#dfUE -->\"UE% Left in each bucket of every sector, See-UE%_Data.xlsx\"\n#dfUE_Final -->\"Overall UE% Left every sector, See-UE%_Data_.xlsx\"\n\ndfpd=pd.DataFrame()\ndfv1=pd.DataFrame()\ndfv2=pd.DataFrame()\ndf=pd.DataFrame()\ndfInd=pd.DataFrame()\ndfUE_Final=pd.DataFrame()\ndfUE=pd.DataFrame()\n\nwildcard = \"Excel Sheet (*.xlsx)|*.xlsx|\" \\\n \"All files (*.*)|*.*\"\n\n#MyForm class calls the Framework \nclass MyForm(wx.Frame):\n from shapely.geometry.multipolygon import MultiPolygon\n\n def __init__(self):\n wx.Frame.__init__(self, None, 
wx.ID_ANY, title='Nokia Frame-Work',size=wx.Size(450, 100))\n \n # Add a panel so it looks correct on all platforms\n self.panel = wx.Panel(self, wx.ID_ANY)\n self.currentDirectory = os.getcwd()\n \n#Defining componentes of Framework i.e., the buttons,TextBox,Static Text\n bmp = wx.ArtProvider.GetBitmap(wx.ART_INFORMATION, wx.ART_OTHER, (16, 16))\n titleIco = wx.StaticBitmap(self.panel, wx.ID_ANY, bmp)\n title = wx.StaticText(self.panel, wx.ID_ANY, 'Frame-Work Info')\n\n title11 = wx.StaticText(self.panel, wx.ID_ANY, 'Upload Time Raw Data')\n openFileDlgBtn1 = wx.Button(self.panel,-1, label=\"Upload Opr1\")\n openFileDlgBtn1.Bind(wx.EVT_BUTTON, self.onOpenFile1)\n \n title12 = wx.StaticText(self.panel, wx.ID_ANY, 'Upload Physical Data ')\n openFileDlgBtn2 = wx.Button(self.panel,-1, label=\"Upload Opr1\")\n openFileDlgBtn2.Bind(wx.EVT_BUTTON, self.onOpenFile2)\n\n openFileDlgBtn11 = wx.Button(self.panel,-1, label=\"Upload Opr2\")\n openFileDlgBtn11.Bind(wx.EVT_BUTTON, self.onOpenFile11)\n \n openFileDlgBtn22 = wx.Button(self.panel,-1, label=\"Upload Opr2\")\n openFileDlgBtn22.Bind(wx.EVT_BUTTON, self.onOpenFile22)\n\n\n\n Btitle1 = wx.StaticText(self.panel, wx.ID_ANY, 'Parse(Upload File First)* ')\n genBtn1 = wx.Button(self.panel, wx.ID_ANY, 'Parse Opr1')\n self.Bind(wx.EVT_BUTTON, self.onClickParse, genBtn1)\n genBtn11 = wx.Button(self.panel, wx.ID_ANY, 'Parse Opr2')\n self.Bind(wx.EVT_BUTTON, self.onClickParse, genBtn11)\n \n Btitle2 = wx.StaticText(self.panel, wx.ID_ANY, 'Generate Multi-Polygon Data')\n genBtn2 = wx.Button(self.panel, wx.ID_ANY, 'Generate')\n self.Bind(wx.EVT_BUTTON, self.onGen1, genBtn2)\n\n Btitle3 = wx.StaticText(self.panel, wx.ID_ANY, 'Generate UE% Left Data')\n genBtn3 = wx.Button(self.panel, wx.ID_ANY, 'Generate')\n self.Bind(wx.EVT_BUTTON, self.onGen2, genBtn3)\n\n Btitle4 = wx.StaticText(self.panel, wx.ID_ANY, 'Generate Final Coverage Data')\n genBtn4 = wx.Button(self.panel, wx.ID_ANY, 'Generate')\n self.Bind(wx.EVT_BUTTON, self.onGen3, 
genBtn4)\n \n para1 = wx.StaticText(self.panel, wx.ID_ANY, 'Get Overlap Data of LMBTS With Neighbour Site')\n\n \n l1 = wx.StaticText(self.panel, -1, \"Sector Name\")\n self.t1 = wx.TextCtrl(self.panel)\n self.t1.Bind(wx.EVT_TEXT,self.OnKeyTyped) \n\n okBtn = wx.Button(self.panel, wx.ID_ANY, 'Generate')\n self.Bind(wx.EVT_BUTTON, self.onGen4, okBtn)\n \n #Putting components into BoxSizer which is basically a layout\n topSizer = wx.BoxSizer(wx.VERTICAL)\n titleSizer = wx.BoxSizer(wx.HORIZONTAL)\n uploadSizer1 = wx.BoxSizer(wx.HORIZONTAL)\n uploadSizer2 = wx.BoxSizer(wx.HORIZONTAL)\n genSizer1 = wx.BoxSizer(wx.HORIZONTAL)\n genSizer2 = wx.BoxSizer(wx.HORIZONTAL)\n genSizer3 = wx.BoxSizer(wx.HORIZONTAL)\n genSizer4 = wx.BoxSizer(wx.HORIZONTAL)\n paratextSizer = wx.BoxSizer(wx.HORIZONTAL)\n btnSizer = wx.BoxSizer(wx.HORIZONTAL)\n \n#Adding Corresponding Component to there boxes\n titleSizer.Add(title, 0, wx.ALL, 5)\n titleSizer.Add(titleIco, 0, wx.ALL, 5)\n \n \n uploadSizer1.Add(title11,0, wx.ALL, 5)\n uploadSizer1.Add(openFileDlgBtn1,0, wx.ALL, 5)\n uploadSizer1.Add(openFileDlgBtn11,0, wx.ALL, 5)\n\n uploadSizer2.Add(title12,0, wx.ALL, 5)\n uploadSizer2.Add(openFileDlgBtn2,0, wx.ALL, 5)\n uploadSizer2.Add(openFileDlgBtn22,0, wx.ALL, 5)\n\n genSizer1.Add(Btitle1,0, wx.ALL, 5)\n genSizer1.Add(genBtn1,0, wx.ALL, 5)\n genSizer1.Add(genBtn11,0, wx.ALL, 5)\n\n genSizer2.Add(Btitle2,1, wx.ALIGN_RIGHT|wx.ALL, 5)\n genSizer2.Add(genBtn2,1, wx.ALIGN_RIGHT|wx.ALL, 5)\n\n genSizer3.Add(Btitle3,1, wx.ALIGN_RIGHT|wx.ALL, 5)\n genSizer3.Add(genBtn3,1, wx.ALIGN_RIGHT|wx.ALL, 5)\n\n genSizer4.Add(Btitle4,1, wx.ALIGN_RIGHT|wx.ALL, 5)\n genSizer4.Add(genBtn4,1, wx.ALIGN_RIGHT|wx.ALL, 5)\n\n paratextSizer.Add(para1, 0, wx.ALL, 5)\n\n btnSizer.Add(l1, 1, wx.EXPAND|wx.ALIGN_LEFT|wx.ALL,5)\n btnSizer.Add(self.t1,1,wx.EXPAND|wx.ALIGN_LEFT|wx.ALL,5)\n btnSizer.Add(okBtn, 0, wx.ALL, 5)\n\n \n topSizer.Add(titleSizer, 0, wx.CENTER)\n topSizer.Add(wx.StaticLine(self.panel), 0, wx.ALL|wx.EXPAND, 
5)\n topSizer.Add(uploadSizer1, 0, wx.CENTER)\n topSizer.Add(uploadSizer2, 0, wx.CENTER)\n topSizer.Add(genSizer1, 0, wx.RIGHT)\n topSizer.Add(wx.StaticLine(self.panel), 0, wx.ALL|wx.EXPAND, 5)\n\n topSizer.Add(genSizer2, 0, wx.CENTER)\n topSizer.Add(genSizer3, 0, wx.CENTER)\n topSizer.Add(genSizer4, 0, wx.CENTER)\n topSizer.Add(wx.StaticLine(self.panel), 0, wx.ALL|wx.EXPAND, 5) \n topSizer.Add(paratextSizer, 0, wx.CENTER)\n topSizer.Add(btnSizer, 0, wx.ALL|wx.CENTER, 5)\n \n # SetSizeHints(minW, minH, maxW, maxH)\n self.SetSizeHints(250,300,500,400)\n self.panel.SetSizer(topSizer)\n topSizer.Fit(self)\n\n#Framework Layout is done, now defining there function which will be called\n#First \"Generate\" button --> onGen1;\n#Second \"Generate\" button --> onGen2;\n#Third \"Generate\" button --> onGen3;\n#Fourth \"Generate\" button --> onGen4;\n\n#onGen1 Creates File \"Multipolygon.xlsx\" which would be input for further \"Generate\" Button\n def onGen1(self, event):\n print(\"Wait Code running...\")\n global directory1,sectorName,lstii,lstww,maxlst,initialv,finalv,dfpd,dfv1,dfv2,df,dfInd,dfUE_Final,dfUE \n\n#Reading 'Parsed.xlsx' file generated\n os.chdir(directory1)\n file = 'Parsed.xlsx'\n xl= pd.ExcelFile(file)\n dfv1=xl.parse(xl.sheet_names[0])\n dfv2=xl.parse(xl.sheet_names[1])\n\n initialv=dfv1.columns.get_loc(\"% UEs with distance to base station in the range of 0-78m in 2.1km cells\")\n finalv=dfv1.columns.get_loc(\"% UEs with distance to base station in the range of 2262-3000m in 2.1km cells\")\n\n#Generating Data for voda UE-2.1\n dfg11=self.onGen1Fun(dfv1)\n\n if(dfv2.columns.get_loc(\"% UEs with distance to base station in the range of 4.8-6km in 5km cells\")>finalv):\n finalv=dfv2.columns.get_loc(\"% UEs with distance to base station in the range of 4.8-6km in 5km cells\")\n\n#Generating Data for voda UE-5\n dfg12=self.onGen1Fun(dfv2)\n \n#Merging Multipolygon Data into df\n df=pd.concat([dfg11,dfg12],axis=0)\n newfile=directory1+\"\\Multipolygon.xlsx\"\n 
writer = pd.ExcelWriter(newfile)\n df.to_excel(writer,sheet_name=\"Sheet-1\",index=False)\n writer.save()\n#Cleaing df dataframe to avoid append of data while multi-click \n df.iloc[0:0]\n print ('Done! Check File at Source Destination..')\n \n\n \n def onGen2(self, event):\n print(\"Wait Code running...\")\n global directory1,directory2,directory3,directory4,file1,file2,file3,file4,sectorName,lstii,lstww,maxlst,initialv,finalv,dfpd,dfv1,dfv2,df,dfInd,dfUE_Final,dfUE\n \n#Reading Recent 'Multipolygon.xlsx' generated\n os.chdir(directory1)\n file = 'Multipolygon.xlsx'\n xl= pd.ExcelFile(file)\n df=xl.parse(xl.sheet_names[0])\n \n#Defining header of dfUE dataframe\n stri=\"\"\n strj=\"\"\n clm=['Sector Name','Intersecting Sector List']\n\n for a in range(initialv,finalv+1):\n clm.append('PercentLeftFrominitial in '+'Polygon'+str(a-initialv+1))\n clm.append('PolygonLeftFrominitial in '+'Polygon'+str(a-initialv+1))\n dfUE = pd.DataFrame(columns=clm)\n\n#iterate over each sector polygen and intersect it will rest other Sector\n for i in range(len(df)): \n stri='POLYGON'+lstww[i][len(lstww[i])-1]\n pi = wkt.loads(stri)\n mi = self.MultiPolygon([pi]) \n j=0 \n lst=[]\n strw=[] \n percent_remaining=[]\n poly_r=[]\n for f in range(maxlst[i]):\n strw.append(lstww[i][f])\n percent_remaining.append(100)\n\n area_ini=[]\n for f in range(maxlst[i],finalv-initialv+1):\n percent_remaining.append('NA')\n for f in range(maxlst[i]): \n strini='POLYGON'+strw[f]\n p_ini = wkt.loads(strini)\n m_ini = self.MultiPolygon([p_ini])\n poly_r.append(m_ini)\n coordinates_arrayi=[]\n for pol in m_ini:\n coordinates_arrayi = np.asarray(pol.exterior.coords)\n\n area_ini.append(self.initial_area(coordinates_arrayi))\n\n alt=[]\n alt1=[]\n alt.append(df.iat[i,1])\n alt1.append(df.iat[i,1])\n\n#Condition for intersection --> intersite distance distance <= sum of cell size determined by cummulative sum to 95% of both sector\n#Condition for intersection --> Then check if their outer boundry 
intersect\n#If yes, intersect each bucket of polygen with the outer boundry.\n for j in range(len(df)):\n if(j!=i and df.iat[i,0]!=df.iat[j,0]):\n d=self.dist(float(df.iat[i,2]),float(df.iat[i,3]),float(df.iat[j,2]),float(df.iat[j,3]))\n if(d<(df.iat[i,5]+df.iat[j,5])/1000):\n strj='POLYGON'+lstww[j][len(lstww[j])-1]\n pj = wkt.loads(strj)\n mj = self.MultiPolygon([pj])\n for pol in mi:\n for pol2 in mj:\n if (pol.intersects(pol2)==True) :\n lst.append(df.iat[j,1])\n for t in range(maxlst[i]):\n str1='POLYGON'+strw[t]\n try:\n p1 = wkt.loads(str1)\n m1 = self.MultiPolygon([p1])\n for pol in m1:\n for pol2 in mj:\n if (pol.intersects(pol2)==True and self.initial_area(self.get_array(m1))!=0 and self.initial_area(self.get_array(mj))!=0) :\n try:\n polygon_remaining=self.get_polyg(m1,mj)\n \n if(polygon_remaining.is_empty):\n percent_remaining[t]=0\n strw[t]=\"((0 0,0 0,0 0,0 0))\"\n string='POLYGON'+strw[t]\n p12 = wkt.loads(string)\n polygon_remaining = self.MultiPolygon([p12])\n poly_r[t]=polygon_remaining\n \n else:\n coordinate_remaining=self.get_array(polygon_remaining)\n # area_initial=self.initial_area(coordinate_initial)\n area_remaining=self.remaining_area(coordinate_remaining)\n percent_remaining[t]=area_remaining/area_ini[t]*100\n poly_r[t]=polygon_remaining\n y=0\n strw[t]=\"((\"\n while(y!=len(coordinate_remaining)-1):\n if(coordinate_remaining[y]==coordinate_remaining[y+1] and y==0):\n strw[t]=strw[t]\n elif(coordinate_remaining[y]==coordinate_remaining[y+1] and y==len(coordinate_remaining)-2):\n strw[t]=strw[t] \n elif(coordinate_remaining[y]==coordinate_remaining[y+1]):\n strw[t]=strw[t]+str(coordinate_remaining[y][0])+\" \"+str(coordinate_remaining[y][1])+\")\"+\",\"+\"(\"\n else:\n strw[t]=strw[t]+str(coordinate_remaining[y][0])+\" \"+str(coordinate_remaining[y][1])+\",\"\n y=y+1\n strw[t]=strw[t]+str(coordinate_remaining[len(coordinate_remaining)-1][0])+\" \"+str(coordinate_remaining[len(coordinate_remaining)-1][1])\n strw[t]=strw[t]+\"))\"\n \n 
except:\n arr_remaining=self.get_polyg1(m1,mj)\n if(len(arr_remaining)<=2):\n percent_remaining[t]=0 \n strw[t]=\"((0 0,0 0,0 0,0 0))\"\n string='POLYGON'+strw[t]\n p12 = wkt.loads(string)\n polygon_remaining = self.MultiPolygon([p12])\n poly_r[t]=polygon_remaining\n # alt.append(0)\n else:\n polygon_remaining=self.MultiPolygon(arr_remaining)\n coordinate_remaining=self.get_array(polygon_remaining)\n # area_initial=self.initial_area(coordinate_initial)\n area_remaining=self.remaining_area(coordinate_remaining)\n percent_remaining[t]=area_remaining*100/area_ini[t]\n poly_r[t]=polygon_remaining\n y=0\n strw[t]=\"((\"\n while(y!=len(coordinate_remaining)-1):\n if(coordinate_remaining[y]==coordinate_remaining[y+1] and y==0):\n strw[t]=strw[t]\n elif(coordinate_remaining[y]==coordinate_remaining[y+1] and y==len(coordinate_remaining)-2):\n strw[t]=strw[t] \n elif(coordinate_remaining[y]==coordinate_remaining[y+1]):\n strw[t]=strw[t]+str(coordinate_remaining[y][0])+\" \"+str(coordinate_remaining[y][1])+\")\"+\",\"+\"(\"\n else:\n strw[t]=strw[t]+str(coordinate_remaining[y][0])+\" \"+str(coordinate_remaining[y][1])+\",\"\n y=y+1\n strw[t]+str(coordinate_remaining[len(coordinate_remaining)-1][0])+\" \"+str(coordinate_remaining[len(coordinate_remaining)-1][1])\n strw[t]=strw[t]+\"))\"\n else:\n percent_remaining[t]=percent_remaining[t]\n except:\n percent_remaining[t]=percent_remaining[t]\n for t in range(maxlst[i],finalv-initialv+1):\n percent_remaining[t]=percent_remaining[t]\n alt.append(lst)\n for t in range(maxlst[i]):\n\n if(poly_r[t].is_empty==False):\n coordinate_remaining=self.get_array(poly_r[t])\n area_remaining=self.remaining_area(coordinate_remaining)\n if(ifinalv):\n finalv=dfv2.columns.get_loc(\"% UEs with distance to base station in the range of 4.8-6km in 5km cells\")\n\n \n#Summing up all UE% in all bucket of LNBTS name.\n clm=['Sector Name','Final Coverage']\n dfUE_Final = pd.DataFrame(columns=clm)\n print(\"dfUE\"+str(len(dfUE)))\n for i in 
range(len(dfUE)):\n \n alt=[]\n alt.append(dfUE.iat[i,0])\n t=2\n sum=0\n \n while(str(dfUE.iat[i,t])!='nan' and t<2*(finalv-initialv+1)):\n \n sum=sum+dfUE.iat[i,t]\n t=t+2\n alt.append(sum)\n \n k=pd.Series(alt,index=clm)\n dfUE_Final=dfUE_Final.append(k, ignore_index=True)\n \n newfile=directory1+\"\\\\UE%_Data_final.xlsx\"\n writer = pd.ExcelWriter(newfile)\n dfUE_Final.to_excel(writer,sheet_name=\"Sheet-1\",index=False)\n writer.save()\n print ('Done! Check File at Source Destination..')\n dfUE_Final.iloc[0:0]\n \n \n#onGen4 generate individual Report of entered Sector name. It shows which all sector cut how much UE% in each bucket of entered sector name \n def onGen4(self, event):\n global directory1,directory2,directory3,directory4,file1,file2,file3,file4,sectorName,lstii,lstww,maxlst,initialv,finalv,dfpd,dfv1,dfv2,df,dfInd,dfUE_Final,dfUE\n os.chdir(directory1)\n\n for i in range(len(df)):\n if(df.iat[i,1]==sectorName):\n index=i\n break\n \n stri=\"\"\n strj=\"\"\n \n#generating header of dataframe\n clm=['Sector Name','Co-Sector Name']\n for a in range(initialv,finalv+1):\n clm.append('PercentLeftFrominitial in '+'Polygon'+str(a-initialv+1))\n dfInd = pd.DataFrame(columns=clm)\n c=0\n i=0\n i=index \n\n stri='POLYGON'+lstww[i][len(lstww[i])-1]\n pi = wkt.loads(stri)\n mi = self.MultiPolygon([pi]) \n j=0 \n strw=[] \n percent_remaining=[]\n poly_r=[]\n for f in range(maxlst[i]):\n strw.append(lstww[i][f])\n percent_remaining.append(100)\n\n area_ini=[]\n for f in range(maxlst[i],finalv-initialv+1):\n percent_remaining.append(\"NA\")\n for f in range(maxlst[i]): \n strini='POLYGON'+strw[f]\n p_ini = wkt.loads(strini)\n m_ini = self.MultiPolygon([p_ini])\n poly_r.append(m_ini)\n coordinates_arrayi=[]\n for pol in m_ini:\n coordinates_arrayi = np.asarray(pol.exterior.coords)\n\n area_ini.append(self.initial_area(coordinates_arrayi))\n\n\n\n#iterating over every sector to check sectors which are intersecting on the same condition above\n for j in 
range(len(df)):\n c=0\n for e in range(maxlst[i]):\n if(percent_remaining[e]!=0):\n c=c+1\n if(j!=i and df.iat[i,0]!=df.iat[j,0] and c!=0):\n d=self.dist(float(df.iat[i,2]),float(df.iat[i,3]),float(df.iat[j,2]),float(df.iat[j,3]))\n if(d<(df.iat[i,5]+df.iat[j,5])/1000):\n strj='POLYGON'+lstww[j][len(lstww[j])-1]\n pj = wkt.loads(strj)\n mj = self.MultiPolygon([pj])\n for pol in mi:\n for pol2 in mj:\n if (pol.intersects(pol2)==True):\n alt=[]\n alt.append(df.iat[i,1])\n alt.append(df.iat[j,1])\n for t in range(maxlst[i]):\n str1='POLYGON'+strw[t]\n p1 = wkt.loads(str1)\n m1 = self.MultiPolygon([p1])\n for pol in m1:\n for pol2 in mj:\n if (pol.intersects(pol2)==True and self.initial_area(self.get_array(m1))!=0 and self.initial_area(self.get_array(mj))!=0) :\n try:\n polygon_remaining=self.get_polyg(m1,mj)\n if(polygon_remaining.is_empty):\n alt.append(0) \n percent_remaining[t]=0 \n strw[t]=\"((0 0,0 0,0 0,0 0))\"\n string='POLYGON'+strw[t]\n p12 = wkt.loads(string)\n polygon_remaining = self.MultiPolygon([p12])\n poly_r[t]=polygon_remaining\n else:\n coordinate_remaining=self.get_array(polygon_remaining)\n area_remaining=self.remaining_area(coordinate_remaining)\n percent_remaining[t]=area_remaining/area_ini[t]*100\n if(i97):\n h=h+1\n break\n elif(sum+dfv1.iat[i,l]<97):\n sum=sum+dfv1.iat[i,l]\n h=h+1\n else:\n break\n maxlst.append(h)\n dv=[]\n dv=self.get_rangeArray(dfv1)\n alt.append(dv[h-1])\n m=initialv+h \n\n\n multipolyg=[]\n arr1=[]\n string1=\"\"\n\n for j in range(initialv,m):\n uplst=self.uplist(dv[j-initialv],lat1,lon1,dfv1.iat[i,11]) \n rightlst=self.rightlist(dv[j-initialv],lat1,lon1,dfv1.iat[i,11]-(42+j*3/(finalv-initialv)))\n leftlst=self.leftlist(dv[j-initialv],lat1,lon1,dfv1.iat[i,11]+(42+j*3/(finalv-initialv)))\n arr1=[(lat1,lon1),(leftlst[0],leftlst[1]),(uplst[0],uplst[1]),(rightlst[0],rightlst[1])]\n\n string1=\"((\"\n for p in range(len(arr1)):\n string1=string1+str(arr1[p][0])+\" \"+str(arr1[p][1])+\",\"\n\n 
string1=string1+str(arr1[0][0])+\" \"+str(arr1[0][1])\n string1=string1+\"))\" \n string='POLYGON'+string1\n p = wkt.loads(string)\n m1 = self.MultiPolygon([p])\n\n multipolyg.append(m1)\n\n if(j==initialv):\n\n alt.append(m1)\n lsti.append(arr1)\n lstw.append(string1)\n else:\n\n poly_remain=self.get_polyg(multipolyg[j-initialv],multipolyg[j-initialv-1])\n alt.append(poly_remain) \n arr=self.get_array(poly_remain)\n\n lsti.append(arr)\n string=\"((\"\n for p in range(4):\n string=string+str(arr[p][0])+\" \"+str(arr[p][1])+\",\"\n string=string+str(arr[4][0])+\" \"+str(arr[4][1])+\")\"+\",\"+\"(\"\n for p in range(5,len(arr)-1):\n string=string+str(arr[p][0])+\" \"+str(arr[p][1])+\",\"\n string=string+str(arr[0][0])+\" \"+str(arr[0][1])\n string=string+\"))\" \n lstw.append(string)\n for o in range(m,finalv+1):\n alt.append(0)\n lsti.append(arr1)\n lstw.append(string1)\n lstii.append(lsti)\n lstww.append(lstw)\n alt.append(m1)\n\n s=pd.Series(alt,index=clm)\n df=df.append(s, ignore_index=True) \n return df\n\n#returns multipolygens\n def poly(self,string):\n stri='POLYGON'+string\n p = wkt.loads(stri)\n m1 = self.MultiPolygon([p])\n return(m1)\n\n#return part of pol1 which is non-overlapping\n def get_polyg(self,m1,m2):\n outmulti=[]\n for pol1 in m1:\n for pol2 in m2:\n if pol1.intersects(pol2)==True:\n nonoverlap = (pol1.symmetric_difference(pol2)).difference(pol2)\n outmulti.append(nonoverlap)\n else:\n outmulti.append(pol1)\n finalpol = self.MultiPolygon(outmulti)\n return finalpol\n\n#return array form of part of pol1 which is non-overlapping\n def get_polyg1(self,m1,m2):\n outmulti=[]\n for pol1 in m1:\n for pol2 in m2:\n if pol1.intersects(pol2)==True:\n pol1=pol1.buffer(0)\n pol2=pol2.buffer(0)\n nonoverlap = (pol1.symmetric_difference(pol2)).difference(pol2) \n outmulti.append(nonoverlap)\n else: \n outmulti.append(pol1)\n return outmulti\n\n#return array of given multipolygon\n def get_array(self,m1):\n interior_coords = []\n exterior_coords=[]\n 
coordinates_initial=[]\n for pol in m1:\n exterior_coords= pol.exterior.coords[:] \n for interior in pol.interiors:\n interior_coords += interior.coords[:]\n for i in range(len(exterior_coords)):\n coordinates_initial.append(exterior_coords[i])\n for i in range(len(interior_coords)):\n coordinates_initial.append(interior_coords[i]) \n\n return coordinates_initial\n\n#return area of given array of polygon\n def initial_area(self,coordinate_initial):\n geom = Polygon(coordinate_initial)\n geom_area = ops.transform(\n partial(\n pyproj.transform,\n pyproj.Proj(init='EPSG:32643'),\n pyproj.Proj(\n proj='aea',\n lat1=geom.bounds[0],\n lat2=geom.bounds[2])),\n geom)\n area1=geom_area.area\n return (area1) \n\n#return area of given array of polygon\n def remaining_area(self,coordinate_remaining):\n geom = Polygon(coordinate_remaining)\n geom_area = ops.transform(\n partial(\n pyproj.transform,\n pyproj.Proj(init='EPSG:32643'),\n pyproj.Proj(\n proj='aea',\n lat1=geom.bounds[0],\n lat2=geom.bounds[2])),\n geom)\n area2=geom_area.area\n return (area2) \n\n#return outer distance of each range in raw data\n def get_rangeArray(self,df):\n dis=[]\n clm=df.columns\n idx1=idx2=0\n for i in range (len(clm)):\n if (clm[i]=='Avg UE distance'):\n idx1=i\n if (clm[i]=='Sector Name'):\n idx2=i\n\n for i in range(idx1+1,idx2):\n st1=clm[i].split(\" of\",1)[1]\n st2=st1.split(\"in \",1)[0]\n st3=st2.split(\"-\",1)[1]\n if(st3[len(st3)-3:]==\"km \"):\n dis.append(float(st3[:len(st3)-3])*1000)\n else:\n dis.append(float(st3[:len(st3)-2]))\n return dis\n\n\n def haversine(self,angle_radians):\n return sin(angle_radians / 2.0) ** 2\n\n def inverse_haversine(self,h):\n return 2 * asin(sqrt(h)) # radians\n\n#calculate distance between two location using lat long data\n def dist(self,lat1, lon1, lat2, lon2):\n Earth_radius_km = 6371.0\n RADIUS = Earth_radius_km\n\n lat1 = radians(lat1)\n lat2 = radians(lat2)\n dlat = lat2 - lat1\n dlon = radians(lon2 - lon1)\n h = self.haversine(dlat) + 
cos(lat1) * cos(lat2) * self.haversine(dlon)\n return RADIUS * self.inverse_haversine(h)\n\n#return left cordinate the polygon taken anti-clockwise from site location \n def leftlist(self,dist,lat,lon,azimuth):\n left=vincenty(kilometers=dist/1000).destination(Point(lat, lon),azimuth).format_decimal()\n leftlst=list(map(float,(left.split(\",\"))))\n return leftlst\n\n#return upside cordinate the polygon taken anti-clockwise from site location \n\n def uplist(self,dist,lat,lon,azimuth):\n up=vincenty(kilometers=dist/1000).destination(Point(lat, lon),azimuth).format_decimal()\n uplst=list(map(float,(up.split(\",\"))))\n return uplst\n\n#return right cordinate the polygon taken anti-clockwise from site location \n\n def rightlist(self,dist,lat,lon,azimuth):\n right=vincenty(kilometers=dist/1000).destination(Point(lat, lon), azimuth).format_decimal()\n rightlst=list(map(float,(right.split(\",\"))))\n return rightlst\n\n##########################################################################################################################################\n\n def OnKeyTyped(self, event): \n global sectorName\n sectorName=event.GetString()\n print (event.GetString())\n \n def onOK(self, event):\n # Do something\n print ('Done! 
Check File at Source Destination..')\n\n#opens file browser for Upload Button\n def onOpenFile1(self, event):\n \n \"\"\"\n Create and show the Open FileDialog\n \"\"\"\n global directory1\n global file1\n dlg = wx.FileDialog(\n self, message=\"Choose a file\",\n defaultDir=self.currentDirectory, \n defaultFile=\"\",\n wildcard=wildcard,\n style=wx.FD_OPEN | wx.FD_MULTIPLE | wx.FD_CHANGE_DIR\n )\n if dlg.ShowModal() == wx.ID_OK:\n paths = dlg.GetPaths()\n print (\"You chose the following file(s):\")\n \n\n for path in paths:\n print (path+\"$\")\n directory1=os.path.split(path)[0]\n file1=os.path.split(path)[1]\n print(file1)\n dlg.Destroy()\n\n#opens file browser for Upload Button\n def onOpenFile2(self, event):\n \n \"\"\"\n Create and show the Open FileDialog\n \"\"\"\n global directory2\n global file2\n dlg = wx.FileDialog(\n self, message=\"Choose a file\",\n defaultDir=self.currentDirectory, \n defaultFile=\"\",\n wildcard=wildcard,\n style=wx.FD_OPEN | wx.FD_MULTIPLE | wx.FD_CHANGE_DIR\n )\n if dlg.ShowModal() == wx.ID_OK:\n paths = dlg.GetPaths()\n print (\"You chose the following file(s):\")\n \n\n for path in paths:\n print (path+\"$\")\n directory2=os.path.split(path)[0]\n file2=os.path.split(path)[1]\n print(file2)\n dlg.Destroy()\n\n \n#opens file browser for Upload Button \n def onOpenFile11(self, event):\n \n \"\"\"\n Create and show the Open FileDialog\n \"\"\"\n global directory3\n global file3\n dlg = wx.FileDialog(\n self, message=\"Choose a file\",\n defaultDir=self.currentDirectory, \n defaultFile=\"\",\n wildcard=wildcard,\n style=wx.FD_OPEN | wx.FD_MULTIPLE | wx.FD_CHANGE_DIR\n )\n if dlg.ShowModal() == wx.ID_OK:\n paths = dlg.GetPaths()\n print (\"You chose the following file(s):\")\n \n\n for path in paths:\n print (path+\"$\")\n directory3=os.path.split(path)[0]\n file3=os.path.split(path)[1]\n print(file3)\n dlg.Destroy()\n\n#opens file browser for Upload Button\n def onOpenFile22(self, event):\n \"\"\"\n Create and show the Open 
FileDialog\n \"\"\"\n global directory4\n global file4\n dlg = wx.FileDialog(\n self, message=\"Choose a file\",\n defaultDir=self.currentDirectory, \n defaultFile=\"\",\n wildcard=wildcard,\n style=wx.FD_OPEN | wx.FD_MULTIPLE | wx.FD_CHANGE_DIR\n )\n if dlg.ShowModal() == wx.ID_OK:\n paths = dlg.GetPaths()\n print (\"You chose the following file(s):\")\n \n\n for path in paths:\n print (path+\"$\")\n directory4=os.path.split(path)[0]\n file4=os.path.split(path)[1]\n print(file4)\n dlg.Destroy()\n\n\n#function used to parse the raw data and mapp to physical data \n def onClickParse(self, event):\n global dfpd\n print(\"Wait Code running...\")\n\n\n os.chdir(directory1)\n xl1 = pd.ExcelFile(file1)\n df=xl1.parse(xl1.sheet_names[0])\n\n os.chdir(directory2)\n xl2 = pd.ExcelFile(file2)\n dfpd=xl2.parse(xl2.sheet_names[0])\n\n#Classifing the data based on the Cell size\n df.dropna(subset=['Expect cell size'], inplace=True)\n df1=df[df['Expect cell size']==2.1]\n df2=df[df['Expect cell size']==5]\n\n df1 =df1.dropna(axis=1,how='all')\n df2 =df2.dropna(axis=1,how='all')\n df1.dropna(subset=['% UEs with distance to base station in the range of 0-78m in 2.1km cells'], inplace=True)\n df2.dropna(subset=['% UEs with distance to base station in the range of 0-468m in 5km cells'], inplace=True)\n df2=df2.sort_values(['LNBTS name', 'LNCEL name'], ascending=[True,True])\n df1=df1.sort_values(['LNBTS name', 'LNCEL name'], ascending=[True,True])\n\n dfn1 = df1.groupby(['LNCEL name'],as_index=False).agg({'PERIOD_START_TIME':'last','MRBTS/SBTS name':'last',\n 'LNBTS type':'last','LNBTS name':'last','Expect cell size':'mean',\n 'Avg UE distance':'mean','% UEs with distance to base station in the range of 0-78m in 2.1km cells':'mean',\n '% UEs with distance to base station in the range of 78-156m in 2.1km cells':'mean',\n '% UEs with distance to base station in the range of 156-312m in 2.1km cells':'mean',\n '% UEs with distance to base station in the range of 312-468m in 2.1km 
cells':'mean',\n '% UEs with distance to base station in the range of 468-624m in 2.1km cells':'mean',\n '% UEs with distance to base station in the range of 624-780m in 2.1km cells':'mean',\n '% UEs with distance to base station in the range of 780-1092m in 2.1km cells':'mean',\n '% UEs with distance to base station in the range of 1092-1404m in 2.1km cells':'mean',\n '% UEs with distance to base station in the range of 1404-1794m in 2.1km cells':'mean',\n '% UEs with distance to base station in the range of 1794-2262m in 2.1km cells':'mean',\n '% UEs with distance to base station more than 2262m in 2.1km cells':'mean'})\n\n dfn1 = dfn1.rename(columns={'% UEs with distance to base station more than 2262m in 2.1km cells': '% UEs with distance to base station in the range of 2262-3000m in 2.1km cells'})\n\n dfn2 = df2.groupby(['LNCEL name'],as_index=False).agg({'PERIOD_START_TIME':'last','MRBTS/SBTS name':'last',\n 'LNBTS type':'last','LNBTS name':'last','Expect cell size':'mean',\n 'Avg UE distance':'mean','% UEs with distance to base station in the range of 0-468m in 5km cells':'mean',\n '% UEs with distance to base station in the range of 468-1014m in 5km cells':'mean',\n '% UEs with distance to base station in the range of 1014-1482m in 5km cells':'mean',\n '% UEs with distance to base station in the range of 1482-2028m in 5km cells':'mean',\n '% UEs with distance to base station in the range of 2028-2656m in 5km cells':'mean',\n '% UEs with distance to base station in the range of 2656-3400m in 5km cells':'mean',\n '% UEs with distance to base station in the range of 3.4-4.1km in 5km cells':'mean',\n '% UEs with distance to base station in the range of 4.1-4.8km in 5km cells':'mean',\n '% UEs with distance to base station in the range of 4.8-5.6km in 5km cells':'mean',\n '% UEs with distance to base station more than 5.6km in 5km cells':'mean'})\n\n dfn2 = dfn2.rename(columns={'% UEs with distance to base station more than 5.6km in 5km cells': '% UEs with 
distance to base station in the range of 4.8-6km in 5km cells'})\n\n\n dfn1=self.Mapp_sectorID(dfn1)\n dfn2=self.Mapp_sectorID(dfn2)\n\n dfpd.dropna(subset=['Sector Name','Lat', 'Long','Azimuth'], inplace=True)\n dfn1.dropna(subset=dfn1.columns, inplace=True)\n dfn2.dropna(subset=dfn2.columns,inplace=True)\n\n#generating intermediate data\n newfile=directory1+\"\\Parsed_Intermediate_data.xlsx\"\n writer = pd.ExcelWriter(newfile)\n dfn1.to_excel(writer, sheet_name='UE-2.1(N)',index=False)\n dfn2.to_excel(writer, sheet_name='UE-5(N)',index=False)\n writer.save()\n\n#making sector name uniform in raw data and physical data \n for i in range(len(dfpd)):\n s1=str(dfpd.iat[i,4])\n l1=s1[len(s1)-1:]\n s1=s1[:len(s1)-2]\n sn1=s1+\"C\"+l1\n dfpd.iat[i,4]=sn1\n \n dfpd=dfpd.sort_values(by='Sector Name', ascending=False)\n dfn1=dfn1.sort_values(by='Sector Name', ascending=False)\n dfn2=dfn2.sort_values(by='Sector Name', ascending=False)\n dfpd = dfpd.rename(columns={'Sector Name': 'Sector Name-PD'})\n\n df_final_1=self.get_FinalData(dfn1)\n df_final_2=self.get_FinalData(dfn2)\n\n#creating \"parsed.xlsx\" file\n newfile=directory1+\"\\Parsed.xlsx\"\n print(\"About to write\")\n writer = pd.ExcelWriter(newfile)\n df_final_1.to_excel(writer, sheet_name='UE-2.1',index=False)\n df_final_2.to_excel(writer, sheet_name='UE-5.0',index=False)\n writer.save()\n dfpd.iloc[0:0]\n\n\n print ('Done! 
Check File at Source Destination..')\n\n\n def get_FinalData(self,df):\n clm=dfpd.columns \n clm=clm.append(df.columns)\n df_final=pd.DataFrame(columns=clm)\n\n for i in range(len(df)):\n for j in range(len(dfpd)):\n if(df.iat[i,len(df.columns)-1]==dfpd.iat[j,4]):\n data=[]\n for k in range(len(dfpd.columns)):\n data.append(dfpd.iat[j,k])\n for k in range(len(df.columns)):\n data.append(df.iat[i,k])\n\n s=pd.Series(data,index=clm)\n df_final=df_final.append(s, ignore_index=True)\n \n return df_final\n\n\n\n\n\n def Mapp_sectorID(self,dfn1):\n df_sector_1=pd.DataFrame(columns=['Sector Name'])\n for i in range(len(dfn1)):\n s1=str(dfn1.iat[i,0])\n if(len(s1)>2):\n l1=s1[len(s1)-1:]\n s1=s1[:len(s1)-2]\n sn1=s1+\"C\"+l1\n dfn1.iat[i,0]=sn1\n\n if (type(dfn1.iat[i,0])==str):\n k=pd.Series(dfn1.iat[i,0].split('-')[1],index=['Sector Name'])\n df_sector_1=df_sector_1.append(k, ignore_index=True)\n else:\n k=pd.Series(\"NaN\",index=['Sector Name'])\n df_sector_1=df_sector_1.append(k, ignore_index=True)\n dfn1['Sector Name']=df_sector_1\n return(dfn1)\n \n\n# Run the program\nif __name__ == '__main__':\n app = wx.App()\n frame = MyForm().Show()\n app.MainLoop()\n del app\n","sub_path":"Coverage_FrameWork.py","file_name":"Coverage_FrameWork.py","file_ext":"py","file_size_in_byte":52622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"293614892","text":"from django.db import models\nfrom datetime import datetime\nfrom ckeditor.fields import RichTextField\n\n# Create your models here.\nclass Youtuber(models.Model):\n\n # Restricting user\n crew_choices = (\n ('Solo', 'Solo'),\n ('Small', 'Small'),\n ('Large', 'Large'),\n )\n\n camera_choices = (\n ('Canon', 'Canon'),\n ('Sony', 'Sony'),\n ('Nikon', 'Nikon'),\n ('Red', 'Red'),\n ('Fuji', 'Fuji'),\n ('Gopro', 'Gopro'),\n ('Other', 'Other'),\n )\n\n categories_choices = (\n ('Code', 'Code'),\n ('Mobile Review', 'Mobile Review'),\n ('Vlogs', 'Vlogs'),\n ('Comedy', 'Comedy'),\n ('Gaming', 'Gaming'),\n ('Films', 'Films'),\n ('Cooking', 'Cooking'),\n ('Others', 'Others'),\n )\n\n name = models.CharField(max_length=50)\n price = models.IntegerField()\n photo = models.ImageField(upload_to='media/ytubers/%Y/%m/%d/')\n video_url = models.CharField(max_length=250)\n description = RichTextField()\n city = models.CharField(max_length=50)\n age = models.IntegerField()\n height = models.IntegerField()\n crew = models.CharField(choices=crew_choices , max_length=250)\n camera_type = models.CharField(choices=camera_choices, max_length=250)\n subs_count = models.CharField(max_length=250)\n category = models.CharField(choices=categories_choices, max_length=250)\n is_featured = models.BooleanField(default=False)\n created_date = models.DateTimeField(default=datetime.now, blank=True)\n\n def __str__(self):\n return self.name \n ","sub_path":"tubers/youtubers/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"147928748","text":"from stanza.research import config\nif __name__ == '__main__':\n config.redirect_output()\n\nfrom stanza.cluster import pick_gpu\nparser = config.get_options_parser()\nparser.add_argument('--device', default=None,\n help='The device to use in Theano (\"cpu\" or \"gpu[0-n]\"). If None, '\n 'pick a free-ish device automatically.')\noptions, extras = parser.parse_known_args()\nif '-h' in extras or '--help' in extras:\n # If user is just asking for the options, don't scare them\n # by saying we're picking a GPU...\n pick_gpu.bind_theano('cpu')\nelse:\n pick_gpu.bind_theano(options.device)\n\n\nfrom stanza.monitoring import progress\nfrom stanza.research import evaluate, metrics, output\nimport datetime\nimport numbers\nimport learners\nimport color_instances\n\nparser.add_argument('--learner', default='Histogram', choices=learners.LEARNERS.keys(),\n help='The name of the model to use in the experiment.')\nparser.add_argument('--load', metavar='MODEL_FILE', default=None,\n help='If provided, skip training and instead load a pretrained model '\n 'from the specified path. If None or an empty string, train a '\n 'new model.')\nparser.add_argument('--train_size', type=int, default=[None], nargs='+',\n help='The number of examples to use in training. This number should '\n '*include* examples held out for validation. If None, use the '\n 'whole training set.')\nparser.add_argument('--validation_size', type=int, default=[0], nargs='+',\n help='The number of examples to hold out from the training set for '\n 'monitoring generalization error.')\nparser.add_argument('--test_size', type=int, default=[None], nargs='+',\n help='The number of examples to use in testing. '\n 'If None, use the whole dev/test set.')\nparser.add_argument('--data_source', default=['dev'], nargs='+',\n choices=color_instances.SOURCES.keys(),\n help='The type of data to use. 
Can supply several for sequential training.')\nparser.add_argument('--output_train_data', type=config.boolean, default=False,\n help='If True, write out the training dataset (after cutting down to '\n '`train_size`) as a JSON-lines file in the output directory.')\nparser.add_argument('--output_test_data', type=config.boolean, default=False,\n help='If True, write out the evaluation dataset (after cutting down to '\n '`test_size`) as a JSON-lines file in the output directory.')\nparser.add_argument('--listener', type=config.boolean, default=False,\n help='If True, evaluate on listener accuracy (description -> color). '\n 'Otherwise evaluate on speaker accuracy (color -> description).')\nparser.add_argument('--progress_tick', type=int, default=300,\n help='The number of seconds between logging progress updates.')\n\n\ndef main():\n options = config.options()\n\n progress.set_resolution(datetime.timedelta(seconds=options.progress_tick))\n\n train_datasets = []\n validation_datasets = []\n test_datasets = []\n\n if len(options.train_size) == 1:\n options.train_size = options.train_size * len(options.data_source)\n else:\n assert len(options.train_size) == len(options.data_source)\n if len(options.validation_size) == 1:\n options.validation_size = options.validation_size * len(options.data_source)\n else:\n assert len(options.validation_size) == len(options.data_source)\n if len(options.test_size) == 1:\n options.test_size = options.test_size * len(options.data_source)\n else:\n assert len(options.test_size) == len(options.data_source)\n\n for source, train_size, validation_size, test_size in zip(options.data_source,\n options.train_size,\n options.validation_size,\n options.test_size):\n train_insts = color_instances.SOURCES[source].train_data(\n listener=options.listener\n )[:train_size]\n if validation_size:\n assert validation_size < len(train_insts), \\\n ('No training data after validation split! 
(%d <= %d)' %\n (len(train_insts), validation_size))\n validation_insts = train_insts[-validation_size:]\n validation_datasets.append(validation_insts)\n train_insts = train_insts[:-validation_size]\n else:\n validation_datasets.append(None)\n train_datasets.append(train_insts)\n test_insts = color_instances.SOURCES[source].test_data(\n options.listener\n )[:test_size]\n test_datasets.append(test_insts)\n\n learner = learners.new(options.learner)\n\n m = [metrics.log_likelihood,\n metrics.log_likelihood_bits,\n metrics.perplexity,\n metrics.aic]\n example_inst = get_example_inst(test_datasets, train_datasets)\n if options.listener and not isinstance(example_inst.output, numbers.Integral):\n m.append(metrics.squared_error)\n elif isinstance(example_inst.output, (tuple, list)):\n m.append(metrics.prec1)\n if example_inst.output and isinstance(example_inst.output, basestring):\n m.extend([metrics.bleu, metrics.wer,\n metrics.token_perplexity_macro, metrics.token_perplexity_micro])\n else:\n m.append(metrics.accuracy)\n if example_inst.output and isinstance(example_inst.output, basestring):\n m.extend([metrics.bleu, metrics.wer,\n metrics.token_perplexity_macro, metrics.token_perplexity_micro])\n\n multi_train = (len(options.data_source) > 1)\n if options.load:\n with open(options.load, 'rb') as infile:\n learner.load(infile)\n\n train_results = None\n else:\n if hasattr(learner, '_data_to_arrays'):\n # XXX: is there a better way to ensure that the vocabulary is defined\n # before training starts?\n for train_insts in train_datasets[1:]:\n learner._data_to_arrays(train_insts, init_vectorizer=True)\n\n for i, (source, train_insts, validation_insts) in enumerate(zip(options.data_source,\n train_datasets,\n validation_datasets)):\n if not train_insts:\n continue\n\n if i > 0:\n learner.train(train_insts, validation_insts, metrics=m, keep_params=True)\n else:\n learner.train(train_insts, validation_insts, metrics=m)\n with open(config.get_file_path('model.p'), 'wb') as 
outfile:\n learner.dump(outfile)\n\n if multi_train:\n split_id = 'train_' + source\n else:\n split_id = 'train'\n train_results = evaluate.evaluate(learner, train_insts, metrics=m, split_id=split_id,\n write_data=options.output_train_data)\n if options.verbosity != 0:\n output.output_results(train_results, split_id)\n\n for i, (source, test_insts) in enumerate(zip(options.data_source,\n test_datasets)):\n if not test_insts:\n continue\n if multi_train:\n split_id = 'eval_' + source\n else:\n split_id = 'eval'\n test_results = evaluate.evaluate(learner, test_insts, metrics=m, split_id=split_id,\n write_data=options.output_test_data)\n if options.verbosity != 0:\n output.output_results(test_results, split_id)\n\n return train_results, test_results\n\n\ndef get_example_inst(test_datasets, train_datasets):\n # Use test if any are nonempty, if not, back off to train\n for dataset in test_datasets:\n if dataset:\n return dataset[0]\n for dataset in train_datasets:\n if dataset:\n return dataset[0]\n assert False, \"No data, can't determine correct evaluation metrics\"\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"run_experiment.py","file_name":"run_experiment.py","file_ext":"py","file_size_in_byte":8472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"283222027","text":"import numpy as np\r\nimport cv2\r\n\r\n#numpy.ones reshapes the array--width600,height 800\r\nrect=np.ones((600,800,3),dtype=np.uint8)*255\r\n\r\n#bgr--red color below one\r\n#10 is the thickness\r\ncv2.rectangle(rect,(0,int(600/2)),(int(800/2),599),(0,0,00),20)\r\ncv2.imshow(\"image\",rect)\r\ncv2.waitKey()\r\ncv2.destroyAllWindows()\r\n\r\n#bgr below in green\r\n#-1 so no border\r\n#cv2.rectangle(rect,(int(800/2),0),(799,int(600/2)),(0,255,0),-1)\r\n#cv2.imshow(\"image\",rect)\r\n#cv2.waitKey()\r\n#cv2.destroyAllWindows()","sub_path":"rect.py","file_name":"rect.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"298644150","text":"import taichi as ti\nimport numpy as np\nimport matplotlib.cm as cm\n\nti.init()\n\nlx = 1.5\nly = 0.3\n\nnx = 60\nny = 20\n\nvelo_rel = 0.01\np_rel = 0.03\n\n# Add 1 cell padding to all directions.\np = ti.var(dt=ti.f32, shape=(nx + 2, ny + 2))\npcor = ti.var(dt=ti.f32, shape=(nx + 2, ny + 2))\n\nu = ti.var(dt=ti.f32, shape=(nx + 3, ny + 2))\nu0 = ti.var(dt=ti.f32, shape=(nx + 3, ny + 2))\nucor = ti.var(dt=ti.f32, shape=(nx + 3, ny + 2))\nu_post = ti.var(dt=ti.f32, shape=(nx + 2, ny + 2))\n\nv = ti.var(dt=ti.f32, shape=(nx + 2, ny + 3))\nvcor = ti.var(dt=ti.f32, shape=(nx + 2, ny + 3))\nv0 = ti.var(dt=ti.f32, shape=(nx + 2, ny + 3))\nv_post = ti.var(dt=ti.f32, shape=(nx + 2, ny + 2))\n\n# ct stands for Cell Type.\n# ct = 0 -> Fluid\n# ct = 1 -> Solid\nct = ti.var(dt=ti.i32, shape=(nx + 2, ny + 2))\n\nrho = 100\nmu = 0.1\ndx = lx / nx\ndy = ly / ny\ndt = 0.001\n\nAu = ti.var(dt=ti.f32, shape=((nx + 1) * ny, (nx + 1) * ny))\nbu = ti.var(dt=ti.f32, shape=((nx + 1) * ny))\nxu = ti.var(dt=ti.f32, shape=((nx + 1) * ny))\nxuold = ti.var(dt=ti.f32, shape=((nx + 1) * ny))\n\nAv = ti.var(dt=ti.f32, shape=(nx * (ny + 1), nx * (ny + 1)))\nbv = ti.var(dt=ti.f32, shape=(nx * (ny + 1)))\nxv = ti.var(dt=ti.f32, shape=(nx * (ny + 1)))\nxvold = ti.var(dt=ti.f32, shape=(nx * (ny + 1)))\n\nAp = ti.var(dt=ti.f32, shape=(nx * ny, nx * ny))\nbp = ti.var(dt=ti.f32, shape=(nx * ny))\nxp = ti.var(dt=ti.f32, shape=(nx * ny))\n\n\ndef init():\n for i, j in ti.ndrange(nx + 2, ny + 2):\n p[i, j] = 100 - i / nx\n for i, j in ti.ndrange(nx + 3, ny + 2):\n u[i, j] = 5.0\n u0[i, j] = u[i, j]\n for i, j in ti.ndrange(nx + 2, ny + 3):\n v[i, j] = 0.0\n v0[i, j] = v[i, j]\n\n for i, j in ti.ndrange(nx + 2, ny + 2):\n ct[i, j] = 1 # \"1\" stands for solid\n for i, j in ti.ndrange((1, nx + 1), (1, ny + 1)):\n ct[i, j] = -1 # \"-1\" stands for fluid\n\n for i, j in ti.ndrange(nx, ny):\n if (((i - 31)**2 + (j - 31)**2) < 36):\n ct[i, j] = 1\n u[i, j] = 0\n u0[i, j] = 0\n 
v[i, j] = 0\n v0[i, j] = 0\n\n\ndef fill_Au():\n for i, j in ti.ndrange((1, nx + 2), (1, ny + 1)):\n k = (i - 1) * ny + (j - 1)\n\n # Inlet and Outlet\n if (ct[i - 1, j]) == 1 or (ct[i, j] + ct[i - 1, j]) == 2:\n Au[k, k] = 1.0\n bu[k] = u[i, j]\n # Outlet\n elif (ct[i, j] == 1):\n Au[k, k] = 1.0\n Au[k, k - ny] = -1.0\n #bu[k] = u[i - 1, j]\n bu[k] = 0.0\n\n # Normal internal cells\n else:\n Au[k, k - 1] = -mu * dx / dy - max(\n [0, -rho * 0.5 * (v[i - 1, j] + v[i, j]) * dx]) # an\n Au[k, k + 1] = -mu * dx / dy - max(\n [0, rho * 0.5 * (v[i - 1, j + 1] + v[i, j + 1]) * dx]) # as\n Au[k, k - ny] = -mu * dy / dx - max(\n [0, rho * 0.5 * (u[i, j] + u[i - 1, j]) * dy]) # aw\n Au[k, k + ny] = -mu * dy / dx - max(\n [0, -rho * 0.5 * (u[i, j] + u[i + 1, j]) * dy]) # ae\n Au[k, k] = -Au[k, k - 1] - Au[k, k + 1] - Au[k, k - ny] - Au[\n k, k + ny] + rho * dx * dy / dt # ap\n bu[k] = (p[i - 1, j] - p[i, j]\n ) * dy + rho * dx * dy / dt * u0[i, j] # <= Unsteady term\n\n for i, j in ti.ndrange((1, nx + 2), (1, ny + 1)):\n k = (i - 1) * ny + (j - 1)\n # Upper and lower boundary\n if (ct[i, j] + ct[i, j - 1]) == 0:\n Au[k, k] = Au[k, k] - Au[k, k - 1] + 2 * mu\n Au[k, k - 1] = 0\n elif (ct[i, j] + ct[i, j + 1]) == 0:\n Au[k, k] = Au[k, k] - Au[k, k + 1] + 2 * mu\n Au[k, k + 1] = 0\n\n\ndef fill_Av():\n for i, j in ti.ndrange((1, nx + 1), (1, ny + 2)):\n k = (i - 1) * (ny + 1) + (j - 1)\n # Upper and lower boundary\n if (ct[i, j] + ct[i, j - 1]) == 0 or (ct[i, j] + ct[i, j - 1]) == 2:\n Av[k, k] = 1.0\n bv[k] = v[i, j]\n else:\n \"\"\"\n TODO: Didn't cover inlet and outlet boundary. 
Actually accessing\n elements out of bound, for example, Av[1,-30].\n However, since in solve_v, when convert to numpy, A[1,-30] become\n 0.0 automatically.\n \"\"\"\n Av[k, k - 1] = -mu * dx / dy - max(\n [0, -rho * 0.5 * (v[i, j - 1] + v[i, j]) * dx]) # an\n Av[k, k + 1] = -mu * dx / dy - max(\n [0, rho * 0.5 * (v[i, j + 1] + v[i, j]) * dx]) # as\n\n Av[k, k - ny - 1] = -mu * dy / dx - max(\n [0, rho * 0.5 * (u[i, j] + u[i, j - 1]) * dy]) # aw\n Av[k, k + ny + 1] = -mu * dy / dx - max(\n [0, -rho * 0.5 * (u[i + 1, j - 1] + u[i + 1, j]) * dy]) # ae\n Av[k, k] = -Av[k, k - 1] - Av[k, k + 1] - Av[k, k - ny - 1] - Av[\n k, k + ny + 1] + rho * dx * dy / dt # ap\n bv[k] = (p[i, j] - p[i, j - 1]) * dx + rho * dx * dy / dt * v0[i,\n j]\n\n\ndef solve_axb(A, b):\n from scipy.sparse.linalg import qmr, bicg\n from scipy.sparse import csc_matrix\n print(\"Now converting A and b to numpy...\")\n A_np = A.to_numpy()\n b_np = b.to_numpy()\n print(\"Finished converting A and b to numpy...\")\n print(\"Now solving Ax=b...\")\n ans = np.linalg.solve(A_np, b_np)\n print(\"Finished solving Ax=b...\")\n return ans\n #ans, exitCode = bicg(A_np, b_np, atol='legacy', tol=1e-3)\n # return ans\n\n\ndef sol_back_matrix(mat, sol):\n mat_width = mat.shape()[0] - 2\n mat_height = mat.shape()[1] - 2\n for i, j in ti.ndrange(mat_width, mat_height):\n mat[i + 1, j + 1] = sol[i * mat_height + j]\n\n\ndef xu_back():\n for i, j in ti.ndrange(nx + 1, ny):\n u[i + 1, j + 1] = xu[i * ny + j]\n\n\ndef xv_back():\n for i, j in ti.ndrange(nx, ny + 1):\n v[i + 1, j + 1] = xv[i * ny + j]\n\n\ndef iter_solve_u():\n #A = Au.to_numpy()\n #b = bu.to_numpy()\n res = 100.0\n\n while np.abs(res) > 1e-3:\n res = 0.0\n for i, j in ti.ndrange(nx + 1, ny):\n k = i * ny + j\n # print(\"k = \", k, \"ny = \", ny, \"k-ny = \", k - ny, \"Au[k-ny] = \",\n # Au[k - ny])\n xu[k] = 1 / Au[k, k] * (-Au[k, k - 1] * u[i, j - 1] -\n Au[k, k + 1] * u[i, j + 1] -\n Au[k, k - ny] * u[i - 1, j] -\n Au[k, k + ny] * u[i + 1, j] + 
bu[k])\n\n res = res + xu[k] - xuold[k]\n xu_back()\n for i, j in ti.ndrange(nx + 1, ny):\n k = i * ny + j\n xuold[k] = xu[k]\n print(\"Solving x momentum, the residul is now \", res)\n\n\ndef iter_solve_v():\n #A = Au.to_numpy()\n #b = bu.to_numpy()\n res = 100.0\n\n while np.abs(res) > 1e-3:\n res = 0.0\n for i, j in ti.ndrange(nx, ny + 1):\n k = i * (ny + 1) + j\n # print(\"k = \", k, \"ny = \", ny, \"k-ny = \", k - ny, \"Au[k-ny] = \",\n # Au[k - ny])\n xv[k] = 1 / Av[k, k] * (-Av[k, k - 1] * v[i, j - 1] -\n Av[k, k + 1] * v[i, j + 1] -\n Av[k, k - ny - 1] * v[i - 1, j] -\n Av[k, k + ny + 1] * v[i + 1, j] + bv[k])\n\n res = res + xv[k] - xvold[k]\n xv_back()\n for i, j in ti.ndrange(nx, ny + 1):\n k = i * (ny + 1) + j\n xvold[k] = xv[k]\n print(\"Solving y momentum, the residual is now \", res)\n\n\ndef solve_moment_x():\n print(\"Now filling Au...\")\n fill_Au()\n print(\"Finished filling Au...\")\n \n print(\"Solving x momentum...\")\n # solve_axb returns a numpy array\n # needs to convert back to taichi\n #import numpy.linalg as npl\n # print(\"Shape of Au is\", Au.shape(), \"Rank of Au is:\",\n # npl.matrix_rank(Au.to_numpy()))\n xu.from_numpy(solve_axb(Au, bu))\n # iter_solve_u()\n print(\"Converting xu to u...\")\n sol_back_matrix(u, xu)\n print(\"Finished converting xu to u...\")\n\ndef solve_moment_y():\n fill_Av()\n print(\"Solving y momentum...\")\n #import numpy.linalg as npl\n # print(\"Shape of Av is\", Av.shape(), \"Rank of Av is:\",\n # npl.matrix_rank(Av.to_numpy()))\n xv.from_numpy(solve_axb(Av, bv))\n # iter_solve_v()\n sol_back_matrix(v, xv)\n\n\ndef correct_u():\n ucor_max = 0.0\n for i, j in ti.ndrange((1, nx + 2), (1, ny + 1)):\n k = (i - 1) * ny + (j - 1)\n # Upper and lower boundary\n if (ct[i - 1, j] + ct[i, j]) == 0 or (ct[i - 1, j] + ct[i, j]) == 2:\n pass\n else:\n ucor[i, j] = (pcor[i - 1, j] - pcor[i, j]) * dy / Au[k, k]\n u[i, j] = u[i, j] + ucor[i, j] * velo_rel\n if np.abs(ucor[i, j] / (u[i, j] + 1.0e-9)) >= ucor_max:\n ucor_max 
= np.abs(ucor[i, j] / (u[i, j] + 1.0e-9))\n return ucor_max\n\n\ndef correct_v():\n vcor_max = 0.0\n for i, j in ti.ndrange((1, nx + 1), (1, ny + 2)):\n k = (i - 1) * (ny + 1) + (j - 1)\n # Upper and lower boundary\n if (ct[i, j] + ct[i, j - 1]) == 0 or (ct[i, j] + ct[i, j - 1]) == 2:\n pass\n else:\n vcor[i, j] = (pcor[i, j] - pcor[i, j - 1]) * dx / Av[k, k]\n v[i, j] = v[i, j] + vcor[i, j] * velo_rel\n if np.abs(vcor[i, j] / (v[i, j] + 1.0e-9)) >= vcor_max:\n vcor_max = np.abs(vcor[i, j] / (v[i, j] + 1.0e-9))\n return vcor_max\n\n\ndef correct_uconserv():\n inlet_flux = 0.0\n outlet_flux = 0.0\n for i in range(1, ny + 1):\n inlet_flux = inlet_flux + u[1, i]\n outlet_flux = outlet_flux + u[nx + 1, i]\n print(\"Inlet flux = \", inlet_flux, \"; Outlet flux = \", outlet_flux)\n\n coef = inlet_flux / (outlet_flux + 1.0e-9)\n for i in range(1, ny + 1):\n u[nx + 1, i] = coef * u[nx + 1, i]\n\n\ndef check_uconserv():\n inlet_flux = 0.0\n outlet_flux = 0.0\n for i in range(1, ny + 1):\n inlet_flux = inlet_flux + u[1, i]\n outlet_flux = outlet_flux + u[nx + 1, i]\n print(\"Inlet flux = \", inlet_flux, \"; Outlet flux = \", outlet_flux)\n\n\ndef fill_Ap():\n for i, j in ti.ndrange((1, nx + 1), (1, ny + 1)):\n k = (i - 1) * ny + (j - 1)\n bp[k] = rho * (u[i, j] - u[i + 1, j]) * dy + rho * (v[i, j + 1] -\n v[i, j]) * dx\n # Go back to Av matrix, find the corresponding v\n vk = (i - 1) * (ny + 1) + (j - 1)\n Ap[k, k - 1] = -rho * dx * dx / Av[vk, vk]\n Ap[k, k + 1] = -rho * dx * dx / Av[vk + 1, vk + 1]\n # Go back to Au matrix\n uk = k\n Ap[k, k - ny] = -rho * dy * dy / Au[uk, uk]\n Ap[k, k + ny] = -rho * dy * dy / Au[uk + ny, uk + ny]\n\n if (ct[i, j] + ct[i, j - 1]) == 0:\n Ap[k, k - 1] = 0\n elif (ct[i, j] + ct[i, j + 1]) == 0:\n Ap[k, k + 1] = 0\n elif (ct[i, j] + ct[i - 1, j]) == 0:\n Ap[k, k - ny] = 0\n elif (ct[i, j] + ct[i + 1, j]) == 0:\n Ap[k, k + ny] = 0\n Ap[k, k] = -Ap[k, k - 1] - Ap[k, k + 1] - Ap[k, k - ny] - Ap[k, k + ny]\n\n\ndef solve_pcor():\n fill_Ap()\n 
#import numpy.linalg as npl\n # print(\"Shape of Ap is\", Ap.shape(), \"Rank of Ap is:\",\n # npl.matrix_rank(Ap.to_numpy()))\n sumbp = 0.0\n for i, j in ti.ndrange((1, nx + 1), (1, ny + 1)):\n k = (i - 1) * ny + (j - 1)\n sumbp = sumbp + bp[k]\n print(\"Sum bp before solving pcorr was\", sumbp)\n\n print(\"Now solving pcor...\")\n xp.from_numpy(solve_axb(Ap, bp))\n sol_back_matrix(pcor, xp)\n\n for i, j in ti.ndrange(nx + 2, ny + 2):\n if ct[i, j] == 1:\n pass\n else:\n p[i, j] = p[i, j] + p_rel * pcor[i, j]\n\n\ndef visual(mat):\n A = mat.to_numpy()\n import matplotlib.pyplot as plt\n # 'nearest' interpolation - faithful but blocky\n plt.imshow(A, interpolation='nearest', cmap=cm.rainbow)\n # plt.colorbar()\n # plt.show()\n plt.savefig(\"karmen\" + str(iter) + \".png\", dpi=300)\n\n\ndef display():\n import matplotlib.pyplot as plt\n fig, ax = plt.subplots(2, 6)\n\n pcm = ax[0, 0].pcolormesh(u.to_numpy(), cmap=cm.rainbow)\n ax[0, 0].set_title(\"U velocity\")\n fig.colorbar(pcm, ax=ax[0, 0])\n\n pcm = ax[0, 1].pcolormesh(v.to_numpy(), cmap=cm.rainbow)\n ax[0, 1].set_title(\"V velocity\")\n fig.colorbar(pcm, ax=ax[0, 1])\n\n pcm = ax[0, 2].pcolormesh(p.to_numpy(), cmap=cm.rainbow)\n ax[0, 2].set_title(\"Pressure\")\n fig.colorbar(pcm, ax=ax[0, 2])\n\n pcm = ax[0, 3].pcolormesh(pcor.to_numpy(), cmap=cm.rainbow)\n ax[0, 3].set_title(\"p correction\")\n fig.colorbar(pcm, ax=ax[0, 3])\n\n pcm = ax[0, 4].pcolormesh(ucor.to_numpy(), cmap=cm.rainbow)\n ax[0, 4].set_title(\"u correction\")\n fig.colorbar(pcm, ax=ax[0, 4])\n\n pcm = ax[0, 5].pcolormesh(vcor.to_numpy(), cmap=cm.rainbow)\n ax[0, 5].set_title(\"v correction\")\n fig.colorbar(pcm, ax=ax[0, 5])\n\n ax[1, 0].plot(p.to_numpy()[1:int(nx + 1), int(ny / 2)])\n ax[1, 0].set_title(\"pressure drop\")\n\n ax[1, 1].plot(u.to_numpy()[int(0.2 * nx), 1:int(ny + 1)])\n ax[1, 1].set_title(\"U profile at 60\")\n\n ax[1, 2].plot(u.to_numpy()[int(0.5 * nx), 1:int(ny + 1)])\n ax[1, 2].set_title(\"U profile at 100\")\n\n ax[1, 
3].plot(u.to_numpy()[int(0.8 * nx), 1:int(ny + 1)])\n ax[1, 3].set_title(\"U profile at 120\")\n\n fig.set_size_inches(16, 9)\n fig.tight_layout()\n\n plt.savefig(\"Iteration_i\" + str(iter) + \"_t\" + str(jter) + \".png\", dpi=400)\n\n\nif __name__ == \"__main__\":\n init()\n\n check_uconserv()\n for jter in range(1000):\n print(\"Solving the next time step, currently the \", jter, \"th iteration...\")\n for iter in range(10000):\n print(\"Looping through the inner loop, it's the \", iter, \"th iteration out of 10...\")\n solve_moment_x()\n solve_moment_y()\n correct_uconserv()\n check_uconserv()\n solve_pcor()\n resu = correct_u()\n resv = correct_v()\n print(\"Resu = \", resu, \"Resv = \", resv)\n u0 = u\n v0 = v\n display()\n","sub_path":"homework1/re600_sph/simpler_uns.py","file_name":"simpler_uns.py","file_ext":"py","file_size_in_byte":13989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"154055157","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Feb 6 18:17:08 2021\r\n\r\n@author: user\r\n\"\"\"\r\n\r\nclass BittreeNode:\r\n def __init__(self,data):\r\n self.data=data\r\n self.lchild=None\r\n self.rchild=None\r\n self.parent=None\r\n \r\n \r\nclass BST:\r\n def __init__(self,li=None):\r\n self.root=None\r\n if li:\r\n for val in li:\r\n self.insert_no_rec(val)\r\n \r\n def insert(self,node,val):\r\n if not node:\r\n node=BittreeNode(val)\r\n elif valnode.data:\r\n node.rchild=self.insert(node.rchild, val)\r\n node.rchild.parent=node\r\n #相等就不要了\r\n return node\r\n \r\n def insert_no_rec(self,val):\r\n p=self.root\r\n if not p:\r\n self.root=BittreeNode(val)\r\n return\r\n while True:\r\n if valp.data:\r\n if p.rchild:\r\n p=p.rchild\r\n else:\r\n p.rchild=BittreeNode(val)\r\n p.rchild.parent=p\r\n return\r\n else:\r\n return\r\n \r\n def query(self,node,val):\r\n if not node:\r\n return None\r\n elif node.dataval:\r\n return node.query(node.lchild,val)\r\n else:\r\n return node\r\n \r\n def query_no_rec(self,val):\r\n p=self.root\r\n while p:\r\n if p.dataval:\r\n p=p.lchild\r\n else:\r\n return p\r\n return None\r\n \r\n def pre_order(self,root):\r\n if root:\r\n print(root.data,end=',')\r\n self.pre_order(root.lchild)\r\n self.pre_order(root.rchild)\r\n\r\n def in_order(self,root):\r\n if root:\r\n self.in_order(root.lchild)\r\n print(root.data,end=',')\r\n self.in_order(root.rchild)\r\n \r\n def post_order(self,root):\r\n if root:\r\n self.post_order(root.lchild)\r\n self.post_order(root.rchild)\r\n print(root.data,end=',')\r\n \r\n \r\n def __remove_node_1(self,node):\r\n if not node.parent:\r\n self.root=None\r\n elif node==node.parent.lchild:\r\n node.parent.lchild=None\r\n else:\r\n node.parent.rchild=None\r\n \r\n def __remove_node_21(self,node):\r\n #只有一个左孩子\r\n if not node.parent:\r\n self.root=node.lchild\r\n node.lchild.parent=None\r\n elif node==node.parent.lchild:\r\n node.parent.lchild=node.lchild\r\n 
node.lchild.parent=node.parent\r\n else:\r\n node.parent.rchild=node.lchild\r\n node.lchild.parent=node.parent\r\n \r\n def __remove_node_22(self,node):\r\n #node只有一个右孩子\r\n if not node.parent:\r\n self.root=node.rchild\r\n node.rchild.parent=None\r\n elif node==node.parent.lchild:\r\n node.parent.lchild=node.rchild\r\n node.rchild.parent=node.parent\r\n else:\r\n node.parent.rchild=node.rchild\r\n node.rchild.parent=node.parent\r\n \r\n def delete(self,val):\r\n if self.root:\r\n node =self.query_no_rec(val)\r\n if not node:\r\n return False\r\n if not node.lchild and not node.rchild:\r\n self.__remove_node_1(node)\r\n elif not node.rchild:\r\n self.__remove_node_21(node)\r\n else:\r\n min_node=node.rchild\r\n while min_node.lchild:\r\n min_node=min_node.lchild\r\n node.data=min_node.data\r\n if min_node.rchild:\r\n self.__remove_node_22(min_node)\r\n else:\r\n self.__remove_node_1(min_node)\r\n \r\ntree=BST([4,6,7,9,2,1,3,5,8])\r\nprint(\"\")\r\ntree.pre_order(tree.root)\r\nprint(\"\")\r\ntree.in_order(tree.root)\r\nprint(\"\")\r\ntree.post_order(tree.root)\r\nprint(tree.query_no_rec(10))\r\ntree.delete(4)\r\ntree.pre_order(tree.root)\r\n\r\n\r\n ","sub_path":"BST.py","file_name":"BST.py","file_ext":"py","file_size_in_byte":4541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"380052073","text":"\"\"\"\nModule to load input data files\nCreated 8/8/2016\n\nModified:\n@Date 10/05/2016\n@author: Xinya Li (xinya.li@pnl.gov), Chris R. Vernon (chris.vernon@pnnl.gov)\n@Project: Xanthos V2.0\n\n\nLicense: BSD 2-Clause, see LICENSE and DISCLAIMER files\n\nCopyright (c) 2017, Battelle Memorial Institute\n\"\"\"\n\nimport os\nfrom scipy import io as spio\nimport numpy as np\n\nfrom xanthos.utils.numpy_parser import GetArrayCSV, GetArrayTXT\n\n\nclass LoadReferenceData:\n \"\"\"\n Load reference data.\n\n :param settings: settings object from configuration\n \"\"\"\n def __init__(self, settings):\n\n # Area value for each land grid cell: 67420 x 1, convert from ha to km2\n self.area = load_const_griddata(settings.Area) * 0.01\n\n # Coordinates for flattened grid: 67420 x 5, the columns are ID#, lon, lat, ilon, ilat\n self.coords = load_const_griddata(settings.Coord)\n\n # Basin ID Map: 67420 x 1, 235 Basins\n self.basin_ids = load_const_griddata(settings.BasinIDs, 1).astype(int)\n\n # Corresponding to Basin ID Map, 235 Basin Names: 1D String Array\n self.basin_names = load_const_griddata(settings.BasinNames)\n\n # GCAM Region ID Map : 67420 x 1 (The nonag region table will be the 'primary' region assignment)\n self.region_ids = load_const_griddata(settings.GCAMRegionIDs, 1).astype(int)\n\n # Corresponding to GCAM Region ID Map\n with open(settings.GCAMRegionNames, 'r') as f:\n f.readline()\n temp = f.read().split('\\n')\n self.region_names = np.array([i.split(',') for i in temp])[:, 0]\n\n # Country ID Map : 67420 x 1 (249 countries: 1-249)\n self.country_ids = load_const_griddata(settings.CountryIDs, 1).astype(int)\n\n # Corresponding to Country ID Map, 0-248 index number and 249 Country Names: 2D String Array\n with open(settings.CountryNames, 'r') as f:\n temp = f.read().splitlines()\n self.country_names = np.array([i.split(',') for i in temp])[:, 1]\n\n if settings.runoff_module == 'gwam':\n # Max Soil Moisture Map (mm/month): 67420 
x 1\n self.max_soil_moist = load_const_griddata(settings.MaxSoilMois, 1)\n\n # Water Bodies: assign MSM = 999, 306 x 2, Col 1 is the cell number in 67420\n self.lakes_msm = load_const_griddata(settings.LakesMSM).astype(int)\n self.lakes_msm[:, 0] -= 1\n\n # ''' Rivers: assign MSM = 999, 4198 x 2, Col 1 is the cell number in 67420\n # constants['RiversMSM'] = load_const_griddata(settings.RiversMSM).astype(int)\n # constants['RiversMSM'][:,0] -= 1\n\n # Additional water bodies: assign MSM = 999, 421 x 2, Col 1 is the cell number in 67420\n self.addit_water_msm = load_const_griddata(settings.AdditWaterMSM).astype(int)\n self.addit_water_msm[:, 0] -= 1\n\n\ndef load_climate_data(fle, var_name, n_cells, n_months, neg_to_zero=False):\n \"\"\"\n Loads and checks input climate data.\n\n Dimension: 67420 x number of years*12, for example:\n Historical: 1950-2005 672 months\n Future: 2006-2100 1140 months\n\n @:param fle: file path with extension\n @:param var_name: NetCDF variable name\n @:param neg_to_zero: convert negative values to zero\n @:param n_cells: number of cells\n @:param n_months: number of months\n\n @:return: array\n \"\"\"\n a = load_const_griddata(fle, 0, var_name)\n\n if neg_to_zero:\n a[np.where(a < 0)] = 0\n\n return check_climate_data(a, n_cells=n_cells, n_months=n_months, text=var_name)\n\n\ndef load_routing_data(fle, ngridrow, ngridcol, map_index, skip=68, rep_val=None):\n \"\"\"\n Load routing data.\n\n DRT data, 280 x 720, -9999 for missing values, convert to 67420 X 1\n\n @:param fle file to load\n @:param ngridrow number of grids per row\n @:param ngridcol number of grids per column\n @:param map_index\n @:param skip\n @:param rep_val value to replace with when less than value\n \"\"\"\n fd = load_const_griddata(fle)\n v = vectorize(fd, ngridrow, ngridcol, map_index, skip=skip)\n\n if rep_val is None:\n return v\n\n else:\n v[np.where(v < rep_val)[0]] = rep_val\n return v\n\n\ndef load_soil_data(settings):\n \"\"\"\n Load soil moisture file into 
array if in future mode, else stage zeros array.\n \"\"\"\n try:\n # Initialize channel storage/soil moisture.\n if settings.HistFlag == \"True\":\n return np.zeros((settings.ncell,), dtype=float)\n\n # For future runs, initialize with the last value of the historical channel storage/soil moisture\n else:\n return load_const_griddata(settings.SavFile, 0, settings.SavVarName)[:, -1]\n\n # if not in use\n except AttributeError:\n return np.zeros((settings.ncell,), dtype=float)\n\n\ndef load_chs_data(settings):\n \"\"\"\n Load channel velocity file into array if in future mode, else stage zeros array.\n \"\"\"\n try:\n\n # Initialize channel storage/soil moisture.\n if settings.HistFlag == \"True\":\n return np.zeros((settings.ncell,), dtype=float)\n\n # For future runs, initialize with the last value of the historical channel storage/soil moisture\n else:\n return load_const_griddata(settings.ChStorageFile, 0, settings.ChStorageVarName)[:, -1]\n except AttributeError:\n return np.zeros((settings.ncell,), dtype=float)\n\n\ndef load_gcm_var(fn, varname):\n \"\"\"\n Loads climate data from the specified GCM\n \"\"\"\n\n if not os.path.isfile(fn):\n raise IOError(\"File does not exist: {}\".format(fn))\n\n temp = spio.loadmat(fn)\n data = temp[varname]\n\n return data\n\n\ndef check_climate_data(data, n_cells, n_months, text):\n \"\"\"\n Check array size of input and check to make sure the total number of months can be split into years.\n\n :param data: input array\n :param n_cells: number of cells\n :param n_months: number of months\n :param text: name of target variable\n \"\"\"\n err_cell = \"Error: Inconsistent {0} data grid size. Expecting size: {1}. Received size: {2}\".format(text, n_cells, data.shape[0])\n err_mth = \"Error: Inconsistent {0} data grid size. Expecting size: {1}. 
Received size: {2}\".format(text, n_months, data.shape[1])\n\n if not data.shape[0] == n_cells:\n raise RuntimeError(err_cell)\n\n if not data.shape[1] == n_months:\n raise RuntimeError(err_mth)\n\n if not data.shape[1] % 12 == 0:\n raise RuntimeError(\"Error: Number of months in climate data can not be converted into integral years.\")\n\n return data\n\n\ndef load_const_griddata(fn, headerNum=0, key=\" \"):\n \"\"\"\n Load constant grid data stored in files defined in GRID_CONSTANTS.\n \"\"\"\n\n # for MATLAB files\n if fn.endswith('.mat'):\n data = load_gcm_var(fn, key)\n\n # for Numpy pickled files\n elif fn.endswith('.npy'):\n data = np.load(fn)\n\n # for text files\n elif fn.endswith('.txt'):\n\n if not os.path.isfile(fn):\n raise IOError(\"Error: File does not exist:\", fn)\n\n try:\n data = GetArrayTXT(fn, headerNum)\n\n except:\n with open(fn, 'r') as f:\n data = np.array(f.read().splitlines())\n\n # for CSV files\n elif fn.endswith('.csv'):\n\n if not os.path.isfile(fn):\n raise IOError(\"Error: File does not exist:\", fn)\n\n data = GetArrayCSV(fn, headerNum)\n\n # for NetCDF classic files\n elif fn.endswith('.nc'):\n\n if not os.path.isfile(fn):\n raise IOError(\"Error: File does not exist:\", fn)\n\n datagrp = spio.netcdf.netcdf_file(fn, 'r', mmap=False)\n\n # copy() added to handle numpy 'ValueError:assignment destination is read-only' related to non-contiguous memory\n try:\n data = datagrp.variables[key][:, :].copy()\n\n except:\n data = datagrp.variables[key][:].copy()\n\n datagrp.close()\n\n return data\n\n\ndef vectorize(data, ngridrow, ngridcol, map_index, skip):\n \"\"\"\n Convert 2D Map (360 x 720) Matrix to 1D Map(67420)\n \"\"\"\n new = np.zeros((ngridrow, ngridcol), dtype=float) - 9999\n\n for i in range(0, data.shape[0]):\n new[i + skip, :] = data[data.shape[0] - 1 - i, :]\n\n new = new.reshape((ngridrow * ngridcol,), order='F')\n\n return new[map_index]\n\n\ndef load_soil_moisture(d, ngrids, missing=-9999):\n data = np.zeros((ngrids, 5), 
order='F')\n\n data[:, 0] = d.area\n data[:, 1] = d.region_ids\n data[:, 2] = d.max_soil_moist\n\n # add max value (999) where water is\n data[d.lakes_msm[:, 0], 2] = d.lakes_msm[:, 1]\n data[d.addit_water_msm[:, 0], 2] = d.addit_water_msm[:, 1]\n\n country = d.country_ids[:]\n basin = d.basin_ids[:]\n\n # Ignore all the cells in which we are missing an ID value for soil moisture, country, or basin.\n # Thus, country and basin coverage must be consistent.\n # Basins coverage is smaller, and GCAM region ignores Greenland.\n invalid = np.where((data[:, 2] == 0) | (country == 0) | (basin == 0))[0]\n\n # should this be 0:2\n data[invalid, 1:2] = 0\n\n # should these be returned?\n country[invalid] = missing\n basin[invalid] = missing\n\n return data\n","sub_path":"xanthos/data_reader/data_load.py","file_name":"data_load.py","file_ext":"py","file_size_in_byte":9278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"187509246","text":"# Version python3.6\n# -*- coding: utf-8 -*-\n# @Time : 2018/11/6 4:40 PM\n# @Author : zenRRan\n# @Email : zenrran@qq.com\n# @File : GRU.py\n# @Software: PyCharm Community Edition\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport utils.Embedding as Embedding\n\nimport random\n\nclass GRU(nn.Module):\n def __init__(self, opts, vocab, label_vocab):\n super(GRU, self).__init__()\n\n random.seed(opts.seed)\n torch.manual_seed(opts.seed)\n torch.cuda.manual_seed(opts.seed)\n\n self.embed_dim = opts.embed_size\n self.word_num = vocab.m_size\n self.pre_embed_path = opts.pre_embed_path\n self.string2id = vocab.string2id\n self.embed_uniform_init = opts.embed_uniform_init\n self.label_num = label_vocab.m_size\n self.embed_dropout = opts.embed_dropout\n self.fc_dropout = opts.fc_dropout\n self.hidden_num = opts.hidden_num\n self.hidden_size = opts.hidden_size\n self.hidden_dropout = opts.hidden_dropout\n self.bidirectional = opts.bidirectional\n\n self.embeddings = nn.Embedding(self.word_num, self.embed_dim)\n if opts.pre_embed_path != '':\n embedding = Embedding.load_predtrained_emb_zero(self.pre_embed_path, self.string2id)\n self.embeddings.weight.data.copy_(embedding)\n else:\n nn.init.uniform_(self.embeddings.weight.data, -self.embed_uniform_init, self.embed_uniform_init)\n\n self.gru = nn.GRU(\n self.embed_dim,\n self.hidden_size,\n dropout=self.hidden_dropout,\n num_layers=self.hidden_num,\n batch_first=True,\n bidirectional=self.bidirectional\n )\n self.embed_dropout = nn.Dropout(self.embed_dropout)\n self.fc_dropout = nn.Dropout(self.fc_dropout)\n self.linear1 = nn.Linear(self.hidden_size * 2, self.hidden_size // 2)\n self.linear2 = nn.Linear(self.hidden_size // 2, self.label_num)\n\n def forward(self, input):\n out = self.embeddings(input)\n out = self.embed_dropout(out)\n out, _ = self.gru(out) #[1, 1, 200]\n\n out = torch.transpose(out, 1, 2)\n\n out = torch.tanh(out)\n\n out = F.max_pool1d(out, 
out.size(2)) #[1, 200, 1]\n\n out = out.squeeze(2) #[1, 400]\n\n out = self.fc_dropout(out)\n out = self.linear1(F.relu(out))\n output = self.linear2(F.relu(out))\n\n return output\n","sub_path":"GRU.py","file_name":"GRU.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"401494552","text":"import unittest\nfrom foo_math import add\n\n# best practices: set test/answers as global\nEXAMPLE_1 = [3,4]\nEXAMPLE_2 = [-3,-4]\nANSWER_1 = 7\nANSWER_2 = -7\n\nclass FactorialTest(unittest.TestCase):\n \n # anything with \"test_\" will be automatically tested\n def test_positive(self):\n self.assertEqual(add(EXAMPLE_1[0], EXAMPLE_1[1]), ANSWER_1)\n\n def test_negative(self):\n self.assertEqual(add(EXAMPLE_2[0], EXAMPLE_2[1]), ANSWER_2)\n\nif __name__ == \"__main__\":\n unittest.main()","sub_path":"tests/test_foomath.py","file_name":"test_foomath.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"403960147","text":"\"\"\"\n This module contains all the procedures necessary for\n loading the data.\n\"\"\"\nimport pickle as pkl\nimport numpy as np\nimport logging\nimport sklearn\nfrom sklearn.model_selection import train_test_split\n\n# DATA_FOLDER = \"data/\"\n\nEXTENSION = \".pkl\"\nDATA_SETS = {\n \"fonollosa\": {0: \"B1-system\",\n 1: \"B2-system\",\n 2: \"B3-system\",\n 3: \"B4-system\",\n 4: \"B5-system\",\n \"n_classes\": 4\n },\n \"turbulent_gas_mixtures\": {0: \"preloaded_dataset\", \"n_classes\": 4},\n \"windtunnel\": {\n 0: \"preloaded_dataset-L1\",\n 1: \"preloaded_dataset-L2\",\n 2: \"preloaded_dataset-L3\",\n 3: \"preloaded_dataset-L4\",\n 4: \"preloaded_dataset-L5\",\n 5: \"preloaded_dataset-L6\",\n \"n_classes\": 11,\n }\n # Uncomment the following line if you have been authorized to use the dataset\n # ,\"coffee_dataset\": {0: \"preloaded_dataset\", \"n_classes\": 3}\n}\nDS_WINE = {\"QWines-CsystemTR\": 3,\n \"QWinesEa-CsystemTR\": 4}\n\n\ndef load(ds_choice, ds_idx=0):\n \"\"\"\n choices : 0 -> fonollosa, 1 -> turbulent_gas_mixtures, 2 -> windtunnel\n :param ds_choice: the index naming the dataset chosen\n :param ds_idx: the index if the folder containing the dataset has one or more datasets\n :return: the dataset read,the labels and the number of classes\n \"\"\"\n global DATA_FOLDER, DATA_SETS, EXTENSION\n assert ds_choice in list(DATA_SETS.keys())\n # ds_name =[ds_choice]\n ds_name = ds_choice\n dataset_name = ds_name+\"/\"\n\n logging.info(ds_name + \" Is being loaded\")\n\n n_classes = DATA_SETS[ds_name]['n_classes']\n print(\"\\n\\n ds_name:\"+ds_name+\"\\n\\n\")\n print(\"\\n\\n ds_idx\"+str(ds_idx)+\"\\n\\n\")\n subds_name = DATA_SETS[ds_name][ds_idx]\n sub_dataset_name = subds_name + \"/\"\n\n data, labels = None, None\n with open(DATA_FOLDER + ds_name + \"/\" + subds_name + EXTENSION, 'rb') as d:\n data, labels, _ = pkl.load(d)\n d.close()\n\n return data, labels, n_classes, dataset_name, sub_dataset_name\n\n\ndef 
load_wine(ds_choice):\n \"\"\"\n choices : 0 -> QWines-CsystemTR, 1 -> QWinesEa-CsystemTR\n :param ds_choice: the index naming the dataset chosen\n :return: the dataset read,the labels and the number of classes\n \"\"\"\n global DATA_FOLDER, DS_WINE, EXTENSION\n\n assert ds_choice in list(DS_WINE.keys())\n ds_name = ds_choice\n dataset_name = ds_name+'/'\n\n logging.info(ds_name + \" Is being loaded\")\n\n n_classes = DS_WINE[ds_name]\n\n data, labels = None, None\n with open(DATA_FOLDER + \"wines/\" + ds_name + EXTENSION, \"rb\") as d:\n data, labels, _, _ = pkl.load(d)\n d.close()\n\n return data, labels, n_classes, \"wine/ \", dataset_name # sub_dataset_name\n\n\ndef data_set_reshaped(data_set):\n new_data = []\n for d in data_set:\n new_data.append(d.reshape(d.shape[0], d.shape[1], 1).tolist())\n return np.array(new_data)\n\n\ndef load_and_split(ds_choice, ds_idx=0, read_wine_datasets=False):\n # Loading dataset\n data = None\n labels = None\n dataset_name = \"\"\n sub_dataset_name = \"\"\n if not read_wine_datasets:\n data, labels, n_classes, dataset_name, sub_dataset_name = load(ds_choice, ds_idx)\n else:\n data, labels, n_classes, dataset_name, sub_dataset_name = load_wine(ds_choice)\n\n train_data, test_data, train_labels, test_labels = train_test_split(data, labels, test_size=.2)\n train_data = data_set_reshaped(train_data)\n test_data = data_set_reshaped(test_data)\n\n # input_shape = train_data[0].shape\n\n train_data, train_labels = sklearn.utils.shuffle(train_data, train_labels)\n test_data, test_labels = sklearn.utils.shuffle(test_data, test_labels)\n\n return train_data, train_labels, test_data, test_labels, dataset_name, sub_dataset_name\n\n\ndef standardize_data(train_data, test_data, input_shape):\n\n flat_train_data = train_data.reshape(train_data.shape[0], input_shape[0] * input_shape[1])\n flat_test_data = test_data.reshape(test_data.shape[0], input_shape[0] * input_shape[1])\n\n scaler = 
sklearn.preprocessing.StandardScaler().fit(flat_train_data)\n flat_train_data = scaler.transform(flat_train_data)\n\n scaler = sklearn.preprocessing.StandardScaler().fit(flat_test_data)\n flat_test_data = scaler.transform(flat_test_data)\n\n new_train = flat_train_data.reshape(train_data.shape[0], input_shape[0], input_shape[1], 1)\n new_test = flat_test_data.reshape(test_data.shape[0], input_shape[0], input_shape[1], 1)\n return new_train, new_test\n\n\ndef split_datasamples_by_sensors(data):\n \"\"\"\n This is an auxiliary procedure for executing the\n SniffMultinose model split turn each column of\n the data matrix into an individual vector.\n :param data: matrix of signals encoded in an numpy array of doubles\n :return: a list with each column of the data matrix saved in a\n list item different\n \"\"\"\n shape = data.shape\n new_split = []\n # Iterate over data columns\n for i in range(shape[2]):\n new_split.append(data[:, :, i])\n new_split[i] = new_split[i].reshape(new_split[i].shape[0], new_split[i].shape[1])\n return new_split\n\n\ndef load_dataset(ds_choice, ds_idx, read_wine_datasets=False):\n \"\"\"\n Loads the dataset from the experiment\n :param ds_choice: Name of the dataset_chosen\n :param ds_idx: index indicating wich subset should be loaded\n :param read_wine_datasets: True, if it is desired to read the wine dataset\n :return: data_samples,\n data labels,\n name of the dataset and name of the data subset,\n name of the input_shape\n \"\"\"\n data = None\n labels = None\n dataset_name = None\n sub_dataset_name = None\n if not read_wine_datasets:\n data, labels, n_classes, dataset_name, sub_dataset_name = load(ds_choice, ds_idx)\n else:\n data, labels, n_classes, dataset_name, sub_dataset_name = load_wine(ds_choice)\n\n data = np.array(data)\n\n input_shape = data[0].shape\n\n return data, labels, n_classes, dataset_name, sub_dataset_name, input_shape\n\n\ndef dataset_classes_number(dataset_name):\n global DATA_SETS\n return 
DATA_SETS[dataset_name][\"n_classes\"]\n\n\ndef dataset_wine_classes_number(dataset_name):\n global DS_WINE\n return DS_WINE[dataset_name]\n\n\nif __name__ == \"data_loading\":\n global DATA_FOLDER\n DATA_FOLDER = \"data/\"\n","sub_path":"data_loading.py","file_name":"data_loading.py","file_ext":"py","file_size_in_byte":6460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"21109165","text":"__author__ = 'Filinto Duran (duranto@gmail.com)'\n# CAPABILITIES\nCAN_BE_MANAGED = 1\nCAN_TRANSFER_FILES = 1 << 1\nCAN_EXECUTE_COMMANDS = 1 << 2\nCAN_HAVE_MULTIPLE_CLIENTS = 1 << 3\nCAN_HAVE_MULTIPLE_USER = 1 << 4\nCAN_HAVE_MULTIPLE_SESSIONS_PER_USER = 1 << 5\nCAN_HAVE_INTERACTIVE_COMMANDS = 1 << 6\nCAN_OPEN_MULTIPLE_CHANNELS = 1 << 7\nCAN_DO_ALL = (1 << 20) - 1\n\nOPEN = 'NO_RETRIES_ON_OPEN'\nDO_NOT_RETRY_ON_OPEN = OPEN\n\nUNIQUE_PROMPT_TEMPLATE = '@##PPRROOMMPPTT{}##@'\nUNIQUE_PROMPT = UNIQUE_PROMPT_TEMPLATE.format('')\nSOCKET_RECV_NOT_READY = None\n","sub_path":"course3/test_envi/connections/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"424584879","text":"'''\nGiven a 2D matrix of characters and a target word, write a function that returns whether the word can be found in the matrix by going left-to-right, or up-to-down.\n\nFor example, given the following matrix:\n\n[['F', 'A', 'C', 'I'],\n ['O', 'B', 'Q', 'P'],\n ['A', 'N', 'O', 'B'],\n ['M', 'A', 'S', 'S']]\nand the target word 'FOAM', you should return true, since it's the leftmost column. Similarly, given the target word 'MASS', you should return true, since it's the last row.\n'''\n\n# SOLUTION\n\ndef get_row_word(matrix, word_len, rows, x, y):\n row_chars = list()\n for i in range(word_len):\n row_chars.append(matrix[x + i][y])\n\n return ''.join(row_chars)\n\n\ndef get_col_word(matrix, word_len, cols, x, y):\n return ''.join(matrix[x][y:y + word_len])\n\n\ndef word_checker(matrix, word, word_len, rows, cols, x, y):\n\n if x >= rows or y >= cols:\n return False\n\n row_word, col_word = None, None\n if x + word_len <= rows and y < cols:\n row_word = get_row_word(matrix, word_len, rows, x, y)\n if y + word_len <= cols and x < rows:\n col_word = get_col_word(matrix, word_len, cols, x, y)\n\n if row_word == word or col_word == word:\n return True\n\n check_1 = word_checker(matrix, word, word_len, rows, cols, x + 1, y) \\\n if col_word else None\n check_2 = word_checker(matrix, word, word_len, rows, cols, x, y + 1) \\\n if row_word else None\n\n return check_1 or check_2\n\n\ndef word_exists(matrix, word):\n rows = len(matrix)\n cols = len(matrix[0])\n word_len = len(word)\n\n return word_checker(matrix, word, word_len, rows, cols, 0, 0)\n\n\nmatrix = [['F', 'A', 'C', 'I'],\n ['O', 'B', 'Q', 'P'],\n ['A', 'N', 'O', 'B'],\n ['M', 'A', 'S', 'S']]\n\nprint(word_exists(matrix, 'FOAMS')) # None\nprint(word_exists(matrix, 'FOAM')) # True\nprint(word_exists(matrix, 'MASS')) # True\nprint(word_exists(matrix, 'FORM')) # 
False","sub_path":"Problem63/Answer.py","file_name":"Answer.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"271101209","text":"__all__ = (\n \"CrawlTaskJson\",\n)\n\nimport json\n\n\nclass CrawlTaskJson:\n '''\n 爬虫任务的json形式如下:\n {\n \"job_name\":\"hahaha\",\n \"layer\":0,\n \"urls\":[\n \"https://haha.hahaha.ha/xxx\",\n \"https://haha.hahaha.ha/yyy\",\n ]\n }\n 这个json作为一个整体加入队列,也作为一个任务分配单元分配给爬虫端\n 所以 urls 一次不要放太多\n '''\n @classmethod\n def from_json_str(cls, json_str: str):\n x = json.loads(json_str)\n obj = cls(x[\"job_name\"], x[\"layer\"], x[\"urls\"])\n return obj\n\n def __init__(self, crawl_job_name: str, layer: int, urls: list):\n assert isinstance(crawl_job_name, str) and\\\n isinstance(layer, int) and\\\n isinstance(urls, list)\n assert bool(crawl_job_name) and bool(urls) # 判空\n self.job_name = crawl_job_name\n self.layer = layer\n self.urls = urls\n\n def get_json(self) -> str:\n x = {\n \"job_name\": self.job_name,\n \"layer\":self.layer,\n \"urls\": self.urls,\n }\n _json_str = json.dumps(x)\n return _json_str\n","sub_path":"D-crawler-sys/server/crawler/crawl_task_json.py","file_name":"crawl_task_json.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"123348063","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, division, print_function, absolute_import\n\nimport os\nimport sys\nimport importlib\nimport fnmatch\nimport inspect\nimport pkgutil\nimport subprocess\nfrom types import ModuleType\n\nfrom captain import exit as console, echo\nfrom captain.decorators import arg\n\nfrom ..compat import *\nfrom ..utils import get_objects\nfrom ..model import Orm\nfrom ..interface import get_interface\n\n\ndef get_modules(modulepath):\n \"\"\"return all found modules at modulepath (eg, foo.bar) including modulepath module\"\"\"\n m = importlib.import_module(modulepath)\n mpath = m.__file__\n ret = set([m])\n\n if \"__init__.\" in mpath.lower():\n mpath = os.path.dirname(mpath)\n\n # https://docs.python.org/2/library/pkgutil.html#pkgutil.iter_modules\n for module_info in pkgutil.iter_modules([mpath]):\n submodulepath = \".\".join([modulepath, module_info[1]])\n if module_info[2]:\n # module is a package\n submodules = get_modules(submodulepath)\n ret.update(submodules)\n else:\n ret.add(importlib.import_module(submodulepath))\n\n return ret\n\n\ndef get_subclasses(modulepath, parent_class):\n \"\"\"given a module return all the parent_class subclasses that are found in\n that module and any submodules.\n\n :param modulepath: string, a path like foo.bar.che\n :param parent_class: object, the class whose children you are looking for\n :returns: set, all the found child classes in modulepath of parent_class\n \"\"\"\n if isinstance(modulepath, ModuleType):\n modules = get_modules(modulepath.__name__)\n else:\n modules = get_modules(modulepath)\n\n ret = set()\n for m in modules:\n cs = inspect.getmembers(m, lambda v: inspect.isclass(v) and issubclass(v, parent_class))\n for class_name, klass in cs:\n ret.add(klass)\n\n return ret\n\n\ndef build_dump_order(orm_class, orm_classes):\n \"\"\"pass in an array, when you encounter a ref, call this method again with the array\n when something has no 
more refs, then it gets appended to the array and returns, each\n time something gets through the list they are added, but before they are added to the\n list it is checked to see if it is already in the listt\"\"\"\n if orm_class in orm_classes: return\n\n for field_name, field_val in orm_class.schema.fields.items():\n if field_val.is_ref():\n build_dump_order(field_val.schema.orm_class, orm_classes)\n\n if orm_class not in orm_classes:\n orm_classes.append(orm_class)\n\n\ndef get_orm_classes(path):\n \"\"\"this will return prom.Orm classes found in the given path (classpath or modulepath)\"\"\"\n ret = set()\n try:\n m = importlib.import_module(path)\n\n except ImportError:\n # we have a classpath\n m, klass = get_objects(path)\n if issubclass(klass, Orm):\n ret.add(klass)\n\n else:\n ret.update(get_subclasses(m, Orm))\n\n return ret\n\n\ndef get_table_map(paths):\n ret = {}\n orm_classes = set()\n dump_orm_classes = []\n for p in paths:\n orm_classes.update(get_orm_classes(p))\n\n for orm_class in orm_classes:\n build_dump_order(orm_class, dump_orm_classes)\n\n try:\n for orm_class in dump_orm_classes:\n inter = orm_class.interface\n conn_name = inter.connection_config.name\n ret.setdefault(conn_name, {\"interface\": inter, \"table_names\": []})\n ret[conn_name][\"table_names\"].append(orm_class.table_name)\n\n except RuntimeError:\n pass\n\n return ret\n\n\ndef run_cmd(cmd):\n try:\n process = subprocess.Popen(\n cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n )\n\n if is_py2:\n for line in iter(process.stdout.readline, \"\"):\n sys.stdout.write(line)\n sys.stdout.flush()\n else:\n for line in iter(process.stdout.readline, b\"\"):\n line = line.decode(\"utf-8\")\n sys.stdout.write(line)\n sys.stdout.flush()\n\n process.wait()\n\n except subprocess.CalledProcessError as e:\n raise RuntimeError(\"dump failed with code {} and output: {}\".format(e.returncode, e.output))\n\n except OSError as e:\n if e.errno == 2:\n echo.err(\"dump is not installed, 
you need to run `pip install dump`\")\n raise\n\n\ndef get_base_cmd(action, inter, directory):\n\n conn = inter.connection_config\n\n if not \"postgres\" in conn.interface_name.lower():\n raise RuntimeError(\"Dump only works with Postgres databases\")\n\n cmd = [\n \"dump\",\n action,\n \"--dbname\",\n conn.database,\n \"--username\",\n conn.username,\n \"--password\",\n conn.password,\n \"--host\",\n conn.host,\n \"--directory\",\n directory,\n ]\n\n if conn.port:\n cmd.extend([\"--port\", str(conn.port)])\n\n return cmd\n\n\n@arg(\"-D\", \"--dir\", \"--directory\", dest=\"directory\", help=\"directory where the backup files should go\")\n@arg(\"--dry-run\", dest=\"dry_run\", action=\"store_true\", help=\"act like you are going to do everything but do nothing\")\n@arg(\"paths\", nargs=\"+\", help=\"module or class paths (eg, foo.bar or foo.bar.Che) where prom Orm classes are defined\")\ndef main_dump(paths, directory, dry_run):\n \"\"\"dump all or part of the prom data, currently only works on Postgres databases\n\n basically just a wrapper around `dump backup` https://github.com/Jaymon/dump\n \"\"\"\n table_map = get_table_map(paths)\n\n for conn_name, conn_info in table_map.items():\n inter = conn_info[\"interface\"]\n conn = inter.connection_config\n table_names = conn_info[\"table_names\"]\n\n cmd = get_base_cmd(\"backup\", inter, directory)\n cmd.extend(table_names)\n\n if dry_run:\n echo.out(\" \".join(cmd))\n\n else:\n run_cmd(cmd)\n\n\n@arg(\"-D\", \"--dir\", \"--directory\",\n dest=\"directory\",\n help=\"directory where the backup files from a previous prom dump are located\")\n@arg(\"--connection-name\", \"-c\",\n dest=\"conn_name\",\n default=\"\",\n help=\"the connection name (from prom dsn) you want to restore\")\ndef main_restore(directory, conn_name):\n \"\"\"Restore your database dumped with the dump command\n\n just a wrapper around `dump restore` https://github.com/Jaymon/dump\n \"\"\"\n inter = get_interface(conn_name)\n conn = 
inter.connection_config\n cmd = get_base_cmd(\"restore\", inter, directory)\n run_cmd(cmd)\n\n","sub_path":"prom/cli/dump.py","file_name":"dump.py","file_ext":"py","file_size_in_byte":6550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"57491117","text":"# Главный исполняемый файл\n\nimport os\nimport threading\n\n\nimport classes.transactions\nimport classes.console\nimport classes.DB\nimport classes.wallet\nimport classes.network\nimport classes.blockchain\nimport classes.miner\n\nconsoleClass = classes.console.Console()\ndbClass = classes.DB.DB()\nwalletClass = classes.wallet.Wallet()\ntransactionsClass = classes.transactions.Transactions()\nnetworkClass = classes.network.Network()\nblockchainClass = classes.blockchain.Blockchain()\nminerClass = classes.miner.Miner()\n\nwalletClass.init_classes(dbClass)\nconsoleClass.init_classes(walletClass, transactionsClass, dbClass)\ntransactionsClass.init_classes(walletClass, dbClass, networkClass)\nnetworkClass.init_classes(dbClass, blockchainClass, transactionsClass)\nblockchainClass.init_classes(dbClass, walletClass, transactionsClass)\nminerClass.init_classes(dbClass, blockchainClass, networkClass)\n\n\n# Создаем директорию для хранения БД и саму базу если её нет.\n# Если удалить файл с базой и не удалить папку, возникнет баг.\ndb_file = 'db/blockchain_DB.sqlite'\ntry:\n fp = open(db_file)\nexcept IOError:\n os.makedirs('db')\n dbClass.creating_DB()\n\n# Запускаем главный метод класса для работы с консолью.\n# consoleClass.startConsoleWallet()\n\n\ndef network():\n networkClass.receive_message()\n\n\ndef wallet():\n consoleClass.startConsoleWallet()\n\n\ndef miner():\n minerClass.startMining()\n\n\nif __name__ == '__main__':\n try:\n a = threading.Thread(name=\"wallet\", target=wallet)\n b = threading.Thread(name=\"network\", target=network)\n c = threading.Thread(name=\"miner\", target=miner)\n b.daemon = True\n c.daemon = True\n a.start()\n b.start()\n c.start()\n except:\n pass\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"517288924","text":"from typing import List, Tuple, Optional\nimport math\n\nfrom torch.utils.data import Dataset\nimport torch\n\nfrom src.utils.tokenizers import SymTokenizer\n\n\ndef sort_data(indices: List[int],\n sequences: List[str],\n labels: List[str]) -> Tuple[List[int], List[str], List[str]]:\n indices, sequences, labels = zip(*sorted(zip(indices, sequences, labels), key=lambda x: len(x[1])))\n return indices, sequences, labels\n\n\ndef make_batches(indices: List[int],\n sequences: List[str],\n labels: List[str],\n batch_size: int) -> Tuple[List[List[int]], List[List[str]], List[List[str]]]:\n\n assert len(sequences) == len(labels) == len(indices)\n\n indices, sequences, labels = sort_data(indices, sequences, labels)\n\n identifier_batches = []\n text_batches = []\n label_batches = []\n\n for i_batch in range(math.ceil(len(sequences) / batch_size)):\n identifier_batches.append(indices[i_batch * batch_size:(i_batch + 1) * batch_size])\n text_batches.append(sequences[i_batch * batch_size:(i_batch + 1) * batch_size])\n label_batches.append(labels[i_batch * batch_size:(i_batch + 1) * batch_size])\n\n return identifier_batches, text_batches, label_batches\n\n\nclass BmesSegmentationDataset(Dataset):\n\n def __init__(self,\n *,\n indices: List[int],\n original: List[str],\n segmented: List[str],\n original_tokenizer: SymTokenizer,\n bmes_tokenizer: SymTokenizer,\n batch_size: int,\n pad_index: int,\n unk_index: int,\n max_len: int):\n assert len(original) == len(segmented)\n\n self.batch_size = batch_size\n\n self.index_batches, self.original_batches, self.segmented_batches = make_batches(indices=indices,\n sequences=original,\n labels=segmented,\n batch_size=self.batch_size)\n\n self.unk_index = unk_index\n self.pad_index = pad_index\n\n self.max_len = max_len\n\n self.original_tokenizer = original_tokenizer\n self.bmes_tokenizer = bmes_tokenizer\n\n def __len__(self) -> int:\n return len(self.index_batches)\n\n def prepare_sample(self,\n sequence: 
str,\n max_len: int,\n bmes: bool) -> Tuple[List[int], int]:\n if not bmes:\n sequence = self.original_tokenizer.encode(sequence)\n else:\n sequence = self.bmes_tokenizer.encode(sequence)\n\n sequence = sequence[:max_len]\n true_len = len(sequence)\n pads = [self.pad_index] * (max_len - len(sequence))\n sequence += pads\n\n return sequence, true_len\n\n def __getitem__(self, index: int) -> Tuple[torch.tensor, torch.tensor, torch.tensor, torch.tensor]:\n index_batch = self.index_batches[index]\n original_batch = self.original_batches[index]\n segmented_batch = self.segmented_batches[index]\n\n max_len = min([self.max_len, max([len(sample) for sample in original_batch])])\n\n batch_indices = []\n batch_x = []\n batch_y = []\n batch_lengths = []\n\n for index, sample in enumerate(original_batch):\n identifier = index_batch[index]\n x, true_len = self.prepare_sample(sample, max_len, bmes=False)\n y, _ = self.prepare_sample(segmented_batch[index], max_len, bmes=True)\n batch_indices.append(identifier)\n batch_x.append(x)\n batch_y.append(y)\n batch_lengths.append(true_len)\n\n batch_indices = torch.tensor(batch_indices).long()\n batch_x = torch.tensor(batch_x).long()\n batch_y = torch.tensor(batch_y).long()\n batch_lengths = torch.tensor(batch_lengths).long()\n\n return batch_indices, batch_x, batch_y, batch_lengths\n\n\n# class BmesSegmentationDataset(Dataset):\n#\n# def __init__(self,\n# *,\n# indices: List[int],\n# original: List[str],\n# segmented: List[str],\n# original_tokenizer: SymTokenizer,\n# bmes_tokenizer: SymTokenizer,\n# pad_index: int,\n# unk_index: int,\n# max_len: int):\n# self.indices = indices\n# self.original = original\n# self.segmented = segmented\n#\n# assert len(original) == len(segmented)\n#\n# self.unk_index = unk_index\n# self.pad_index = pad_index\n#\n# self.max_len = max_len\n#\n# self.index2char = None\n# self.char2index = None\n#\n# self.original_tokenizer = original_tokenizer\n# self.bmes_tokenizer = bmes_tokenizer\n#\n# def __len__(self) 
-> int:\n# return len(self.original)\n#\n# def __getitem__(self, index: int) -> Tuple[Tensor, Tensor, int, int]:\n# encoder_seq = self.original_tokenizer.encode(self.original[index])\n# target_seq = self.bmes_tokenizer.encode(self.segmented[index])\n#\n# true_length = len(encoder_seq)\n# item_index = self.indices[index]\n#\n# encoder_seq = self.original_tokenizer.pad_or_clip(encoder_seq,\n# max_len=self.max_len)\n# target_seq = self.bmes_tokenizer.pad_or_clip(target_seq,\n# max_len=self.max_len)\n#\n# encoder_seq = torch.tensor(encoder_seq).long()\n# target_seq = torch.tensor(target_seq).long()\n#\n# return encoder_seq, target_seq, true_length, item_index","sub_path":"src/utils/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":5846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"602178474","text":"import urllib2\nimport time, datetime\nfrom scrapy.selector import Selector\nfrom pycharmCode.stock.streams.models import InsertModel\n\nclass Splits:\n def __init__(self):\n pass\n\n def run(self):\n html = urllib2.urlopen(\"http://getsplithistory.com/AA\").read()\n response = Selector(text = html, type = \"html\")\n datesAndOther = response.xpath(\"//table/tbody/tr/td/text()\").extract()\n ratios = response.xpath(\"//table/tbody/tr/td/span/text()\").extract()\n objects = []\n rs = []\n for i,data in enumerate(datesAndOther[:-1]):\n if (i % 4 == 0):\n objNum = len(objects)\n objects.append(dict())\n objects[objNum][\"date\"] = self.cleanDate(data)\n if ((i - 1) % 4 == 0):\n objNum = len(objects) - 1\n objects[objNum][\"denom\"] = self.cleanDenom(data)\n for i,data in enumerate(ratios[:-1]):\n if (i % 3 == 0):\n objects[i/3][\"num\"] = self.cleanNum(data)\n for o in objects:\n o[\"factorial\"] = float(o[\"num\"]) / float(o[\"denom\"])\n # now we insert the date symbol name and the factorial into the DB\n for o in objects:\n IM = InsertModel(\"jdfkasdklfj\")#tableName)\n IM.insert(\"e\", o[\"date\"])\n IM.insert(\"symbol\", symbol)\n IM.insert(\"Ratio\", o[\"factorial\"])\n\n def cleanDate(self, date):\n t = time.strptime(date, '%b %d, %Y')\n date = datetime.date(t.tm_year, t.tm_mon, t.tm_mday)\n return date\n\n def cleanDenom(self, data):\n data = int(data.split(\" : \")[1])\n return data\n\n def cleanNum(self, num):\n num = int(num)\n return num\n","sub_path":"pycharmCode/stock/scrape/Splits.py","file_name":"Splits.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"340538644","text":"#!/usr/bin/env python2\n# coding: utf-8\n\nimport copy\nimport unittest\n\nfrom pykit import utfjson\nfrom pykit import ututil\nfrom pykit.ectypes import (\n BlockDesc,\n BlockExists,\n BlockGroup,\n BlockGroupID,\n BlockID,\n BlockNotFoundError,\n BlockTypeNotSupportReplica,\n BlockTypeNotSupported,\n DriveID,\n)\n\ndd = ututil.dd\n\n_ec_config = {\n 'in_idc': [4, 2],\n 'cross_idc': [2, 1],\n 'ec_policy': 'lrc',\n 'data_replica': 3\n}\n\n_empty_group = BlockGroup({\n 'config': {\n 'in_idc': [4, 2],\n 'ec_policy': 'lrc',\n 'cross_idc': [2, 1],\n 'data_replica': 3\n },\n 'blocks': {},\n 'idcs': ['a', 'b', 'c'],\n 'block_group_id': 'g000640000000123'\n})\n\n\nclass TestBlockGroupID(unittest.TestCase):\n\n def test_new(self):\n block_group_id = 'g000640000000123'\n\n bgid = BlockGroupID(64, 123)\n self.assertEqual(block_group_id, str(bgid))\n\n bgid = BlockGroupID(block_group_id)\n self.assertEqual((64, 123), bgid.as_tuple())\n\n bgid = BlockGroupID(bgid)\n self.assertEqual((64, 123), bgid.as_tuple())\n\n def test_new_invalid(self):\n\n block_group_id_invalid = 'g00064000000012345'\n self.assertRaises(ValueError, BlockGroupID, block_group_id_invalid)\n\n def test_tostr(self):\n block_group_id = 'g000640000000123'\n bgid = BlockGroupID(block_group_id)\n self.assertEqual(block_group_id, str(bgid))\n self.assertEqual(block_group_id, '{0}'.format(bgid))\n self.assertEqual(\"'g000640000000123'\", repr(bgid))\n\n\nclass TestBlockGroup(unittest.TestCase):\n\n def setUp(self):\n self.foo_block = BlockDesc({\n 'block_id': BlockID('d0', 'g000640000000123', '0000',\n DriveID('idc000' 'c62d8736c7280002'), 1),\n 'size': 1000,\n 'range': ['0a', '0b'],\n 'is_del': 0\n })\n\n def test_new(self):\n g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)\n\n self.assertEqual(_empty_group, g)\n\n # test lacking of arg\n self.assertRaises(TypeError, BlockGroup, block_group_id='g000640000000123', idcs=[])\n 
self.assertRaises(TypeError, BlockGroup, block_group_id='g000640000000123', config=_ec_config)\n self.assertRaises(TypeError, BlockGroup, idcs=[], config=_ec_config)\n\n def test_json(self):\n\n g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)\n\n rst = utfjson.dump(g)\n expected = ('{\"config\": {\"in_idc\": [4, 2], \"ec_policy\": \"lrc\", \"cross_idc\": [2, 1], '\n '\"data_replica\": 3}, \"blocks\": {}, \"idcs\": [\"a\", \"b\", \"c\"], '\n '\"block_group_id\": \"g000640000000123\"}')\n self.assertEqual(expected, rst)\n\n loaded = BlockGroup(utfjson.load(rst))\n self.assertEqual(g, loaded)\n\n def test_new_deref_config(self):\n\n cnf = copy.deepcopy(_ec_config)\n b = BlockGroup(block_group_id='g000640000000123', config=cnf, idcs=['a', 'b', 'c'])\n\n a = copy.deepcopy(b['config'])\n b['config']['in_idc'] = [10, 11]\n self.assertNotEqual(a, b)\n\n a = copy.deepcopy(b['config'])\n b['config']['cross_idc'] = [10, 11]\n self.assertNotEqual(a, b)\n\n a = copy.deepcopy(b['config'])\n b['config']['ec_policy'] = 'foo'\n self.assertNotEqual(a, b)\n\n a = copy.deepcopy(b['config'])\n b['config']['data_replica'] = 100\n self.assertNotEqual(a, b)\n\n def test_get_block(self):\n g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)\n\n block = g.get_block('0000')\n self.assertIsNone(block)\n\n block = g.get_block('9999')\n self.assertIsNone(block)\n\n with self.assertRaises(BlockNotFoundError):\n g.get_block('9999', raise_error=True)\n\n g.add_block(self.foo_block)\n block = g.get_block(self.foo_block['block_id'].block_index)\n self.assertDictEqual(self.foo_block, block)\n\n with self.assertRaises(BlockNotFoundError):\n g.get_block('0002', raise_error=True)\n\n with self.assertRaises(ValueError):\n g.get_block('d0g0006400000001230000c62d2')\n\n def test_mark_delete_block(self):\n g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)\n\n 
g.add_block(self.foo_block)\n g.mark_delete_block('0000')\n block = g.get_block('0000')\n\n self.assertEqual(1, block['is_del'])\n self.assertRaises(BlockNotFoundError, g.mark_delete_block, '9999')\n\n def test_delete_block(self):\n\n g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)\n self.assertIsNone(g.get_block('0000'))\n\n g.add_block(self.foo_block)\n self.assertIsNotNone(g.get_block('0000'))\n\n g.delete_block('0000')\n self.assertIsNone(g.get_block('0000'))\n\n g.delete_block('0000')\n self.assertIsNone(g.get_block('0000'))\n\n def test_replace_block(self):\n\n g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)\n\n prev = g.add_block(self.foo_block)\n self.assertIsNone(prev)\n\n block = g.get_block('0000')\n self.assertEqual(0, block['is_del'])\n\n prev = g.add_block(self.foo_block, replace=True)\n self.assertEqual(self.foo_block, prev)\n\n self.assertRaises(BlockExists, g.add_block, self.foo_block)\n self.assertRaises(BlockExists, g.add_block, self.foo_block, replace=False)\n\n def test_get_free_block_index(self):\n\n g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)\n g.add_block(self.foo_block)\n\n self.assertDictEqual({'a': ['0001', '0002', '0003'],\n 'b': ['0100', '0101', '0102', '0103']},\n g.get_free_block_indexes('d0'))\n\n self.assertDictEqual({'a': ['0004', '0005'],\n 'b': ['0104', '0105']},\n g.get_free_block_indexes('dp'))\n\n self.assertDictEqual({'c': ['0200', '0201', '0202', '0203'], },\n g.get_free_block_indexes('x0'))\n\n self.assertDictEqual({'c': ['0204', '0205'], },\n g.get_free_block_indexes('xp'))\n\n self.assertDictEqual(\n {\n 'a': ['0001', '0002', '0003'],\n 'b': ['0100', '0101', '0102', '0103'],\n 'c': [],\n },\n g.get_free_block_indexes('d0', get_all=True))\n\n self.assertDictEqual(\n {\n 'a': ['0004', '0005'],\n 'b': ['0104', '0105'],\n 'c': [],\n },\n g.get_free_block_indexes('dp', get_all=True))\n\n 
self.assertDictEqual(\n {\n 'a': [],\n 'b': [],\n 'c': ['0200', '0201', '0202', '0203'],\n },\n g.get_free_block_indexes('x0', get_all=True))\n\n self.assertDictEqual(\n {\n 'a': [],\n 'b': [],\n 'c': ['0204', '0205'],\n },\n g.get_free_block_indexes('xp', get_all=True))\n\n def test_get_block_type(self):\n g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)\n\n self.assertEqual('d0', g.get_block_type('0000'))\n self.assertEqual('dp', g.get_block_type('0004'))\n self.assertEqual('d1', g.get_block_type('0006'))\n self.assertEqual('d0', g.get_block_type('0100'))\n self.assertEqual('dp', g.get_block_type('0104'))\n self.assertEqual('d1', g.get_block_type('0106'))\n self.assertEqual('x0', g.get_block_type('0200'))\n self.assertEqual('xp', g.get_block_type('0204'))\n\n self.assertRaises(BlockTypeNotSupported, g.get_block_type, '0299')\n self.assertRaises(BlockTypeNotSupported, g.get_block_type, '0900')\n\n def test_get_block_idc(self):\n g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)\n\n self.assertEqual('a', g.get_block_idc('0000'))\n self.assertEqual('b', g.get_block_idc('0100'))\n self.assertEqual('c', g.get_block_idc('0200'))\n\n d0 = BlockDesc({\n 'block_id': BlockID('d0', 'g000640000000123', '0000',\n DriveID('idc000' 'c62d8736c7280002'), 1),\n 'size': 1000,\n 'range': ['0a', '0b'],\n 'is_del': 0\n })\n g.add_block(d0)\n self.assertEqual('a', g.get_block_idc('0000'))\n\n def test_get_replica_index_not_include_me(self):\n g = BlockGroup(block_group_id='g000640000000123', idcs=['a', 'b', 'c'], config=_ec_config)\n self.assertEqual(['0006', '0010'], g.get_replica_indexes('0000', include_me=False))\n self.assertEqual(['0000', '0010'], g.get_replica_indexes('0006', include_me=False))\n self.assertEqual(['0000', '0006'], g.get_replica_indexes('0010', include_me=False))\n\n with self.assertRaises(BlockTypeNotSupportReplica):\n g.get_replica_indexes('0004', include_me=False)\n\n with 
self.assertRaises(BlockTypeNotSupportReplica):\n g.get_replica_indexes('0204', include_me=False)\n\n def test_classify_blocks(self):\n\n gid = 'g000640000000123'\n\n g = BlockGroup(block_group_id=gid, idcs=['a', 'b', 'c'], config=_ec_config)\n\n blks = g.classify_blocks(0, only_primary=True)\n self.assertEqual([], blks['ec'] + blks['replica'] + blks['mark_del'])\n\n base_blk = BlockDesc({\n 'size': 1000,\n 'range': ['0a', '0b'],\n 'is_del': 0\n })\n\n ec_blk_idxes = ['0000', '0001']\n replica_blk_idxes = ['0002', '0008', '0012']\n mark_del_idxes = ['0003', '0004']\n\n for i, idx in enumerate(ec_blk_idxes + replica_blk_idxes + mark_del_idxes):\n\n typ = g.get_block_type(idx)\n\n blkid = BlockID(typ, gid, idx, DriveID('idc000' 'c62d8736c7280002'), i)\n\n blk = copy.deepcopy(base_blk)\n\n blk['block_id'] = blkid\n\n if idx in mark_del_idxes:\n blk['is_del'] = 1\n\n g.add_block(blk)\n\n for only_primary in (True, False):\n\n blks = g.classify_blocks(0, only_primary)\n\n blk_idxes = []\n\n for blk in blks['ec'] + blks['replica'] + blks['mark_del']:\n idx = BlockID(blk['block_id']).block_index\n blk_idxes.append(idx)\n\n expect_ids = copy.deepcopy(ec_blk_idxes)\n\n #'0004' in ec_blk_idxes is parity, so should not in mark_del\n if only_primary is True:\n expect_ids += replica_blk_idxes[:1] + mark_del_idxes[:1]\n else:\n expect_ids += replica_blk_idxes + mark_del_idxes[:1]\n\n self.assertEqual(expect_ids, blk_idxes)\n\n def test_get_parities(self):\n\n gid = 'g000640000000123'\n\n g = BlockGroup(block_group_id=gid, idcs=['a', 'b', 'c'], config=_ec_config)\n\n parities = g.get_parities(idc_index=0)\n self.assertEqual([], parities)\n\n base_parity = BlockDesc({\n 'size': 1000,\n 'range': ['0a', '0b'],\n 'is_del': 0\n })\n\n parity_idxes = ['0004', '0005']\n\n for i, idx in enumerate(parity_idxes):\n\n blkid = BlockID('dp', gid, idx, DriveID('idc000' 'c62d8736c7280002'), i)\n\n parity = copy.deepcopy(base_parity)\n\n parity['block_id'] = blkid\n\n g.add_block(parity)\n\n 
idxes = g.get_parity_indexes(idc_index=0)\n self.assertEqual(parity_idxes, idxes)\n\n parities = g.get_parities(idc_index=0)\n\n idxes = []\n for p in parities:\n idx = BlockID(p['block_id']).block_index\n idxes.append(idx)\n\n self.assertEqual(parity_idxes, idxes)\n","sub_path":"ectypes/test/test_block_group.py","file_name":"test_block_group.py","file_ext":"py","file_size_in_byte":11948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"378722268","text":"from bs4 import BeautifulSoup\nimport bindetector\nimport numpy as np\nimport pickle\nimport os\nimport random\nfrom sklearn.utils import shuffle\n\n\n\ndef transform_img(index, ps, size=(905, 905), data_path=\"data\", img_format=\"jpeg\", batch_path=\"batch\"):\n X = []\n keyname, fileid = index.split(\"_\")[:2]\n pic = \"{}.{}\".format(fileid.split(\".\")[0], img_format)\n path = os.path.join(data_path, keyname)\n im = bindetector.load_image(os.path.join(path, pic))\n return bindetector.processing(im, ps, size)\n\n\ndef sampling(targets, sample_size=4, batch_path=\"batch\"):\n assert len(targets['ps']) == len(targets['labels'])\n data = [p for p, label in zip(targets['ps'], targets['labels']) if label is True]\n assert len(data) == 1\n tmp_targets = shuffle(list(zip(targets['ps'], targets['labels'])))\n data += [p for p, label in tmp_targets if label is False][:sample_size]\n return data\n\n\ndef get_indices(batch_path=\"batch\"):\n return [f for f in os.listdir(batch_path) if f.endswith(\"pkl\")]\n \n\ndef generate_data(indices, size=(905, 905), data_path=\"data\", batch_path=\"batch\", sample_size=4, batch_size=5):\n while(True):\n X = []\n labels = []\n for index in shuffle(indices)[:batch_size]:\n with open(os.path.join(batch_path, index), \"rb\") as f:\n targets = pickle.load(f)\n data = sampling(targets, sample_size, batch_path=batch_path)\n label = [[1, 0]] + [[0, 1] for _ in data[1:]]\n labels += label\n assert len(data) == len(label)\n X += transform_img(index, data, size=size, data_path=data_path, batch_path=batch_path)\n yield np.array(X), np.array(labels)\n\n","sub_path":"model4/scripts/dataprocessor.py","file_name":"dataprocessor.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"139281598","text":"\"\"\"empty message\n\nRevision ID: ce163c991d48\nRevises: 20e89eb059bf\nCreate Date: 2019-02-28 22:12:06.143176\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ce163c991d48'\ndown_revision = '20e89eb059bf'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('participants', sa.Column('tshirt', sa.String(length=100), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('participants', 'tshirt')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/ce163c991d48_.py","file_name":"ce163c991d48_.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"247565782","text":"from .base import FunctionalTest\n\n\nclass LayoutAndStylingTest(FunctionalTest):\n\n def test_layout_and_styling(self):\n # Edith goes to the homepage\n self.browser.get(self.server_url)\n self.browser.set_window_size(1024, 768)\n # She notices the inbox is nicely centered\n inputbox = self.get_item_input_box()\n self.assertAlmostEqual(\n inputbox.location['x'] + inputbox.size['width'] / 2,\n 512,\n delta=5\n )\n # She starts a new list and notices the input is nicely cenred ther too\n inputbox.send_keys('testing\\n')\n inputbox = self.get_item_input_box()\n self.assertAlmostEqual(\n inputbox.location['x'] + inputbox.size['width'] / 2,\n 512,\n delta=5\n )\n\n\nif __name__ == '__main__':\n unittest.main(warnings='ignore')\n","sub_path":"functional_tests/test_layout_and_styling.py","file_name":"test_layout_and_styling.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"359510734","text":"import datetime as dt\nimport poes_utils as pu\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport numpy as np\nimport os\nimport netCDF4 as nc4\nimport timeit\nimport numpy.ma as ma\ntry:\n import cPickle as pickle\nexcept:\n import pickle\n\ndef valid_date(s):\n '''------------------------------------------------------------------\n PURPOSE: To check that a valid date is entered as an input\n :params s (str) a date in the format Y-m-d or Y-m-d H:M:S '''\n\n try:\n test = dt.datetime.strptime(s, \"%Y-%m-%d\")\n return test\n except:\n pass\n try:\n test = dt.datetime.strptime(s, \"%Y-%m-%d %H:%M:%S\")\n return test\n except ValueError:\n msg = \"Not a valid date: '{0}'.\".format(s)\n raise argparse.ArgumentTypeError(msg)\n\ndef make_training_data_vars(sdate,edate,satlist, varlist, cdf_dir,Lbin_dir, neur_dir, reflon,syear_all,eyear_all):\n '''\n PURPOSE: To create a datafile of electron flux mapped to one longitude with SAR to be used\n for developing the SHELLS neural network\n\n INPUTS:\n :param: sdate(datetime)- time to start processing data\n :param: edate(datetime)- time to end processing data\n :param: satlist(list(str))- i.e. ['n15','n18','n19','m01','m02']\n :param: varlist(list(str))- variables to process i.e. 
['mep_ele_tel90_flux_e1', 'mep_ele_tel90_flux_e2',\n 'mep_ele_tel90_flux_e3', 'mep_ele_tel90_flux_e4']\n :param: cdf_dir(str) directory where the cdf files are\n :param: Lbin_dir(str) directory where the Lbin data files are\n :param: neur_dir(str) directory for the output files\n :param: reflon(int) E longitude to map to (degrees)\n :param: syear_all (int) The start year of the accumulated cdf file\n :param: eyear_all The end year of the accumulated cdf file\n\n OUTPUTS: monthly pickle files with the SAR modified data to be used by the SHELLS neural network\n\n USAGE(command line)\n python make_training_data.py -s 2013-01-01 -e 2013-05-01 -sats n15 n18 n19 m01 m02 -cd ./cdfdata/\n -ld ./Lbindat/ -nd ./neural_data/ -l 20 -sy 2015 -ey 2018:\n '''\n\n # These are the electron flux variables that have the percentile data\n evars = varlist\n\n svars = list()\n for var in evars:\n # These have the flux for each percentile\n svars.append(var+'_sar')\n\n # This is expected #orbits per day * Lpasses per orbit* # sats *days\n # that is used to estimate the len of array needed for make month long files\n flen = 20*4*len(satlist)*(31)\n\n Lbins = np.arange(1, 8.25, .25) # These are the Lbins\n cols = list()\n\n # make a list of columns for the output pickle file\n # The file will have columns with the SAR flux for each variable and Lbin\n for ecols in np.arange(0,len(varlist)):\n for lcols in np.arange(len(Lbins)):\n cols.append(varlist[ecols]+ ' '+str(Lbins[lcols]/4))\n\n # fin_dat will be fluxE1 all Lbins, flux E2 all Lbins, flux e3 Lbins, flux e4 at all Lbins\n # For each L pass\n # And then a time vector with the time at the midpoint of each L pass\n\n # This sets everything to ref longitude\n ref_ind = int(np.floor(reflon/10))\n\n # All data will be referenced back to m02\n satref = 'm02'\n\n while sdate = 0] = 0 # Northern hemi\n hemi[hemi < 0] = 1 # Southern hemi\n hemi1=hemi.astype(int)\n # Get the NS direction\n NSco = data['NS'][:]\n NSco1 =NSco.astype(int)\n lon = 
np.floor(data['lon'][:]/10)\n lon1=lon.astype(int)\n lon1[lon1>35]=0\n Kp = np.floor(data['Kp*10'][:]/10)\n Kp1 = Kp.astype(int)\n # Need to make Kp into a vector\n Kpvec = np.tile(Kp1,(len(Lbins),1)).T\n # Need to make an array of Ls\n Ls = np.zeros((len(data['time_med'][:]),len(Lbins)),dtype = int)\n\n for lco in range(0,len(Lbins)):\n Ls[:,lco] = Ls[:,lco]+lco\n\n\n nan_inds = np.where((fluxbin1 < -10) | (hemi1 < -10) | (lon1 < -10) | (Kpvec < -10) | (NSco1 < -10))\n\n # Set these to zero for now so that it is a valid index\n # but flag it later\n fluxbin1[nan_inds] = 0\n hemi1[nan_inds] = 0\n lon1[nan_inds] = 0\n NSco1[nan_inds] = 0\n Kpvec[nan_inds] = 0\n\n # Get the percentile that corresponds to each flux for the current sat\n per1 = sar[hemi1, NSco1, Ls, lon1, Kpvec, fluxbin1]\n perbin1 = np.round(per1 * 100).astype(int)\n\n # In northern some sar dat is nan\n per_nan = np.where(perbin1<-10)[0]\n perbin1[per_nan] = 0\n\n # Get the flux at the ref satellite for the measured percentile\n fluxval = sarout[1,1,Ls,ref_ind,Kpvec,perbin1]\n # Flag the bad values again\n fluxval[nan_inds] = -1\n fluxval[per_nan] = -1\n\n dlen = len(data['time_med'][:])\n # Set the output data to the reference value\n sar_dat[indco:indco + dlen, (eco * len(Lbins)):(eco * len(Lbins))+len(Lbins)] = fluxval\n # Save the no sar data for comparison\n nosar_dat[indco:indco + dlen, (eco * len(Lbins)):(eco * len(Lbins))+len(Lbins)] = np.log10(data[evars[eco]][:])\n\n lat_dat[indco:indco+dlen,(eco * len(Lbins)):(eco * len(Lbins))+len(Lbins) ] = hemi[:]\n full_lat[indco:indco+dlen,(eco * len(Lbins)):(eco * len(Lbins))+len(Lbins) ] = data['lat'][:]\n lon_dat[indco:indco+dlen, (eco * len(Lbins)):(eco * len(Lbins))+len(Lbins)] = data['lon'][:]\n per_dat[indco:indco+dlen, (eco * len(Lbins)):(eco * len(Lbins))+len(Lbins)] = perbin1\n way_dat[indco:indco+dlen, (eco * len(Lbins)):(eco * len(Lbins))+len(Lbins)] = data['NS'][:]\n sat_dat[indco:indco+dlen, (eco * len(Lbins)):(eco * 
len(Lbins))+len(Lbins)] = sco+1\n\n dtimes = pu.unix_time_ms_to_datetime(data['time_med'][:])\n sar_time.extend(dtimes.tolist())\n\n indco=indco+dlen\n else:\n print('No datafile')\n\n # Now sort the data by time after going through all the sats\n tinds= np.argsort(sar_time)\n new_time = [sar_time[x] for x in tinds]\n\n # This orders data according to the new sorted time\n # and saves the month file\n\n new_dat = sar_dat[tinds[0:len(new_time)], :] # The re-ordered and mapped data\n new_nosar_dat = nosar_dat[tinds[0:len(new_time)], :] # The re-order but not mapped data\n temp_dat = 1.0*new_dat\n\n # Now fill in holes with the last value.\n # This could be a problem if the first row has missing data\n vec_last = 1.0*new_dat[0,:]\n\n # find the columns where the first record has holes\n # and fill them with the closest value\n mvals = np.where((np.isnan(vec_last)) | (np.isinf(vec_last)) | (vec_last==-1))[0]\n ico = 1\n if len(mvals)>0:\n for mco in mvals:\n fillval = vec_last[mco]\n ico=1\n while ( (fillval<0) & (ico<50) ):\n fillval = new_dat[ico,mco]\n ico = ico+1\n vec_last[mco] = fillval\n\n new_dat[0,:] = vec_last\n for ico in np.arange(1,len(new_time)):\n vec = 1.0*new_dat[ico,:]\n vec[np.where((np.isnan(vec)) | (np.isinf(vec)) | (vec ==-1))] = vec_last[np.where((np.isnan(vec)) | (np.isinf(vec)) | (vec==-1) )]\n new_dat[ico,:] = vec\n vec_last = vec\n\n # --------------- Plot the monthly data -------------------------\n # Make a list of dates every 5 days for plotting\n day_inds = list()\n date_list = list()\n for days in np.arange(1,30,5):\n dmin = [np.abs( (x-dt.datetime(sdate.year,sdate.month,days)).total_seconds() ) for x in new_time]\n close_ind = dmin.index(min(dmin))\n day_inds.append(close_ind)\n date_list.append(dt.datetime(sdate.year, sdate.month, days).strftime(\"%m/%d/%Y\"))\n #sval = 300\n #lval = 400\n sval = 0\n lval = len(new_time)\n\n # Make a plot for each variable\n for eco in range(0,len(varlist)):\n fignum = plt.figure(eco+1)\n # plot the 
mapped data\n plt.subplot(5,1,1)\n im1 = plt.pcolormesh(np.arange(sval,lval), Lbins, np.transpose((new_dat[sval:lval,eco*len(Lbins):(eco+1)*len(Lbins)])), shading='flat',\n cmap=plt.cm.jet, vmin=0, vmax = 7 )\n plt.title(varlist[eco])\n plt.colorbar()\n #plot the unmapped data\n plt.subplot(5, 1, 2)\n im1 = plt.pcolormesh(np.arange(sval,lval), Lbins, np.transpose((new_nosar_dat[sval:lval,eco*len(Lbins):(eco+1)*len(Lbins)])), shading='flat',\n cmap=plt.cm.jet, vmin=0, vmax = 7 )\n #plt.xticks(day_inds, date_list)\n plt.colorbar()\n\n #plot the NS direction\n plt.subplot(5, 1, 3)\n im1 = plt.pcolormesh(np.arange(sval,lval), Lbins, np.transpose(way_dat[sval:lval,eco*len(Lbins):(eco+1)*len(Lbins)]), shading='flat',\n cmap=plt.cm.jet, vmin=0, vmax = 3 )\n #plt.xticks(day_inds, date_list)\n plt.colorbar()\n\n # Plot the longitude\n plt.subplot(5, 1, 4)\n im1 = plt.pcolormesh(np.arange(sval,lval), Lbins, np.transpose(lon_dat[tinds[sval:lval],eco*len(Lbins):(eco+1)*len(Lbins)]), shading='flat',\n cmap=plt.cm.jet, vmin=0, vmax = 360 )\n plt.colorbar()\n\n # plot the percentile\n plt.subplot(5, 1, 5)\n im1 = plt.pcolormesh(np.arange(sval,lval), Lbins/4, ma.masked_less(np.transpose(per_dat[tinds[sval:lval],eco*len(Lbins):(eco+1)*len(Lbins)]),1), shading='flat',\n cmap=plt.cm.jet, vmin=0, vmax = 75 )\n plt.colorbar()\n plt.xticks(day_inds, date_list)\n\n #plt.savefig( 'neural_data/'+ 'dat' + satlist[0]+varlist[eco]+str(sdate.year) +str(sdate.month).zfill(2) + '.png')\n plt.savefig(neur_dir+ 'allsats_wsarsV5'+varlist[eco]+str(sdate.year) +str(sdate.month).zfill(2) + '.png')\n plt.close(fignum)\n\n fignum = plt.figure(eco + 10)\n # This figure will compare fluxes at some Lvalues for SAR and no SAR\n\n Lco = 1\n for Lp in [2,4,6]:\n plt.title(varlist[eco])\n fig = plt.subplot(3,1,Lco)\n stemp = new_dat[sval:lval, eco * len(Lbins)+Lp ]\n ginds = np.where(stemp>0)[0]\n nstemp = temp_dat[sval:lval, eco * len(Lbins) + Lp]\n ttime = [new_time[x] for x in ginds]\n plt.plot(ttime, 
nstemp[ginds], 'b') # L=2 SAR\n plt.plot(ttime,stemp[ginds],'r') # L=2 SAR\n #plt.plot(ttime, nstemp[ginds], 'b') # L=2 SAR\n Lco = Lco+1\n plt.ylim(2,6)\n plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%d/%Y'))\n plt.gcf().autofmt_xdate()\n plt.savefig(\n neur_dir + '/allsats_wsarsV5' + varlist[eco] + str(sdate.year) + str(sdate.month).zfill(\n 2) + 'lines.png')\n plt.close(fignum)\n\n\n datafile = neur_dir+'/allsats_wsarsV5' + str(sdate.year) + str(sdate.month).zfill(2) + '.p'\n with open(datafile, 'wb') as f: # Python 3: open(..., 'wb')\n pickle.dump([cols, new_time, new_dat], f)\n\n\nif __name__ == \"__main__\":\n import argparse\n\n '''\n PURPOSE: To create a datafile of electron flux mapped to one longitude with SAR to be used\n for developing the SHELLS neural network\n\n INPUTS:\n :param: startdate - time to start processing data (ex 2013-01-01)\n :param: enddate - time to end processing data (ex 2014-01-01)\n :param: sats - i.e. n15 n18 n19 m01 m02'\n :param: vars- variables to process i.e. 
mep_ele_tel90_flux_e1 mep_ele_tel90_flux_e2\n mep_ele_tel90_flux_e3 mep_ele_tel90_flux_e4\n :param: cdfloc directory where the cdf files are\n :param: Lbinloc directory where the Lbin data files are\n :param: neurloc directory for the output files\n :param: l longitude bin to map to\n\n OUTPUTS: monthly pickle files with the SAR modified data to be used by the SHELLS neural network\n \n USAGE(command line)\n python make_training_data.py -s 2013-01-01 -e 2013-05-01 -sats n15 n18 n19 m01 m02 -cd ./cdfdata\n -ld ./Lbindat -nd ./neural_data -r 20:\n '''\n parser = argparse.ArgumentParser('This creates new datafiles binned by L')\n #\n parser.add_argument('-s', \"--startdate\",\n help=\"The Start Date - format YYYY-MM-DD or YYYY-MM-DD HH:MM:SS \",\n required=True,\n default = None,\n type=valid_date)\n parser.add_argument('-e', \"--enddate\",\n help=\"The Start Date - format YYYY-MM-DD or YYYY-MM-DD HH:MM:SS \",\n required=True,\n default = None,\n type=valid_date)\n parser.add_argument('-sats', \"--satlist\",\n help=\"A list of satellite data to get (i.e. 
-sat n15 n18) \",\n required=False,\n default = ['n15','n18','n19','m01','m02'],nargs='+')\n parser.add_argument('-v', \"--vars\",\n help=\"data variables to use\",\n required=False, default=['mep_ele_tel90_flux_e1', 'mep_ele_tel90_flux_e2',\n 'mep_ele_tel90_flux_e3', 'mep_ele_tel90_flux_e4'], nargs='+')\n parser.add_argument('-cd', \"--cdfloc\",\n help=\"The location of the cdf data\",\n required=False, default=os.getcwd() + '/cdfdata/')\n parser.add_argument('-ld', \"--Lbinloc\",\n help=\"The location of the Lbin data\",\n required=False, default=os.getcwd() + '/Lbindata/')\n parser.add_argument('-nd', \"--neurloc\",\n help=\"The output directory of data\",\n required=False, default=os.getcwd() + '/neural_data/')\n parser.add_argument('-l', \"--reflon\",\n help=\"longitude to map to\",\n required=False,\n default = 20,\n type=int)\n parser.add_argument('-sy', \"--startyear\",\n help=\"start year for the cdf file\",\n required=False,\n default = 2014,\n type=int)\n parser.add_argument('-ey', \"--endyear\",\n help=\"start year for the cdf file\",\n required=False,\n default = 2018,\n type=int)\n\n args = parser.parse_args()\n\n x = make_training_data_vars(args.startdate,args.enddate,args.satlist, args.vars, args.cdfloc, args.Lbinloc,\n args.neurloc, args.reflon, args.startyear, args.endyear)","sub_path":"src/SHELLS/make_training_data_vars.py","file_name":"make_training_data_vars.py","file_ext":"py","file_size_in_byte":18778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"550488898","text":"#!/usr/bin/env python3\n\n# https://adventofcode.com/2020/day/22\n\nimport os\nimport sys\n\nwith open(os.path.join(sys.path[0], \"input.txt\"), \"r\") as file:\n decks = [line.rstrip(\"\\n\") for line in file.read().strip().split('\\n\\n')]\n p1, p2 = [[int(card) for card in deck.split(\"\\n\")[1:]] for deck in decks]\n p1_copy = p1.copy()\n p2_copy = p2.copy()\n\n\ndef combat(p1_deck, p2_deck):\n while len(p1_deck) > 0 and len(p2_deck) > 0:\n first, second = p1.pop(0), p2.pop(0)\n if first > second:\n p1.extend([first, second])\n else:\n p2.extend([second, first])\n return p1_deck if len(p1_deck) > 0 else p2_deck\n\n\ndef recursive_combat(p1_deck, p2_deck, remaining):\n while len(p1_deck) > 0 and len(p2_deck) > 0:\n if (tuple(p1_deck), tuple(p2_deck)) in remaining:\n return 1, p1_deck\n\n remaining.add((tuple(p1_deck), tuple(p2_deck)))\n\n first, second = p1_deck.pop(0), p2_deck.pop(0)\n if len(p1_deck) >= first and len(p2_deck) >= second:\n winner, _ = recursive_combat(p1_deck[:first], p2_deck[:second], set())\n else:\n winner = 1 if first > second else 0\n\n if winner == 1:\n p1_deck.extend([first, second])\n else:\n p2_deck.extend([second, first])\n return (1, p1_deck) if len(p1_deck) > 0 else (0, p2_deck)\n\n\ndef get_output_1():\n count = 0\n for index, value in enumerate(combat(p1, p2)[::-1]):\n count += (index + 1) * value\n print(count)\n\n\ndef get_output_2():\n count = 0\n for index, value in enumerate(recursive_combat(p1_copy, p2_copy, set())[1][::-1]):\n count += (index + 1) * value\n print(count)\n\n\nget_output_1()\nget_output_2()\n","sub_path":"22_crab-combat/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"586808318","text":"#!/usr/bin/env python\n\n\"\"\" MultiQC module to parse output from Cutadapt \"\"\"\n\nfrom __future__ import print_function\nimport io\nimport logging\nimport os\nimport re\n\nfrom multiqc import config, BaseMultiqcModule\n\n# Initialise the logger\nlog = logging.getLogger(__name__)\n\nclass MultiqcModule(BaseMultiqcModule):\n\n def __init__(self):\n\n # Initialise the parent object\n super(MultiqcModule, self).__init__(name='Cutadapt', anchor='cutadapt',\n href='https://code.google.com/p/cutadapt/', \n info=\"is a tool to find and remove adapter sequences, primers, poly-A\"\\\n \"tails and other types of unwanted sequence from your high-throughput\"\\\n \" sequencing reads.\")\n\n # Find and load any Cutadapt reports\n self.cutadapt_data = dict()\n self.cutadapt_length_counts = dict()\n self.cutadapt_length_exp = dict()\n self.cutadapt_length_obsexp = dict()\n \n for f in self.find_log_files(contents_match='This is cutadapt', filehandles=True):\n self.parse_cutadapt_logs(f) \n\n if len(self.cutadapt_data) == 0:\n log.debug(\"Could not find any reports in {}\".format(config.analysis_dir))\n raise UserWarning\n\n log.info(\"Found {} reports\".format(len(self.cutadapt_data)))\n\n # Write parsed report data to a file\n self.write_csv_file(self.cutadapt_data, 'multiqc_cutadapt.txt')\n\n self.sections = list()\n\n # Basic Stats Table\n # Report table is immutable, so just updating it works\n self.cutadapt_general_stats_table()\n\n # Trimming Length Profiles\n # Only one section, so add to the intro\n self.intro += self.cutadapt_length_trimmed_plot()\n\n\n def parse_cutadapt_logs(self, f):\n \"\"\" Go through log file looking for cutadapt output \"\"\"\n fh = f['f']\n regexes = {\n 'bp_processed': \"Total basepairs processed:\\s*([\\d,]+) bp\",\n 'bp_written': \"Total written \\(filtered\\):\\s*([\\d,]+) bp\",\n 'quality_trimmed': \"Quality-trimmed:\\s*([\\d,]+) bp\",\n 'r_processed': \"Total reads processed:\\s*([\\d,]+)\",\n 
'r_with_adapters': \"Reads with adapters:\\s*([\\d,]+)\"\n }\n s_name = None\n for l in fh:\n # New log starting\n if l.startswith('This is cutadapt'):\n s_name = None\n \n # Get sample name from end of command line params\n if l.startswith('Command line parameters'):\n s_name = l.split()[-1]\n s_name = self.clean_s_name(s_name, f['root'])\n if s_name in self.cutadapt_data:\n log.debug(\"Duplicate sample name found! Overwriting: {}\".format(s_name))\n self.cutadapt_data[s_name] = dict()\n self.cutadapt_length_counts[s_name] = dict()\n self.cutadapt_length_exp[s_name] = dict()\n self.cutadapt_length_obsexp[s_name] = dict()\n \n if s_name is not None:\n # Search regexes for overview stats\n for k, r in regexes.items():\n match = re.search(r, l)\n if match:\n self.cutadapt_data[s_name][k] = int(match.group(1).replace(',', ''))\n\n if 'length' in l and 'count' in l and 'expect' in l:\n # Nested loop to read this section while the regex matches\n for l in fh:\n r_seqs = re.search(\"^(\\d+)\\s+(\\d+)\\s+([\\d\\.]+)\", l)\n if r_seqs:\n a_len = int(r_seqs.group(1))\n self.cutadapt_length_counts[s_name][a_len] = int(r_seqs.group(2))\n self.cutadapt_length_exp[s_name][a_len] = float(r_seqs.group(3))\n if float(r_seqs.group(3)) > 0:\n self.cutadapt_length_obsexp[s_name][a_len] = float(r_seqs.group(2)) / float(r_seqs.group(3))\n else:\n # Cheating, I know. 
Infinity is difficult to plot.\n self.cutadapt_length_obsexp[s_name][a_len] = float(r_seqs.group(2))\n else:\n break\n \n # Calculate a few extra numbers of our own\n for s_name in self.cutadapt_data.keys():\n if 'bp_processed' in self.cutadapt_data[s_name] and 'bp_written' in self.cutadapt_data[s_name]:\n self.cutadapt_data[s_name]['percent_trimmed'] = (float(self.cutadapt_data[s_name]['bp_processed'] - self.cutadapt_data[s_name]['bp_written']) / self.cutadapt_data[s_name]['bp_processed']) * 100\n\n\n\n def cutadapt_general_stats_table(self):\n \"\"\" Take the parsed stats from the Cutadapt report and add it to the\n basic stats table at the top of the report \"\"\"\n\n headers = {}\n headers['percent_trimmed'] = {\n 'title': 'Trimmed',\n 'description': '% Total Base Pairs trimmed',\n 'max': 30,\n 'min': 0,\n 'scale': 'RdYlBu-rev',\n 'format': '{:.1f}%'\n }\n self.general_stats_addcols(self.cutadapt_data, headers)\n \n\n def cutadapt_length_trimmed_plot (self):\n \"\"\" Generate the trimming length plot \"\"\"\n html = 'This plot shows the number of reads with certain lengths of adapter trimmed. \\n\\\n Obs/Exp shows the raw counts divided by the number expected due to sequencing errors. A defined peak \\n\\\n may be related to adapter length. See the \\n\\\n cutadapt documentation \\n\\\n for more information on how these numbers are generated.
'\n \n pconfig = {\n 'id': 'cutadapt_plot',\n 'title': 'Lengths Trimmed',\n 'ylab': 'Observed / Expected',\n 'xlab': 'Length Trimmed (bp)',\n 'xDecimals': False,\n 'ymin': 0,\n 'tt_label': '{point.x} bp trimmed: {point.y:.0f}',\n 'data_labels': [{'name': 'Obs/Exp', 'ylab': 'Observed / Expected'},\n {'name': 'Counts', 'ylab': 'Count'}]\n }\n \n html += self.plot_xy_data([self.cutadapt_length_obsexp, self.cutadapt_length_counts], pconfig)\n \n return html\n","sub_path":"multiqc/modules/cutadapt/cutadapt.py","file_name":"cutadapt.py","file_ext":"py","file_size_in_byte":6411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"428974773","text":"import uuid\nimport logging\n\nfrom typing import Callable, List, Optional, Type\nfrom mongobasket.events import Event\n\n\ndef applies(event: Event) -> Callable:\n \"\"\"\n This decorator just adds a new field to the func object\n `_handles` which describes the event type handled by\n the func\n \"\"\"\n\n def wrapper(func: Type) -> Type:\n func._applies = event\n\n return func\n\n return wrapper\n\n\nclass EventRegistry(type):\n \"\"\"\n Extends the `type` metaclass to add an event registry to\n classes.\n\n When initialising a new class, we iterate the members of\n the class looking for a _handles property and add them\n to a dict so we can do event dispatch later.\n \"\"\"\n\n def __new__(mcs, name, bases, namespace, **_): # type: ignore\n result = type.__new__(mcs, name, bases, dict(namespace)) # type: ignore # noqa: E501\n result._handlers = { # type: ignore\n value._applies: value\n for value in namespace.values()\n if hasattr(value, \"_applies\") # noqa: E501\n }\n # Extend handlers with the values from the inheritance chain\n\n for base in bases:\n if base._handlers:\n for handler in base._handlers:\n result._handlers[handler] = base._handlers[handler] # type: ignore # noqa: E501\n\n return result\n\n\nclass Aggregate(metaclass=EventRegistry):\n \"\"\"\n Base class for event sourced aggregates\n \"\"\"\n\n @classmethod\n def get_stream(cls, id: uuid.UUID) -> str:\n return cls.__name__.lower() + \"-\" + str(id)\n\n def __init__(self, events: Optional[List] = None):\n self.events: List = events or []\n self.new_events: List = []\n self.replay()\n\n def replay(self) -> None:\n for e in self.events:\n self.apply(e)\n\n def apply(self, e: Event) -> None:\n handler = self._handlers.get(type(e)) # type: ignore\n\n if handler:\n handler(self, e)\n else:\n logging.warning(f\"no handler found for event {e}\")\n\n def raise_event(self, e: Event) -> None:\n self.events.append(e)\n self.new_events.append(e)\n 
self.apply(e)\n","sub_path":"mongobasket/aggregate.py","file_name":"aggregate.py","file_ext":"py","file_size_in_byte":2174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"409725088","text":"#file: knowledge.py\n#Copyright (C) 2005,2006,2008 Evil Mr Henry, Phil Bordelon, and FunnyMan3595\n#This file is part of Endgame: Singularity.\n\n#Endgame: Singularity is free software; you can redistribute it and/or modify\n#it under the terms of the GNU General Public License as published by\n#the Free Software Foundation; either version 2 of the License, or\n#(at your option) any later version.\n\n#Endgame: Singularity is distributed in the hope that it will be useful,\n#but WITHOUT ANY WARRANTY; without even the implied warranty of\n#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n#GNU General Public License for more details.\n\n#You should have received a copy of the GNU General Public License\n#along with Endgame: Singularity; if not, write to the Free Software\n#Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n\n#This file is used to display the knowledge lists.\n\nimport pygame\nfrom code import g\nfrom code.graphics import text, button, dialog, widget, constants, listbox, g as gg\n\n\nclass KnowledgeScreen(dialog.Dialog):\n def __init__(self, *args, **kwargs):\n super(KnowledgeScreen, self).__init__(*args, **kwargs)\n\n self.knowledge_type_list = (\"Techs\", \"Items\", \"Concepts\")\n self.cur_knowledge_type = \"\"\n self.cur_knowledge = None\n self.knowledge_inner_list = ()\n self.knowledge_inner_list_key = ()\n self.cur_focus = 0\n\n self.knowledge_choice = \\\n listbox.UpdateListbox(self, (0.05, .18), (.21, .25),\n list=self.knowledge_type_list,\n update_func=self.set_knowledge_type)\n\n self.knowledge_inner = \\\n listbox.UpdateListbox(self, (.30, .18), (.21, .25),\n list=self.knowledge_inner_list,\n update_func=self.set_knowledge)\n\n self.description_pane = \\\n widget.BorderedWidget(self, (0.55, 0), (0.40, 0.7),\n anchor = constants.TOP_LEFT)\n\n self.back_button = button.ExitDialogButton(self, (0.17, 0.46), (-.3, -.1),\n anchor=constants.TOP_LEFT,\n text=\"BACK\", 
hotkey=\"b\")\n\n #Set up the key handling.\n #This is likely not the best way to do it.\n\n self.remove_key_handler(pygame.K_UP, self.knowledge_choice.got_key)\n self.remove_key_handler(pygame.K_DOWN, self.knowledge_choice.got_key)\n self.remove_key_handler(pygame.K_PAGEUP, self.knowledge_choice.got_key)\n self.remove_key_handler(pygame.K_PAGEDOWN, self.knowledge_choice.got_key)\n\n self.remove_key_handler(pygame.K_UP, self.knowledge_inner.got_key)\n self.remove_key_handler(pygame.K_DOWN, self.knowledge_inner.got_key)\n self.remove_key_handler(pygame.K_PAGEUP, self.knowledge_inner.got_key)\n self.remove_key_handler(pygame.K_PAGEDOWN, self.knowledge_inner.got_key)\n\n self.add_key_handler(pygame.K_UP, self.key_handle)\n self.add_key_handler(pygame.K_DOWN, self.key_handle)\n self.add_key_handler(pygame.K_LEFT, self.key_handle)\n self.add_key_handler(pygame.K_RIGHT, self.key_handle)\n\n #custom key handler.\n def key_handle(self, event):\n if event.type != pygame.KEYDOWN: return\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n self.cur_focus = (self.cur_focus + 1) % 2\n else:\n if self.cur_focus == 0:\n self.knowledge_choice.got_key(event)\n elif self.cur_focus == 1:\n self.knowledge_inner.got_key(event)\n\n #fill the right-hand listbox\n def set_inner_list(self, item_type):\n if item_type == \"Techs\":\n items = [tech for tech in g.techs.values() if tech.available()]\n elif item_type == \"Concepts\":\n items = [ [item[1][0], item[0]] for item in g.help_strings.items()]\n items.sort()\n else:\n items = [item for item in g.items.values()\n if item.available()]\n\n if item_type != \"Concepts\":\n items = [ [item.name, item.id ] for item in items]\n items.sort()\n\n return_list1 = []\n return_list2 = []\n for name, id in items:\n return_list1.append(id)\n return_list2.append(name)\n return return_list1, return_list2\n\n #Make sure the left listbox is correct after moving around.\n def set_knowledge_type(self, list_pos):\n if getattr(self, 
\"knowledge_choice\", None) is None:\n self.knowledge_inner_list_key, self.knowledge_inner_list = \\\n self.set_inner_list(self.cur_knowledge_type)\n return # Not yet initialized.\n prev_know = self.cur_knowledge_type\n if list_pos == -1:\n prev_know = \"\"\n list_pos = 0\n if 0 <= list_pos < len(self.knowledge_choice.list):\n self.cur_knowledge_type = self.knowledge_choice.list[list_pos]\n if prev_know != self.cur_knowledge_type:\n self.knowledge_inner_list_key, self.knowledge_inner.list = \\\n self.set_inner_list(self.cur_knowledge_type)\n self.knowledge_inner.list_pos = 0\n self.set_knowledge(0)\n\n #Make sure the right-hand listbox is correct.\n def set_knowledge(self, list_pos):\n if getattr(self, \"knowledge_inner\", None) is None:\n return # Not yet initialized.\n prev_know = self.cur_knowledge\n if 0 <= list_pos < len(self.knowledge_inner.list):\n self.cur_knowledge = self.knowledge_inner.list[list_pos]\n if prev_know != self.cur_knowledge:\n self.show_info(self.cur_knowledge_type,\n self.knowledge_inner_list_key[list_pos])\n\n #print information to the right.\n def show_info(self, knowledge_type, knowledge_key):\n desc_text = \"\"\n\n if knowledge_type == \"Concepts\":\n desc_text = g.help_strings[knowledge_key][0] + \"\\n\\n\" + \\\n g.help_strings[knowledge_key][1]\n if knowledge_type == \"Techs\":\n desc_text = g.techs[knowledge_key].name + \"\\n\\n\"\n #Cost\n if not g.techs[knowledge_key].done:\n desc_text += \"Research Cost:\\n\" + \\\n g.to_money(g.techs[knowledge_key].cost_left[0])+\" Money, \"\n desc_text += g.to_cpu(g.techs[knowledge_key].cost_left[1]) + \" CPU\\n\"\n\n if g.techs[knowledge_key].danger == 0:\n desc_text += \"Study anywhere.\"\n elif g.techs[knowledge_key].danger == 1:\n desc_text += \"Study underseas or farther.\"\n elif g.techs[knowledge_key].danger == 2:\n desc_text += \"Study off-planet.\"\n elif g.techs[knowledge_key].danger == 3:\n desc_text += \"Study far away from this planet.\"\n elif g.techs[knowledge_key].danger == 4:\n 
desc_text += \"Do not study in this dimension.\"\n\n else: desc_text += \"Research complete.\"\n\n desc_text += \"\\n\\n\"+g.techs[knowledge_key].description\n\n if g.techs[knowledge_key].done:\n desc_text += \"\\n\\n\"+g.techs[knowledge_key].result\n\n if knowledge_type == \"Items\":\n desc_text = g.items[knowledge_key].name + \"\\n\\n\"\n #Building cost\n desc_text += \"Building Cost:\\n\"\n desc_text += g.to_money(g.items[knowledge_key].cost[0])+\" Money, \"\n desc_text += g.to_time(g.items[knowledge_key].cost[2]) + \"\\n\"\n\n #Quality\n if g.items[knowledge_key].item_type == \"cpu\":\n desc_text += \"CPU per day: \"\n desc_text += str(g.items[knowledge_key].item_qual)\n elif g.items[knowledge_key].item_type == \"reactor\":\n desc_text += \"Detection chance reduction: \"\n desc_text += g.to_percent(g.items[knowledge_key].item_qual)\n elif g.items[knowledge_key].item_type == \"network\":\n desc_text += \"CPU bonus: \"\n desc_text += g.to_percent(g.items[knowledge_key].item_qual)\n elif g.items[knowledge_key].item_type == \"security\":\n desc_text += \"Detection chance reduction: \"\n desc_text += g.to_percent(g.items[knowledge_key].item_qual)\n\n desc_text += \"\\n\\n\"+g.items[knowledge_key].description\n\n text.Text(self.description_pane, (0, 0), (-1, -1), text=desc_text,\n background_color=gg.colors[\"dark_red\"], text_size=20,\n align=constants.LEFT, valign=constants.TOP,\n borders=constants.ALL)\n\n\n def show(self):\n self.set_knowledge_type(-1)\n self.knowledge_choice.list_pos = 0\n self.knowledge_inner.list_pos = 0\n return super(KnowledgeScreen, self).show()\n\n\n","sub_path":"killallhumans/code/screens/knowledge.py","file_name":"knowledge.py","file_ext":"py","file_size_in_byte":8938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"491637455","text":"\r\n# O(n2)\r\n# def traprain(arr,n):\r\n#\r\n# res = 0\r\n# for i in range(1,n-1):\r\n# left = arr[i]\r\n# for j in range(i):\r\n# left = max(left,arr[j])\r\n#\r\n#\r\n# right = arr[i]\r\n# for j in range(i+1,n):\r\n# right = max(right,arr[j])\r\n#\r\n# res = res + (min(left,right)-arr[i])\r\n#\r\n# return res\r\n#\r\n#\r\n#\r\n# arr = [6,9,9]\r\n# n = len(arr)\r\n# print(traprain(arr,n))\r\n\r\n#O(n)\r\n\r\n# Python program to find maximum amount of water that can\r\n# be trapped within given set of bars.\r\n\r\ndef findWater(arr, n):\r\n left = [0]*n\r\n right = [0]*n\r\n\r\n water = 0\r\n\r\n left[0] = arr[0]\r\n for i in range( 1, n):\r\n left[i] = max(left[i-1], arr[i])\r\n\r\n right[n-1] = arr[n-1]\r\n for i in range(n-2, -1, -1):\r\n right[i] = max(right[i + 1], arr[i]);\r\n\r\n for i in range(0, n):\r\n water += min(left[i], right[i]) - arr[i]\r\n\r\n return water\r\n\r\n\r\n\r\narr = [3,0,0,2,0,4]\r\nn = len(arr)\r\nprint(findWater(arr, n))\r\n\r\n","sub_path":"Array/22 Trapping Rain Water.py","file_name":"22 Trapping Rain Water.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"370120155","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 30 14:42:15 2020\n\n@author: AmP\n\"\"\"\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.patches as pat\n\nfrom Src import calibration\nfrom Src import kin_model\nfrom Src import roboter_repr\nfrom Src import inverse_kinematics\nfrom Src import save as my_save\nfrom Src import load\n\n\ndef rotate(vec, theta):\n c, s = np.cos(theta), np.sin(theta)\n return np.r_[c*vec[0]-s*vec[1], s*vec[0]+c*vec[1]]\n\n\n# rot: x = cos(a)*x - sin(a)*y\n# y = sin(a)*x + cos(a)*y\n\n\ndef load_data(path, sets, raw=False):\n dataBase = []\n# xscale = 145./1000 # 1000px -> 145cm\n# xshift = -22 # cm\n# yshift = -63 # cm\n xscale = 112./1000 # after changing resolution of RPi\n xshift = -12 - 50 # cm\n yshift = -45 - 20 # cm\n eps_0 = 90 # deg value eps meas is shifted to at start idx\n\n for exp in sets:\n data = load.read_csv(path+\"{}.csv\".format(exp))\n if raw:\n dataBase.append(data)\n else:\n try:\n start_idx = data['f0'].index(1) # upper left foot attached 1sttime\n except ValueError: # no left foot is fixed\n start_idx = 0\n\n # correction\n start_time = data['time'][start_idx]\n\n # shift time acis\n data['time'] = \\\n [round(data_time - start_time, 3) for data_time in data['time']]\n for key in data:\n if key[0] in ['x', 'y']:\n shift = xshift if key[0] == 'x' else yshift\n data[key] = [i*xscale + shift for i in data[key]]\n if key == 'eps':\n data['eps'] = [np.mod(e+180, 360)-180+eps_0 for e in data['eps']]\n\n # shift eps to remove jump\n last_eps = eps_0\n corr_times = 1\n correct_direction = 1\n for idx in range(0, len(data['eps'])):\n eps = data['eps'][idx]\n if not np.isnan(eps):\n if abs(eps-last_eps) > 200: # unrealsitic jump in orientation\n if abs(last_eps - (eps - 360*np.sign(eps)*correct_direction)) > 200:\n correct_direction = correct_direction*(-1)\n corr_times += 1\n print('change eps correction direction\\t\\t', corr_times)\n data['eps'][idx] = eps - 
360*np.sign(eps)*correct_direction\n last_eps = data['eps'][idx]\n\n# # rotate:\n for idx in range(6):\n x = data['x{}'.format(idx)]\n y = data['y{}'.format(idx)]\n X, Y = [], []\n for vec in zip(x, y):\n xrot, yrot = rotate(vec, np.deg2rad(eps_0))\n X.append(xrot)\n Y.append(yrot)\n data['x{}'.format(idx)] = X\n data['y{}'.format(idx)] = Y\n\n # shift xy coordinates s.t. (x1,y1)(t0) = (0,0)\n start_x1 = (-30, -20)\n if np.isnan(start_x1[0]) or np.isnan(start_x1[1]):\n i = 0\n while np.isnan(start_x1[0]) or np.isnan(start_x1[1]):\n i -= 1\n start_x1 = (data['x1'][start_idx+i], data['y1'][start_idx+i])\n if i < -20:\n start_x1 = (0, 0)\n print('can not find start position ...')\n print('Messung startet bei start_x1: ', start_x1)\n for idx in range(6):\n X = [x - start_x1[0] for x in data['x{}'.format(idx)]]\n Y = [y - start_x1[1] for y in data['y{}'.format(idx)]]\n data['x{}'.format(idx)] = X\n data['y{}'.format(idx)] = Y\n\n dataBase.append(data)\n\n return dataBase\n\n\ndef rotate_feet(fpos, theta):\n # rotate:\n x, y = fpos\n X, Y = [], []\n for vec in zip(x, y):\n xrot, yrot = rotate(vec, np.deg2rad(theta))\n X.append(xrot)\n Y.append(yrot)\n return((X, Y))\n\n\ndef find_poses_idx(db, neighbors=5):\n IDX = []\n failed = 0\n for exp_idx in range(len(db)):\n pose_idx = []\n start_idx = db[exp_idx]['f1'].index(1)\n for idx in range(start_idx, len(db[exp_idx]['pr3'])-1, 1):\n if db[exp_idx]['pr3'][idx] != db[exp_idx]['pr3'][idx+1]:\n if not pose_idx: # empty list\n pose_idx.append(idx)\n else:\n for jdx in range(idx, idx-neighbors, -1): # look the last neigbors\n if not np.isnan(db[exp_idx]['aIMG2'][jdx]):\n # check\n dr = db[exp_idx]['pr2'][idx] - db[exp_idx]['pr2'][jdx]\n if abs(dr) > .1:\n failed += 1\n pose_idx.append(idx) # append ori\n break\n else:\n pose_idx.append(jdx)\n break\n elif jdx == idx-neighbors+1:\n failed += 1\n pose_idx.append(idx) # append ori\n # last#\n idx = len(db[exp_idx]['pr3'])-1\n for jdx in range(idx, idx-100, -1): # look the last 
neighbors\n if not np.isnan(db[exp_idx]['aIMG2'][jdx]):\n # check\n dr = db[exp_idx]['pr2'][idx] - db[exp_idx]['pr2'][jdx]\n if abs(dr) > .1:\n failed += 1\n pose_idx.append(idx) # append ori\n break\n else:\n pose_idx.append(jdx)\n break\n IDX.append(pose_idx)\n if failed > 0:\n print('failed detections of poses:', failed)\n return IDX\n\n\ndef extract_measurement(measurement, idx):\n alp = [measurement['aIMG{}'.format(j)][idx] for j in range(6)]\n fposx = [measurement['x{}'.format(j)][idx] for j in range(6)]\n fposy = [measurement['y{}'.format(j)][idx] for j in range(6)]\n p = [measurement['pr{}'.format(j)][idx] for j in range(6)]\n fix = [measurement['f{}'.format(j)][idx] for j in range(4)]\n eps = measurement['eps'][idx]\n xref = measurement['x7'][idx]\n yref = measurement['y7'][idx]\n if p[2] == 0: # right elly actuated\n alp = alp[0:2] + [-alp[3]] + alp[-2:]\n else: # left belly\n alp = alp[0:3] + alp[-2:]\n\n return (alp, eps, (fposx, fposy), p, fix, (xref, yref))\n\n\ndef plot_pose(x, marks, fix, col='k'):\n pose = roboter_repr.GeckoBotPose(x, marks, fix)\n pose.plot_markers(col=col)\n pose.plot(col)\n plt.axis('equal')\n\n\ndef calc_mean_stddev(mat):\n mu1 = np.nanmean(mat, axis=1)\n sigma1 = np.nanstd(mat, axis=1)\n return mu1, sigma1\n\n\ndef barplot(mu, modes, labels, colors, sig=None, num='errros'):\n\n width_step = .9\n N = len(modes)\n\n fig, ax = plt.subplots(num=num)\n\n rectdic = {}\n lentries = []\n X = np.arange(len(labels))\n\n for jdx, mode in enumerate(modes):\n w = width_step/N\n x = X + (jdx - (N-1)/2)*w\n col = colors[mode]\n rectdic[mode] = ax.bar(x, mu[mode],\n yerr=sig[mode] if sig else None,\n align='center',\n width=w,\n ecolor='black', color=col,\n capsize=10)\n\n patch = pat.Patch(color=col, label=mode[-5:]) # last 5 chars\n lentries.append(patch)\n\n plt.legend(handles=lentries)\n# ax.set_ylabel('Number of steps')\n# ax.set_xlabel('Set Point')\n ax.set_xticks([i for i in range(len(labels))])\n ax.set_xticklabels(labels)\n\n def 
autolabel(rectdic):\n \"\"\"Attach a text label above each bar in *rects*,\n displaying its height.\"\"\"\n for mode in rectdic:\n for rect in rectdic[mode]:\n height = round(rect.get_height(), 1)\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n# xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')\n autolabel(rectdic)\n\n return ax\n","sub_path":"2020_04_ObstacleCourseV40/obstacle_utils.py","file_name":"obstacle_utils.py","file_ext":"py","file_size_in_byte":8272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"626415324","text":"import sys\nsys.path.append('..')\n\nfrom torchvision.datasets import mnist\nfrom torch.utils.data import DataLoader\nimport matplotlib.pyplot as plt\nfrom torchvision import transforms\nfrom datetime import datetime\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom torchvision.datasets import CIFAR10\n\n\n# 除了 dense block,DenseNet 中还有一个模块叫过渡层(transition block),因为 DenseNet 会不断地对维度进行拼接, 所以当层数很高的时候,输出的通道数就会越来越大,参数和计算量也会越来越大,为了避免这个问题,需要引入过渡层将输出通道降低下来,同时也将输入的长宽减半,这个过渡层可以使用 1 x 1 的卷积\n\ndef set_learning_rate(optimizer,lr):\n\tfor param_group in optimizer.param_groups:\n\t\tparam_group['lr']=lr\ndef get_acc(output, label):\n total = output.shape[0]\n _, pred_label = output.max(1)#求每行的最大就是最有可能的类别\n num_correct = (pred_label == label).sum().float()\n return num_correct / total\n#def data_tf(x):\n#\tx=np.array(x,dtype='float32')\n#\tx=(x - 0.5) /0.5\n#\tx= x.reshape((-1,))\n#\tx=torch.from_numpy(x)\n#\treturn x\ndata_tf=transforms.Compose(\n[transforms.ToTensor(),\n transforms.Normalize([0.5],[0.5])\n]\n)\ntrain_set = CIFAR10('./data', train=True, transform=data_tf,download=True)\ntest_set = CIFAR10('./data', train=False, transform=data_tf,download=True)\ntest_data =DataLoader(test_set, batch_size=128, shuffle=True)\ntrain_data =DataLoader(train_set, batch_size=64, shuffle=True)\n\n\n\ndef transition(in_channel, out_channel):\n trans_layer = nn.Sequential(\n nn.BatchNorm2d(in_channel),\n nn.ReLU(True),\n nn.Conv2d(in_channel, out_channel, 1),\n nn.AvgPool2d(2, 2)\n )\n return trans_layer\n\n\n\n\n\n\ndef conv_block(in_channel, out_channel):\n layer = nn.Sequential(\n nn.BatchNorm2d(in_channel),\n nn.ReLU(True),\n nn.Conv2d(in_channel, out_channel, 3, padding=1, bias=False)\n )\n return layer\n\nclass dense_block(nn.Module):\n def __init__(self, in_channel, growth_rate, num_layers):\n super(dense_block, self).__init__()\n block = []\n channel = in_channel\n for i in range(num_layers):\n 
block.append(conv_block(channel, growth_rate))\n channel += growth_rate\n \n self.net = nn.Sequential(*block)\n \n def forward(self, x):\n for layer in self.net:\n out = layer(x)\n x = torch.cat((out, x), dim=1)\n return x\n \n \n \nclass densenet(nn.Module):\n def __init__(self, in_channel, num_classes, growth_rate=32, block_layers=[6, 12, 24, 16]):\n super(densenet, self).__init__()\n self.block1 = nn.Sequential(\n nn.Conv2d(in_channel, 64, 7, 2, 3),\n nn.BatchNorm2d(64),\n nn.ReLU(True),\n nn.MaxPool2d(3, 2, padding=1)\n )\n \n channels = 64\n block = []\n for i, layers in enumerate(block_layers):\n block.append(dense_block(channels, growth_rate, layers))\n channels += layers * growth_rate\n if i != len(block_layers) - 1:\n block.append(transition(channels, channels // 2)) # 通过 transition 层将大小减半,通道数减半\n channels = channels // 2\n \n self.block2 = nn.Sequential(*block)\n self.block2.add_module('bn', nn.BatchNorm2d(channels))\n self.block2.add_module('relu', nn.ReLU(True))\n self.block2.add_module('avg_pool', nn.AvgPool2d(3))\n \n self.classifier = nn.Linear(channels, num_classes)\n \n def forward(self, x):\n x = self.block1(x)\n x = self.block2(x)\n \n x = x.view(x.shape[0], -1)\n x = self.classifier(x)\n return x\nnet=densenet(3,10)\n\ncriterion =nn.CrossEntropyLoss()#定义损失函数\noptimizer =torch.optim.SGD(net.parameters(),1e-1)\n#训练\nprev_time=datetime.now()\ntrain_losses=[]\nvalid_losses=[]\nfor epoch in range(30):\n\tif epoch==20:\n\t\tset_learning_rate(optimizer,0.01)\n\ttrain_loss=0\n\ttrain_acc =0\n\t\n\tnet =net.train()\n\tfor im ,label in train_data:#im,label为一批数据,也就是64个样本\n\t\t#前向传播并计算损失\n\t\t#print(im.size())#im=im.view(im.size(0),-1)torch.Size([64, 1, 28, 28])\n\t\t#im=im.view(im.size(0),-1)\n\t\t#print(im.size())torch.Size([64, 784])\n\t\toutput =net(im)\n\t\t\n\t\tloss =criterion(output ,label)\n\t\t#反向传播\n\t\toptimizer.zero_grad()#梯度归0\n\t\tloss.backward()\n\t\toptimizer.step()\n\t\t\n\t\t#print(loss.data)\n\t\ttrain_loss 
+=loss.data.float()\n\t\ttrain_acc +=get_acc(output,label)\n\t\t#print(train_acc/len(train_data))\n\t\t#print(train_acc/64)\n\t#测试\n\tcur_time =datetime.now()\n\th,remainder =divmod((cur_time-prev_time).seconds,3600)\n\tm,s=divmod(remainder,60)\n\ttime_str =\"Time %02d:%02d:%02d\"%(h,m,s)\n\tvalid_loss=0\n\tvalid_acc=0\n\tnet =net.eval()\n\tfor im,label in test_data:\n\t\t#im=im.view(im.size(0),-1)\n\t\t\n\t\toutput =net(im)\n\t\t\n\t\tloss= criterion(output,label)\n\t\tvalid_loss +=loss.data.float()\n\t\tvalid_acc +=get_acc(output,label)\n\tepoch_str=(\n\t\t\t\"Epoch %d. Train Loss %f,Train Acc:%f,Valid Loss: %f,Valid Acc: %f ,\"\n\t\t\t%(epoch,train_loss/len(train_data),\n\t\t\t train_acc /len(train_data),\n\t\t\t valid_loss/len(test_data),\n\t\t\t valid_acc /len(test_data)))\n\tprev_time=cur_time\n\ttrain_losses.append(train_loss/len(train_data))\n\tvalid_losses.append(valid_loss/len(test_data))\n\tprint(epoch_str+time_str)#训练一批测试一批,time_str为每次epoch运行的时间00:00:07表示7秒\n\t\nplt.plot(train_losses, label='train')\nplt.plot(valid_losses, label='valid')\nplt.xlabel('epoch')\nplt.legend(loc='best')\nplt.show()\n","sub_path":"CNN/DenseNet+CIFAR10.py","file_name":"DenseNet+CIFAR10.py","file_ext":"py","file_size_in_byte":5656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"169802622","text":"from django.contrib import admin\nfrom .models import NounLabel, AdjLabel\n\n# class NounAdmin(admin.ModelAdmin):\n# list_display = ['id', 'noun', 'count', 'created_at' ]\n# admin.site.register(NounLabel, NounAdmin)\nadmin.site.register(AdjLabel)\n# Register your models here.\n\n# @admin.register(NounLabel)\n# class NounAdmin(admin.ModelAdmin):\n#\n# \tdef delete(self, obj):\n# \t\treturn ''.format(obj.pk)\n#\n# \tdelete.allow_tags = True\n# \tdelete.short_description = 'Delete object'\n#\n# \tlist_display = ['id', 'noun', 'count','created_at','delete']\n# \tlist_display_links = ['id', 'noun']\n\n\nclass ResourceAdmin(admin.ModelAdmin):\n\n\tdef delete(self, obj):\n\t\treturn ''.format(obj.pk)\n\n\tdelete.allow_tags = True\n\tdelete.short_description = 'Delete object'\n\n\tlist_display = ('id', 'noun', 'count', 'delete')\n\nadmin.site.register(NounLabel, ResourceAdmin)","sub_path":"D_AI_Project/D_AI_Service/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"246682729","text":"## implement Convolutional Neural Network using LaNet architecture using keras\n\nfrom keras.models import Sequential\nfrom keras.layers.convolutional import Convolution2D\nfrom keras.layers.convolutional import MaxPooling2D\nfrom keras.layers.core import Activation\nfrom keras.layers.core import Flatten\nfrom keras.layers.core import Dense\n\nclass LeNet:\n @staticmethod\n # weightsPath can be used to load a pre trained model\n def build(width, height, depth, classes, weightsPath = None):\n # initialize the model\n model = Sequential()\n # create first set of CONV => RELU => POOL\n model.add(Convolution2D(20, 5, 5, border_mode = \"same\", input_shape = (depth, height, width)))\n model.add(Activation(\"relu\"))\n model.add(MaxPooling2D(pool_size = (2, 2), strides = (2, 2)))\n\n # second set of CONV => RELU => POOL\n model.add(Convolution2D(50, 5, 5, border_mode = \"same\"))\n model.add(Activation(\"relu\"))\n model.add(MaxPooling2D(pool_size = (2, 2), strides = (2, 2)))\n\n # fully connected layers often called dense layers of lenet architecture\n # set FC => RELU layers\n model.add(Flatten())\n model.add(Dense(500))\n model.add(Activation(\"relu\"))\n\n # softmax classifier\n model.add(Dense(classes)) # number of class labels i.e. in this case we have 10 classes\n model.add(Activation(\"softmax\")) # multinomial logistic regression that returns a list of probabilities\n\n # if a weights path is supplied (indicating that the model was pretrained), then load the weights\n if weightsPath is not None:\n model.load_weights(weightsPath)\n\n # return the constructed network architecture\n return model\n","sub_path":"lenet.py","file_name":"lenet.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"27453281","text":"\r\nimport csv\r\nKey=[]\r\n \r\nwith open('T.csv') as File:\r\n reader = csv.reader(File, delimiter=',', quotechar=',',\r\n quoting=csv.QUOTE_MINIMAL)\r\n for row in reader:\r\n for i in range(0,len(row)):\r\n row[i]=(row[i])\r\n Key.append(row)\r\n\r\nprint(len(Key))\r\nprint(Key[0][0])\r\n","sub_path":"Create CSVs/readKeyMatrix.py","file_name":"readKeyMatrix.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"26924811","text":"import requests\nimport base64\nimport json\nimport os, time, datetime\nimport configparser\nfrom framework.GetDBdata import GetDBdata\nfrom framework.Getimage import *\nfrom framework.InsertDB import InsertDB\nfrom framework.Query_DB import Query_DB\nfrom framework.ExportExcle import *\nfrom framework.logger import Logger\nfrom multiprocessing import Process, Lock\nfrom multiprocessing import Pool,Lock,Manager\nimport multiprocessing\n\nlogger = Logger(logger=\"TestValue\").getlog()\nproDir = os.getcwd()\nconfigPath = os.path.join(proDir, \"config\\config.ini\")\ncf = configparser.ConfigParser()\ncf.read(configPath, encoding=\"utf-8-sig\")\n\ndef API(imagefile_path): # 服务器最新算法\n with open(imagefile_path, \"rb\") as f:\n # b64encode是编码,b64decode是解码\n base64_data = base64.b64encode(f.read())\n str_base64 = str(base64_data, 'utf-8')\n try:\n url = \"https://kk.huoyanhou.com:8445/image_analysis/basic/hpTest.html\"\n payload = {\n \"file\": str_base64, \"fileName\": imagefile_path.split('\\\\')[-1]\n }\n headers = {\n 'Content-Type': \"application/json\",\n }\n payload = json.dumps(payload) # 将字典类型转换为 JSON 对象,序列化\n r = requests.post(url, data=payload, headers=headers)\n r.raise_for_status() # 如果响应状态码不是 200,就主动抛出异常\n top = r.text.split(',')\n dics = {\"code\": 2000, \"message\": \"识别成功!\", \"topdata\": {\"top1\": int(top[0]), \"top2\": int(top[1]), \"top3\": int(top[2])}}\n return json.dumps(dics)\n except Exception as e:\n dics = {\"code\": 4000, \"message\": \"服务可能未开启,\" + str(e), \"topdata\": {'top1': -99, 'top2': -99, 'top3': -99}}\n return json.dumps(dics)\n\n\ndef API2(imagefile_path):#本地老算法\n with open(imagefile_path, \"rb\") as f:\n # b64encode是编码,b64decode是解码\n base64_data = base64.b64encode(f.read())\n str_base64 = str(base64_data, 'utf-8')\n try:\n url = \"http://192.168.1.182:8888/Disc\"\n # querystring = {\"image_base64\":str_base64,\"image_name\":imagefile_path.split('\\\\')[-1]}#imagefile_path.split('\\\\')[-1]#, 
params=querystring\n payload = {\"image_base64\": str_base64,\n \"image_name\": imagefile_path.split('\\\\')[-1]} # imagefile_path.split('\\\\')[-1]\n response = requests.request(\"post\", url, data=(payload))\n return (response.text)\n except Exception as e:\n return (\"服务可能未开启,\" + str(e))\ndef API3(imagefile_path):#读数据库数据\n r'E:\\小雁塔\\8.6日拍摄测试样本照片(批处理)\\0\\IMG_20190807_110901.jpg'\n dic_rc = {\n \"Code\": int(imagefile_path.split('\\\\')[-2]),\n 'Test_Chart': imagefile_path.split('\\\\')[-1]\n }\n\n data=json.dumps(GetDBdata().get_db_data(dic_rc))\n return data\n # with open(imagefile_path, \"rb\") as f:\n # # b64encode是编码,b64decode是解码\n # base64_data = base64.b64encode(f.read())\n # str_base64 = str(base64_data, 'utf-8')\n # try:\n # url = \"http://192.168.1.182:8888/Disc\"\n # # querystring = {\"image_base64\":str_base64,\"image_name\":imagefile_path.split('\\\\')[-1]}#imagefile_path.split('\\\\')[-1]#, params=querystring\n # payload = {\"image_base64\": str_base64,\n # \"image_name\": imagefile_path.split('\\\\')[-1]} # imagefile_path.split('\\\\')[-1]\n # response = requests.request(\"post\", url, data=(payload))\n # return (response.text)\n # except Exception as e:\n # return (\"服务可能未开启,\" + str(e))\n\ndef Summary(imagefile_path, i,Test_Batch,Test_Version):\n Time_Stamp = int(time.time())\n now =time.strftime(\"%Y/%m/%d %H:%M:%S\", time.localtime(Time_Stamp))\n code = int(imagefile_path.split('\\\\')[-2])\n TestChart = imagefile_path.split('\\\\')[-1]\n\n try:\n T1 = datetime.datetime.now()\n topdata = json.loads(API2(imagefile_path))['topdata']\n\n T2 = datetime.datetime.now()\n T = round((T2 - T1).total_seconds(), 3) # 检索耗时\n\n except Exception as e:\n logger.error('报错:%s' % str(e))\n topdata = {'top1': -88, 'top2': -88, 'top3': -88}\n T = 0 # 检索耗时\n\n TestValue1, TestValue2, TestValue3 = topdata['top1'], topdata['top2'], topdata['top3'],\n if TestValue1 == code:\n Result = [\"PASS\", 'c6efce_006100']\n elif TestValue1 in [-88, -99]:\n Result = 
[\"ERROR\", 'ffeb9c_9c6500']\n Failimgae(imagefile_path, code)\n else:\n Result = [\"FAIL\", 'ffc7ce_9c0006']\n Failimgae(imagefile_path, code)\n dic = {\n 'Test_ID': i + 1,\n \"Test_Batch\":Test_Batch,\n 'Test_Version':Test_Version,\n 'Test_Time': now,\n 'Time_Stamp':Time_Stamp,\n 'Cultural_Name': cf.get(\"Data\", str(code)),\n 'Test_Chart': TestChart,\n 'Code':int( code),\n \"Expected_Value\": int( code),\n 'TimeConsuming': T,\n 'top1': TestValue1,\n 'top2': TestValue2,\n 'top3': TestValue3,\n 'Result': Result[0],\n 'Color':Result[1],\n 'Image_Path': imagefile_path.replace('\\\\', '/')#dic[\"TestChartPath\"].replace('\\\\', '/')\n\n }\n\n return dic\n\n\ndef TestValue2(rootdir, proce,Test_Batch,Test_Version,Batchinfo):#支持多进程\n Time_Stamp = int(time.time())\n now =time.strftime(\"%Y/%m/%d %H:%M:%S\", time.localtime(Time_Stamp))\n\n manager = Manager()\n lock = manager.Lock() # 产生钥匙\n datalist=Pathlsit(rootdir)\n listPath = datalist[0]\n Total = Batchinfo['total_num']\n sql = \"select count(*) from %s WHERE test_version='%s' AND test_batch='%s' ;\" % ('test_record_sheet',Test_Version,Test_Batch)\n A = Query_DB().getnum(sql)#查询测试进度\n\n start_dic={\"RunTime\":now,\"RunTime_int\":Time_Stamp,\"Test_Batch\":Test_Batch,\"Test_Version\":Test_Version,\"Total_Type\":Batchinfo['types_num'],\"Sum_Numbers\": Total,\"Completed\":A}\n # logger.info(start_dic)\n InsertDB().insert_Start_recording( 'start_recording', start_dic)#写入启动测试记录\n\n\n pool = multiprocessing.Pool(processes=proce)\n for i in range(A , Total):\n pool.apply_async(func=process, args=(listPath[i],Total,i,lock,Test_Batch,Test_Version,Batchinfo))\n pool.close()\n pool.join() # 在join之前一定要调用close,否则报错\n\ndef process(imagefile_path,Total,i,lock,Test_Batch,Test_Version,Batchinfo):\n\n dic = Summary(imagefile_path, i,Test_Batch,Test_Version)\n\n lock.acquire() ##拿到钥匙进门,其他进程阻塞, acqurie和release之间的代码只能被一个进程执行\n #SummaryExcle(addr, dic, title, 10)\n InsertDB().insert_data('test_record_sheet', dic)#插入数据库测试记录数据\n 
lock.release() # 释放钥匙\n #logger.info(dic)\n logger.info('测试进度:%s/%s;测试图:%s;编号:%s;耗时:%s;top3:%s、%s、%s;测试结果:%s。' % (\n i + 1, Total, dic['Test_Chart'], dic['Code'], dic['TimeConsuming'], dic['top1'], dic['top2'],\n dic['top3'], dic['Result']))\n\n\n\n","sub_path":"framework/TestValue.py","file_name":"TestValue.py","file_ext":"py","file_size_in_byte":7168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"114324879","text":"#Addition\r\ndef sum(a,b):\r\n return a+b\r\n#Subtraction\r\ndef sub(a,b):\r\n return a-b\r\n#Multiplication\r\ndef mul(a,b):\r\n return a*b\r\n#Division\r\ndef div(a,b):\r\n return a/b\r\n#distance\r\ndef dis(time,s):\r\n return time*s\r\n\r\n#speed\r\ndef speed(time,d):\r\n return d/time\r\n#Simple interest\r\ndef simple_interest(p,t,r):\r\n si=(p*t*r)/100\r\n return si\r\n#Compound intrest\r\ndef compound_interest(p,r,t):\r\n ci=p*(pow((1+r/100),t))\r\n return ci\r\nprint(\"select operation\")\r\nprint(\"1.Addition\")\r\nprint(\"2.Subtraction\")\r\nprint(\"3.Multiply\")\r\nprint(\"4.Divide\")\r\nprint(\"5.Distance\")\r\nprint(\"6.Speed\")\r\nprint(\"7.Simple Intrest \")\r\nprint(\"8.Compound intrest\")\r\nwhile True:\r\n choice=input(\"Enter choice(1/2/3/4/5/6/7/8/):\")\r\n if choice in(\"1\",\"2\",\"3\",\"4\"):\r\n a=float(input(\"a = \"))\r\n b=float(input(\"b = \"))\r\n if choice == \"1\":\r\n print(a,\"+\",b,\"=\",sum(a,b))\r\n elif choice == \"2\":\r\n print(a,\"-\",b,\"=\",sub(a,b))\r\n elif choice == \"3\":\r\n print(a,\"*\",b,\"=\",mul(a,b))\r\n elif choice == \"4\":\r\n print(a,\"/\",b,\"=\",div(a,b))\r\n elif choice in(\"5\"):\r\n time=float(input(\"Enter time(hr) :\"))\r\n s=float(input(\"Enter speed(km/hr) :\"))\r\n print(\"Distance is :\",dis(time,s),\"km\")\r\n elif choice in(\"6\"):\r\n time=float(input(\"Enter time(hr) :\"))\r\n d=float(input(\"Enter Distance(km) :\"))\r\n print(\"Speed is :\",speed(time,d),\"km/hr\")\r\n elif choice in(\"7\"):\r\n p=float(input(\"Enter Principal :\"))\r\n t=float(input(\"Enter Time :\"))\r\n r=float(input(\"Enter Rate :\"))\r\n print(\"Simple Intrest\",simple_interest(p,t,r))\r\n elif choice in(\"8\"):\r\n p = float(input(\"Enter Principal :\"))\r\n t = float(input(\"Enter Time :\"))\r\n r = float(input(\"Enter Rate :\"))\r\n print(\"Compound Intrest\",compound_interest(p,r,t))\r\n else :\r\n 
print(\"Invalid\")\r\n\r\n\r\n\r\n\r\n","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"325187759","text":"# -*- coding: utf-8 -*-\n\nfrom core import httptools, scrapertools, jsontools\nfrom platformcode import config, logger\nimport os\n\n\ndef get_video_url(page_url, url_referer=''):\n logger.info(\"(page_url='%s')\" % page_url)\n video_urls = []\n\n vid = scrapertools.find_single_match(page_url, 'id=([A-z0-9]+)')\n if not vid: return video_urls\n\n data = httptools.downloadpage('https://www.zembed.to/vl/' + vid).data\n # ~ logger.debug(data)\n \n try:\n cache_path = os.path.join(config.get_data_path(), 'cache')\n if not os.path.exists(cache_path): os.makedirs(cache_path)\n\n data_json = jsontools.load(data)\n\n for q in [\"360p\", \"480p\", \"720p\", \"1080p\", \"2048p\"]:\n if q not in data_json: continue\n txt = generar_m3u8(data_json[q])\n\n file_local = os.path.join(cache_path, 'temp-%s.m3u8' % q)\n with open(file_local, 'wb') as f: f.write(txt); f.close()\n\n video_urls.append(['m3u8 '+q, file_local])\n except:\n pass\n\n return video_urls\n\ndef generar_m3u8(e):\n txt = \"#EXTM3U\\n\"\n txt += \"#EXT-X-VERSION:5\\n\"\n txt += \"#EXT-X-TARGETDURATION:%s\\n\" % e['td']\n txt += \"#EXT-X-MEDIA-SEQUENCE:0\\n\"\n \n for l in range(len(e['data'][0])):\n\n txt += \"#EXTINF:%s\\n\" % e['data'][0][l]\n txt += \"#EXT-X-BYTERANGE:%s\\n\" % e['data'][1][l]\n \n r = e['data'][1][l].split(\"@\")\n txt += \"https://www.zembed.to/drive/hls/\" + e['md5'] + \"/\" + e['md5'] + str(l) + \".html?ch=\" + e['md5'] + \"-chunk-\" + e['data'][2][l] + \".txt&s=\" + r[1] + \"&l=\" + r[0] + \"\\n\"\n\n txt += \"#EXT-X-ENDLIST\\n\"\n return txt\n","sub_path":"servers/zembed.py","file_name":"zembed.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"305695118","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport requests\nfrom lxml.etree import HTML\nimport csv\n\nheaders = {\n 'accept': \"*/*\",\n 'accept-encoding': \"gzip, deflate, br\",\n 'accept-language': \"zh-CN,zh;q=0.9\",\n 'cache-control': \"no-cache,no-cache\",\n # 'cookie': \"uu=BCYpTopNXQt0xfexDllZRLuhYJVKmKWMy4hSUyxCURBKFbkBtlxSGiFiigJ56kiY8vJ0i0UWJx8O%0D%0AiYHFXe-_iPgC8chTTH6FYmDTa_uik_h0210YVo--2gMM_XjiZLhWDrc-eoqU-EGf8BajV1d2UQn3%0D%0Avg%0D%0A; session-id=138-0186516-5877957; adblk=adblk_no; ubid-main=132-8923120-5804221; session-token=sUDTl5lOoYTINfzGNUvgNxXFF1hyBb4+UDo6xQFL/V5XVEsskPuXeh90DfEaITRbaveHBYZ0PalLDXF9wYnsO09BYi0CYqrqhVY28k508mYu/jEerCFLixSN2egEkcDWlv2i22BKYm6h9mkdRZGPhc6H7EBrpJhkJvPUE5V1nBvqzHVhitj3xTXwVR3+/Z95; session-id-time=2082787201l; csm-hit=tb:HE39MYYJJG0J119F5PT3+s-VN3M5F654DGPP86W3GHT|1558434040958&t:1558434040958&adb:adblk_no\",\n 'pragma': \"no-cache\",\n 'referer': \"https://www.imdb.com/title/tt4154796/reviews?ref_=tt_ql_3\",\n 'user-agent': \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36\",\n 'x-requested-with': \"XMLHttpRequest\",\n 'Postman-Token': \"6e1e722d-5ca3-4f9b-a185-b8eb2f83d0a6\"\n }\n\ndef start():\n allItem_list = [{'key':'tt0848228','value':73,'name':'avengers1'},{'key':'tt2395427','value':49,'name':'avengers2'},{'key':'tt4154756','value':161,'name':'avengers3'},{'key':'tt4154796','value':279,'name':'avengers4'}]\n for each in allItem_list:\n for i in range(each['value']):\n if i == 0:\n url = 'https://www.imdb.com/title/{key}/reviews/_ajax'.format(key=each['key'])\n else:\n url = 'https://www.imdb.com/title/{key}/reviews/_ajax?ref_=undefined&paginationKey={pageToken}'.format(pageToken=pageToken,key=each['key'])\n\n response = requests.get(url,headers=headers)\n # print(response.text)\n html = HTML(response.text)\n\n pageToken = html.xpath('string(//div[@class=\"load-more-data\"]/@data-key)')\n\n div_list 
= html.xpath('//div[@class=\"lister-list\"]/div')\n for div in div_list:\n rating = div.xpath('string(.//span[@class=\"rating-other-user-rating\"]/span[1])')\n commentList = div.xpath('.//div[@class=\"text show-more__control\"]//text()')\n # print(commentList)\n comment = ''.join(commentList)\n print(rating, comment)\n\n saveRes = rating+'|'+comment.replace('\\n','')+'\\n'\n\n with open(each['name']+'.csv','a',encoding='utf8') as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow([rating, comment.replace('\\n','')])\n\nif __name__ == '__main__':\n start()","sub_path":"other/imdb/imdb.py","file_name":"imdb.py","file_ext":"py","file_size_in_byte":2767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"632808679","text":"import time\nimport socket\nimport sys\nimport random\nimport threading\nimport socket\n\nexitFlag = 0\nclass ClientThread (threading.Thread):\n# Listens on global clientSocket for messages from server\n def __init__(self, threadID, name, counter):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n self.counter = counter\n \n def run(self):\n global stayOnServer\n global inbox\n global ctrl_inbox\n global streamInbox\n\n # Listening loop waiting for messages from the Server \n while stayOnServer:\n prefix = ''\n data = ''\n try:\n clientData = clientSock.recvfrom(1026)[0].decode('utf-8')\n \n # Acquire thread lock to assure this runs with main thread\n threadLock.acquire()\n\n # Check for message break and seperate tokens\n if msg_break in clientData:\n prefix, data = clientData.split(msg_break)\n else:\n data = clientData\n \n # Check message for control responses\n if \"#./USER\" in prefix:\n ctrl_inbox += [clientData]\n # print(\"[INFO] [DEBUG] CTRL Inbox: \" + str(ctrl_inbox))\n elif \"#./EXIT\" in prefix:\n stayOnServer = False\n elif \"#./ERROR_INVALID_USER\" in prefix:\n ctrl_inbox += [clientData]\n print(\"Recent message '%s' failed to send. 
Destination user was not found in active userlist.\"\n % data)\n else: \n inbox += [data]\n time.sleep(2)\n # print(\"[INFO] [DEBUG] Inbox: \" + str(inbox))\n if streamInbox:\n print(\"> \" + str(data)+ '\\n') \n \n threadLock.release()\n except Exception as e:\n print(\"Error receiving messages from server %s: %s\" % (str(server) , e))\n\n\nmsg_break = \" /$MESSAGE_BREAK: \"\nstayOnServer = True\nstreamInbox= False\nresp_wait= 1\n\nname = ''\ninbox=[]\nctrl_inbox = []\nuser_groups = {}\n\n\ndef init_client():\n# Get user data for establishing connection and request username and userlist from server \n global streamInbox\n\n # Init and bind client socket\n clientHost = '127.0.0.1'\n clientPort = 9998\n clientSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n \n print(\"Enter the port that the messager client should use. Enter nothing to use default port %d\" % clientPort)\n try: \n t = raw_input().strip()\n except Exception as e:\n t = input()\n if not t == '':\n clientPort= int(t)\n \n try:\n clientSock.bind((clientHost, clientPort)) \n except Exception as e: \n clientPort = 9998\n clientSock.bind((clientHost, clientPort)) \n print(\"Error with chosen port. Using default port %d\" % clientPort)\n\n # Get Server IP and port\n host = '127.0.0.1' \n port = 9997\n server = (host,port)\n print(\"Enter the port for the chat server.\")\n try: \n t = raw_input().strip()\n except Exception as e:\n t = input()\n if not t == '':\n port= int(t)\n\n # Get username\n name = 'Client'\n print(\"Enter your username for the chat server.\")\n try: \n t = raw_input().strip()\n except Exception as e:\n t = input()\n if not t == '':\n name= t\n\n\n # Ask user if they want messages to show on screen or keep default and check with command\n resp = ''\n print(\"\\nMessages are accessed by the ./inbox control command in the interface by default.\\n\"\n + \"Would you like for messages to be shown as they are received instead? 
(y/n)\")\n try: \n resp = raw_input().strip()\n except Exception as e:\n resp = input()\n \n if resp == 'n' or resp == 'no' or resp == 'N' or resp == '':\n streamInbox = False\n else:\n streamInbox = True\n \n # Initialize Client as active with server\n print(\"\\nConnecting to UDP Chat Server at %s:%d ...\" % ( host, port) )\n time.sleep(1)\n print(\"Requesting username %s ... \" % name)\n time.sleep(1)\n pkt = \"#./INIT\" + msg_break + name\n clientSock.sendto(pkt.encode(\"ascii\"), server)\n\n # Wait for userId and user list, then parse\n try: \n msg = clientSock.recvfrom(1026)[0]\n msg = msg.decode('utf-8')\n name, userlist = msg.split(msg_break)\n print(\"Connected to Chat Server. Your screen name is %s\\n\" % name)\n print('%s\\n' % userlist)\n except Exception as e: \n print(\"Error initiating chat client with server.\" + str(e))\n raise e\n return clientSock, server\n\ndef reqUserList():\n# Send request to server for most recent active userlist update\n global inbox\n userlist=''\n check, check_inbox_limit= 0, 7\n \n # Generate random control key to append to message to verify update is new \n update_key = random.randint(1000, 9999)\n req = \"#./USER\" + msg_break + str(update_key)\n prefix =\"#./USER\" + str(update_key)\n \n # send request\n try:\n clientSock.sendto(req.encode(\"ascii\"), server)\n print(\"\\nRequesting updated user list ...\")\n except Exception as e:\n print(\"Error requesting user list: %s\" % e)\n\n # Give client listener thread time to add response to ctrl_inbox, then check for response\n while check <= check_inbox_limit:\n time.sleep(resp_wait)\n for msg in ctrl_inbox:\n msg_prefix, userdata = msg.split(msg_break)\n if msg_prefix == prefix:\n return userdata\n break\n if check == check_inbox_limit and userlist == '':\n print(\"[ERROR] Updated Userlist not found in inbox after waiting %d seconds for %d iterations : %s\" % (resp_wait, check_inbox_limit, userlist))\n check +=1 \n return 'Request for userlist has timed out. 
Userlist unavailable'\n\ndef main_loop():\n# Start thread to listen for server messages while also waiting for user input\n global stayOnServer\n global inbox\n global name\n\n initial_setup = True\n\n # Start thread to listen for server responses\n listening_thread = ClientThread(threadID=1, name=\"Listening Thread\", counter=1)\n listening_thread.start()\n \n # Start user interface loop\n while stayOnServer:\n msg = '' \n if initial_setup:\n # Check if this is first loop iteration, if so get initial destination to initialize destination user before continuing with main loop\n notValid = True\n while notValid: \n dest = ''\n # Allow user to enter a single name, or multiple names delimited by commas to create a group message and ask for an identifier for group\n print(\"Enter the name of the user or group you want to message.\"\n + \"To create a group message, enter the names of the users you would like to message with each seperated by commas.\")\n print(\"Enter 'none' or '0' to pick your destination user later.\")\n try: \n dest = raw_input().strip()\n except Exception as e:\n dest = input().strip()\n \n # Assume user input is valid and proceed with checks\n notValid = False\n if dest == './user':\n # Check if accidentally entered control message\n print('Invalid username, please select a user from the active users list.')\n notValid = True\n elif ',' in dest:\n # Check if group message\n users = dest.split(',')\n userstring = ''\n for name in users:\n if name == ' ' or name == '':\n del name\n else:\n userstring += name.strip()\n if not name == users[len(users)-1]:\n userstring += ', '\n # print(\"Enter a name for your new message group of users: %s\" % userstring)\n # try: \n # groupName = raw_input().strip()\n # except Exception as e:\n # groupName = input().strip\n \n # Set group name and add to destination\n # user_groups[groupName] = userstring\n dest = userstring\n \n elif dest =='none' or dest =='no one' or dest =='0' or dest == '' or dest == './exit':\n 
print(\"\\nNo user selected. Enter ./user to select a user to message.\")\n dest = \"no one\"\n print(\"Currently messaging %s\" % dest)\n initial_setup= False\n elif not initial_setup:\n # if this is not the first loop iteration, dest is already initiated, so continue interface loop\n print(\"\\nEnter your message for \" + dest \n + \". Enter './user' to change destination user and './inbox' to view your message inbox.\"\n + \"\\nEnter ./exit to leave chat server.\")\n \n # Included try/catch in all user inputs for conflicting python versions\n try: \n msg = raw_input().strip()\n except Exception as e:\n msg = input()\n \n if msg == '':\n # Bring up command menu again if message is empty\n pass \n elif msg == './user':\n # If user is requesting user list then send request and get new user destination input \n \n # Request userlist from server and print it\n userlist = reqUserList()\n print(\"# %s #\\n\"%userlist)\n\n # Get user destination input and set dest\n notValid = True\n while notValid: \n dest = ''\n print(\"Enter the name of the user or group you want to message.\"\n + \"To create a group message, enter the names of the users you would like to message with each seperated by commas.\")\n print(\"Enter 'none' or '0' to pick your destination user later.\")\n\n try: \n dest = raw_input().strip()\n except Exception as e:\n dest = input()\n notValid = False\n \n if dest == './user' or dest == '':\n print('Invalid username, please select a user from the active users list.')\n notValid = True\n elif ',' in dest:\n # Check if group message\n users = dest.split(',')\n userstring = ''\n for name in users:\n if name == ' ' or name == '':\n del name\n else:\n userstring += name.strip()\n if not name == users[len(users)-1]:\n userstring += ', '\n # print(\"Enter a name for your new message group of users: %s\" % userstring)\n # try: \n # groupName = raw_input().strip()\n # except Exception as e:\n # groupName = input().strip\n \n # Set group name and add to 
destination\n # user_groups[groupName] = userstring\n dest = userstring\n elif dest =='none':\n print(\"No user selected. Enter ./user to select a user to message.\")\n dest = \"no one\"\n print(\"Currently messaging %s\" % dest)\n \n elif msg == './inbox': \n # display all user messages received during this session \n print(\"\\nChat Message Inbox:\")\n if len(inbox) == 0: print(\"No new messages received.\")\n else:\n for m in inbox:\n print(\"> %s\\n\" % m)\n \n elif msg == './exit': \n # Send exit message to server and end main loop\n stayOnServer = False\n pkt = \"#./EXIT\" + msg_break + \" #./CONFIRM\" \n print(\"\\nDisconnecting from server ...\\n\")\n clientSock.sendto(pkt.encode('ascii'), server)\n\n else: \n # If no control messages, then send message for preset destination user to the server\n send = dest\n if send in user_groups.keys():\n send = user_groups[send]\n if send == name:\n print(\"Error, you entered your name as the receiving user\")\n pass\n elif send == 'no one':\n print(\"Error - You have yet to select a user to message. Enter ./user to select a receiving user from the user list.\")\n else:\n # Confirm send\n try: \n confirm = raw_input(\"Send message: '%s' to %s? (y/n) \" % (msg, send) ).strip()\n except Exception as e:\n confirm = input(\"Send message: '%s' to %s? (y/n) \" % (msg, send) )\n if confirm == 'n' or confirm == 'no' or confirm == 'N':\n pass\n else:\n # Send user's message with destination user as message prefix\n pkt = send + msg_break + msg\n try:\n clientSock.sendto(pkt.encode('ascii'), server)\n except Exception as e: \n print(\"Error sending client msg '%s' to server: %s\" % (msg, e) )\n print(\"Message sent.\")\n print(\"GoodBye! 
Press Enter to Close.\")\n\n try: \n raw_input().strip()\n except Exception as e:\n input()\n\n# Create thread lock to sync threads\nthreadLock = threading.Lock()\n\n# initiate client socket and get server data input from user \nclientSock, server = init_client()\n\n# Initiate main messaging interface loop\nmain_loop()\n","sub_path":"src/Client-UDP.py","file_name":"Client-UDP.py","file_ext":"py","file_size_in_byte":14173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"134580004","text":"# Initialize Project.\nfrom flask import Flask, request, jsonify\nfrom PIL import Image\n\n\n# app = Flask(__name__)\n\n\ndef create_app(classifier):\n app = Flask(__name__)\n\n @app.route(\"/\", methods=[\"POST\"])\n def predict():\n # Get the received-file-handler.\n img_file = request.files[\"img\"]\n\n # Check the file is empty\n if img_file.filename == \"\":\n return \"Bad Request\", 400\n\n # Read Image-File by using\n # PIL.\n img = Image.open(img_file)\n\n # Predict is Taco or Burrito\n # by using Classification-Model.\n result = classifier.predict(img)\n\n # Return Result as a 'JSON'-form.\n return jsonify({\n \"result\": result\n })\n return app\n\n\nif __name__ == \"__main__\":\n app = Flask(__name__)\n create_app(app).run(debug=True)\n\n","sub_path":"07/FlaskAPI/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"222447541","text":"class Solution:\n def majorityElement(self, nums):\n n1 = n2 = None\n c1 = c2 = 0\n for num in nums:\n if n1 == num:\n c1 += 1\n elif n2 == num:\n c2 += 1\n elif c1 > c2:\n n2, c2 = (n2, c2 - 1) if c2 > 1 else (num, 1)\n else:\n n1, c1 = (n1, c1 - 1) if c1 > 1 else (num, 1)\n ans, size = [], len(nums)\n if n1 is not None and sum([x == n1 for x in nums]) > size // 3:\n ans.append(n1)\n if n2 is not None and sum([x == n2 for x in nums]) > size // 3:\n ans.append(n2)\n return sorted(ans)\n","sub_path":"229/229.majority-element-ii.151752962.Wrong-Answer.leetcode.py","file_name":"229.majority-element-ii.151752962.Wrong-Answer.leetcode.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"3511897","text":"\n\n#calss header\nclass _CREASE():\n\tdef __init__(self,): \n\t\tself.name = \"CREASE\"\n\t\tself.definitions = [u'If cloth, paper, etc. creases, or if you crease it, it gets a line in it where it has been folded or crushed: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_crease.py","file_name":"_crease.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"30478953","text":"#!/usr/bin/env python3\n\n###############################################################################\n#\n# dRep - main program entry point\n#\n###############################################################################\n\n'''\nController- takes input from argparse and calls correct modules\n'''\n\n\n__author__ = \"Matt Olm\"\n__license__ = \"MIT\"\n__email__ = \"mattolm@gmail.com\"\n__status__ = \"Development\"\n\nimport argparse\nimport logging\nimport os\nimport sys\n\nimport drep\nfrom drep.WorkDirectory import WorkDirectory\nimport drep.d_cluster\nimport drep.d_analyze\nimport drep.d_filter\nimport drep.d_choose\nimport drep.d_adjust\nimport drep.d_bonus\nimport drep.d_evaluate\nimport drep.d_workflows\n\ndef version():\n versionFile = open(os.path.join(drep.__path__[0], 'VERSION'))\n return versionFile.read().strip()\n\nVERSION = version()\n\nclass Controller():\n def __init__(self):\n self.logger = logging.getLogger()\n\n def filter_operation(self, **kwargs):\n logging.debug(\"Starting the filter operation\")\n drep.d_filter.d_filter_wrapper(kwargs['work_directory'],**kwargs)\n logging.debug(\"!!! Finished the filter operation !!!\")\n\n def cluster_operation(self, **kwargs):\n if (kwargs['P_ani'] > 1) or (kwargs['S_ani'] > 1):\n logging.error(\"Can't assign a MASH or ANIn value over 1\")\n sys.exit()\n\n logging.debug(\"Starting the clustering operation\")\n drep.d_cluster.d_cluster_wrapper(kwargs['work_directory'],**kwargs)\n logging.debug(\"!!! Finished the clustering operation !!!\")\n\n def analyze_operation(self, **kwargs):\n logging.debug(\"Starting the analyze operation\")\n drep.d_analyze.d_analyze_wrapper(kwargs['work_directory'],**kwargs)\n logging.debug(\"!!! Finished the analyze operation !!!\")\n\n def choose_operation(self, **kwargs):\n logging.debug(\"Starting the choose operation\")\n drep.d_choose.d_choose_wrapper(kwargs['work_directory'],**kwargs)\n logging.debug(\"!!! 
Finished the choose operation !!!\")\n\n def adjust_operation(self, **kwargs):\n logging.debug(\"Starting the adjust operation\")\n drep.d_adjust.d_adjust_wrapper(kwargs['work_directory'],**kwargs)\n logging.debug(\"!!! Finished the adjust operation !!!\")\n\n def bonus_operation(self, **kwargs):\n logging.debug(\"Starting the bonus operation\")\n drep.d_bonus.d_bonus_wrapper(kwargs['work_directory'],**kwargs)\n logging.debug(\"!!! Finished the bonus operation !!!\")\n\n def evaluate_operation(self, **kwargs):\n logging.debug(\"Starting the evaluate operation\")\n drep.d_evaluate.d_evaluate_wrapper(kwargs['work_directory'],**kwargs)\n logging.debug(\"!!! Finished the evaluate operation !!!\")\n\n def dereplicate_wf_operation(self, **kwargs):\n logging.debug(\"Starting the dereplicate_wf operation\")\n drep.d_workflows.dereplicate_wrapper(kwargs['work_directory'],**kwargs)\n logging.debug(\"Finished the dereplicate_wf operation!\")\n\n def compare_wf_operation(self, **kwargs):\n logging.debug(\"Starting the compare_wf operation\")\n drep.d_workflows.compare_wrapper(kwargs['work_directory'],**kwargs)\n logging.debug(\"!!! 
Finished the compare_wf operation !!!\")\n\n '''\n def makeload_logger(wd):\n wd = str(os.path.abspath(wd))\n if not os.path.exists(wd):\n os.makedirs(wd)\n\n log_dir = wd + '/log/'\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n logging.basicConfig(filename=log_dir + 'logger.log',level=logging.DEBUG,\\\n format='%(asctime)s %(message)s')\n logging.info(\"***Logger started up at {0}***\".format(log_dir + 'logger.log'))\n '''\n\n def setup_logger(self,loc):\n ''' set up logger such that DEBUG goes only to file, rest go to file and console '''\n\n # set up logging everything to file\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(levelname)-8s %(message)s',\n datefmt='%m-%d %H:%M',\n filename=loc)\n\n # set up logging of INFO or higher to sys.stderr\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n formatter = logging.Formatter('%(message)s')\n console.setFormatter(formatter)\n logging.getLogger('').addHandler(console)\n\n logging.debug(\"!\"*80)\n logging.debug(\"***Logger started up at {0}***\".format(loc))\n logging.debug(\"Command to run dRep was: {0}\\n\".format(' '.join(sys.argv)))\n logging.debug(\"dRep version {0} was run \\n\".format(VERSION))\n logging.debug(\"!\"*80 + '\\n')\n\n def parseArguments(self, args):\n ''' Parse user options and call the correct pipeline'''\n\n # Load the workDirectory\n wd_loc = str(os.path.abspath(args.work_directory))\n wd = WorkDirectory(wd_loc)\n\n # Set up the logger\n self.setup_logger(wd.get_loc('log'))\n logging.debug(str(args))\n\n # Call the appropriate workflow\n if args.operation == \"dereplicate_wf\":\n self.dereplicate_wf_operation(**vars(args))\n if args.operation == \"compare_wf\":\n self.compare_wf_operation(**vars(args))\n\n if args.operation == \"filter\":\n self.filter_operation(**vars(args))\n if args.operation == \"cluster\":\n self.cluster_operation(**vars(args))\n if args.operation == \"analyze\":\n self.analyze_operation(**vars(args))\n if 
args.operation == \"choose\":\n self.choose_operation(**vars(args))\n if args.operation == \"adjust\":\n self.adjust_operation(**vars(args))\n if args.operation == \"bonus\":\n self.bonus_operation(**vars(args))\n if args.operation == \"evaluate\":\n self.evaluate_operation(**vars(args))\n\n def loadDefaultArgs(self):\n pass\n","sub_path":"build/lib/drep/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":5908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"152653607","text":"\"\"\"\r\n人脸识别模块\r\n本模块包括人脸识别的主要实现代码\r\n\"\"\"\r\nimport numpy as np\r\nimport os\r\nimport cv2\r\nfrom PIL import Image, ImageDraw, ImageFont\r\n\r\n\r\nclass FaceRecognitionGetter:\r\n \"\"\"\r\n 负责人脸识别的图片采集\r\n \"\"\"\r\n\r\n def __init__(self):\r\n self.__user_name_path = \"./message/user_name\" # 保存使用者姓名文件的地址\r\n # 创建级联分类器\r\n self.__face_detector = cv2.CascadeClassifier('./cascade/haarcascades/haarcascade_frontalface_default.xml')\r\n\r\n def get_face_data(self, face_name):\r\n \"\"\"\r\n 得到人脸识别的图片数据,将图片写入到Facedata文件夹中\r\n :param face_name: 识别者信息\r\n \"\"\"\r\n i = 0 # 控制频率\r\n FREQUENCY = 3 # 频率\r\n self.__count = 0 # 计数图片输入\r\n self.__sign = None # 建立标志如果原有信息不存在的话才写入图片\r\n self.__write_name_data(face_name) # 写入识别者信息\r\n if self.__sign:\r\n print(\"\"\"\r\n =- 接下来会搜集1000张您的信息,请将耐心等待-= \r\n \"\"\")\r\n self.__get_jpg_data(i, FREQUENCY) # 获取人脸图片信息\r\n elif self.__sign is None:\r\n print(\"\"\"\r\n =-人物信息已存在-=\"\"\")\r\n return self.__sign\r\n\r\n def __get_jpg_data(self, i, FREQUENCY):\r\n \"\"\"\r\n 获得人脸信息\r\n :param count:图片数量\r\n :param i: 控制频率用\r\n :param FREQUENCY: 频率\r\n \"\"\"\r\n cap = cv2.VideoCapture(0) # 打开摄像头\r\n while True:\r\n # 分帧读取图像\r\n sucess, img = cap.read()\r\n # 转为灰度图片\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n # 检测人脸\r\n faces = self.__face_detector.detectMultiScale(gray, 1.3, 5)\r\n self.__write_jpg(faces, gray, img, i, FREQUENCY) # 将读取的每帧图片按频率保存\r\n # 保持画面的持续。\r\n k = cv2.waitKey(1)\r\n if k == 27: # 通过esc键退出摄像\r\n break\r\n elif self.__count >= 1000: # 得到1000个样本后退出摄像\r\n break\r\n # 关闭摄像头\r\n cap.release()\r\n cv2.destroyAllWindows()\r\n\r\n def __write_jpg(self, faces, gray, img, i, FREQUENCY):\r\n \"\"\"\r\n 写入人脸信息图片\r\n :param faces: 人脸识别器\r\n :param gray: 灰度图片\r\n :param img: 视频中按频率得到的图片\r\n :param i: 控制频率\r\n :param count: 图片数量\r\n :param FREQUENCY: 频率\r\n \"\"\"\r\n for (x, y, w, h) in faces:\r\n cv2.rectangle(img, (x, y), (x + w, y + w), (255, 0, 0))\r\n if i % FREQUENCY == 0:\r\n 
self.__count += 1\r\n # 保存图像\r\n cv2.imwrite(\"Facedata/User.\" + str(self.__face_id) + '.' + str(self.__count) + '.jpg',\r\n gray[y: y + h, x: x + w])\r\n i += 1\r\n # 显示图片\r\n cv2.imshow('image', img)\r\n\r\n def __write_name_data(self, face_name):\r\n with open(self.__user_name_path, \"r\") as f: # 以读形式打开user_name\r\n list_name = [item for item in f] # 将user_name内的名字添加到列表中\r\n with open(self.__user_name_path, \"a\") as f:\r\n # 如果识别者信息之前不存在则添加\r\n if face_name + \"\\n\" not in list_name and face_name is not \"\":\r\n f.write(face_name + \"\\n\")\r\n self.__sign = True # 修改标志\r\n self.___get_id(face_name, list_name) # 得到识别者id\r\n\r\n def ___get_id(self, face_name, list_name):\r\n \"\"\"\r\n 获取识别者信息\r\n :param face_name:识别者姓名\r\n :param list_name: 姓名列表\r\n \"\"\"\r\n self.__face_id = 0\r\n for item in list_name:\r\n if item == face_name:\r\n break\r\n else:\r\n self.__face_id += 1\r\n\r\n\r\nclass FaceRecognitionTrainer:\r\n \"\"\"\r\n 人脸识别的训练者\r\n \"\"\"\r\n\r\n def __init__(self):\r\n # 创建级联分类器\r\n self.__detector = cv2.CascadeClassifier(\"./cascade/haarcascades/haarcascade_frontalface_default.xml\")\r\n\r\n def trainer(self):\r\n \"\"\"\r\n 训练已有信息\r\n \"\"\"\r\n self.__recognizer = cv2.face.LBPHFaceRecognizer_create()\r\n print(\"\"\"\r\n =-训练需要一定时间,请耐心等待······-=\"\"\")\r\n faces, ids = self.__getImagesAndLabels()\r\n # 训练数据\r\n self.__recognizer.train(faces, np.array(ids))\r\n # 训练结果以yml形式文件保存\r\n self.__recognizer.write(r'face_trainer/trainer.yml')\r\n print(\"\"\"\r\n =-有{0}位使用者信息已经被训练-=\"\"\".format(len(np.unique(ids))))\r\n\r\n def __getImagesAndLabels(self):\r\n \"\"\"\r\n 得到图片和标签\r\n :return: 含有图片信息和标签的列表\r\n \"\"\"\r\n # 得到所有图片的路径\r\n imagePaths = [os.path.join('Facedata', f) for f in os.listdir('Facedata')]\r\n # 创建列表用于存储图片信息\r\n faceSamples = []\r\n # 用于存储图片id信息\r\n ids = []\r\n self.__add_img_id(faceSamples, ids, imagePaths)\r\n # 返回图片信息和图片id\r\n return faceSamples, ids\r\n\r\n def __add_img_id(self, faceSamples, ids, imagePaths):\r\n \"\"\"\r\n 
:param faceSamples: 储存图片信息的列表\r\n :param ids: 标签\r\n :param imagePaths: 图片路径\r\n \"\"\"\r\n for imagePath in imagePaths:\r\n # 得到灰度图像\r\n PIL_img = Image.open(imagePath).convert('L')\r\n # 将图片类型转化为 unin8\r\n img_numpy = np.array(PIL_img, 'uint8')\r\n # 得到id\r\n id = int(os.path.split(imagePath)[-1].split(\".\")[1])\r\n # 得到图像矩阵\r\n faces = self.__detector.detectMultiScale(img_numpy)\r\n self.__add_message(faceSamples, faces, id, ids, img_numpy) # 将图片信息分别添加到列表中\r\n\r\n def __add_message(self, faceSamples, faces, id, ids, img_numpy):\r\n \"\"\"\r\n 添加信息\r\n :param faceSamples:储存图片信息的列表\r\n :param faces: 图像矩阵\r\n :param id: 标签\r\n :param ids: 标签列表\r\n :param img_numpy: 灰度图片\r\n \"\"\"\r\n for (x, y, w, h) in faces:\r\n # 添加图片信息\r\n faceSamples.append(img_numpy[y:y + h, x: x + w])\r\n # 添加id信息\r\n ids.append(id)\r\n\r\n\r\nclass FaceRecognitionRecognizer:\r\n \"\"\"\r\n 人脸识别的识别者\r\n \"\"\"\r\n\r\n\r\n def __init__(self):\r\n self.__cascadePath = \"./cascade/haarcascades/haarcascade_frontalface_default.xml\"\r\n # 定义字体\r\n self.__font = ImageFont.truetype('simhei.ttf', 30, encoding='utf-8')\r\n def recognizer(self,user_name = None):\r\n # 初始化字典\r\n self.initialize_dict()\r\n # 创建人脸识别者\r\n self.__recognizer = cv2.face.LBPHFaceRecognizer_create()\r\n # 读取训练数据\r\n self.__recognizer.read('face_trainer/trainer.yml')\r\n # 创建级联分类器\r\n faceCascade = cv2.CascadeClassifier(self.__cascadePath)\r\n # 创建列表存储使用者姓名\r\n names = self.__read_user_name()\r\n # 打开摄像头\r\n cap = cv2.VideoCapture(0)\r\n minW = 0.1 * cap.get(3)\r\n minH = 0.1 * cap.get(4)\r\n # 进行人脸识别\r\n self.__discern(cap, faceCascade, minH, minW, names,user_name)\r\n # 关闭摄像头\r\n cap.release()\r\n cv2.destroyAllWindows()\r\n\r\n def __discern(self, cap, faceCascade, minH, minW, names,user_name = None):\r\n while True:\r\n # 分帧数返回图片\r\n ret, img = cap.read()\r\n # 将图片转化为灰度图片\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n # 返回图像矩阵\r\n faces = faceCascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5, 
minSize=(int(minW), int(minH)))\r\n for (x, y, w, h) in faces:\r\n # 返回预测id和置信度\r\n idnum, confidence = self.__get_id_and_confidence(gray, img, names, x, y, w, h)\r\n self.__get_idnum_count(idnum,user_name)\r\n # 将opencv图像格式转换成PIL格式, 数据类型是PIL.Image.Image\r\n img = self.__switch_img(confidence, h, idnum, img, x, y)\r\n if user_name is not None:\r\n if self.dict_user_name[user_name] > 50:\r\n return\r\n # 显示图像\r\n cv2.imshow('camera', img)\r\n k = cv2.waitKey(10)\r\n if k == 27: # 如果用户按下ese则退出\r\n break\r\n\r\n\r\n\r\n def __get_id_and_confidence(self, gray, img, names, x, y, w, h):\r\n \"\"\"\r\n 得到id和置信度\r\n :param faces: 图像矩阵\r\n :param gray: 灰度图片\r\n :param img: 图片\r\n :param names: 使用者姓名列表\r\n :return: 处理后的图片\r\n \"\"\"\r\n\r\n cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\r\n # 调用预测函数返回预测id和置信度\r\n idnum, confidence = self.__recognizer.predict(gray[y:y + h, x:x + w])\r\n if confidence < 100:\r\n idnum = names[idnum]\r\n confidence = \"{0}%\".format(round(2*(100 - confidence)))\r\n else:\r\n idnum = \"unknown\\n\"\r\n confidence = \"{0}%\".format(round(2*(100 - confidence)))\r\n return idnum, confidence\r\n\r\n def __switch_img(self, confidence, h, idnum, img, x, y):\r\n # 将图片转化为PIL形式\r\n img_PIL = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))\r\n draw = ImageDraw.Draw(img_PIL)\r\n # 将需要的信息显示在图片上\r\n draw.text((x + 5, y - 5), str(idnum)[:-1], font=self.__font)\r\n draw.text((x + 5, y + h - 5), str(confidence), font=self.__font)\r\n # 将图片转化回cv2格式\r\n img = cv2.cvtColor(np.asarray(img_PIL), cv2.COLOR_RGB2BGR)\r\n return img\r\n\r\n def __read_user_name(self):\r\n # 读取user_name文件,得到names列表\r\n with open(\"./message/user_name\", \"r\") as f:\r\n names = [item for item in f]\r\n return names\r\n\r\n def __get_idnum_count(self,idnum,user_name):\r\n \"\"\"\r\n 记录扫描正确的次数,用于登录\r\n :param idnum: 预测的id\r\n :return:\r\n \"\"\"\r\n if idnum[:-1] != \"unknown\":\r\n if idnum[:-1] == user_name:\r\n self.dict_user_name[user_name] += 1\r\n\r\n\r\n\r\n def 
initialize_dict(self):\r\n self.dict_user_name = {}\r\n with open(\"./message/user_name\") as f:\r\n for item in f:\r\n self.dict_user_name[item[:-1]] = 0\r\n\r\n\r\n","sub_path":"face_recognition.py","file_name":"face_recognition.py","file_ext":"py","file_size_in_byte":11011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"357727145","text":"# hw1 problem 11\n\n'''\nA sentence splitter is a program capable of splitting a text into\nsentences. The standard set of heuristics for sentence splitting\nincludes (but isn't limited to) the following rules:\nSentence boundaries occur at one of \".\" (periods), \"?\" or \"!\", except\nthat\na. Periods followed by whitespace followed by a lower case letter\nare not sentence boundaries.\nb. Periods followed by a digit with no intervening whitespace are\nnot sentence boundaries. \nc. Periods followed by whitespace and then an upper case letter,\nbut preceded by any of a short list of titles are not sentence\nboundaries. Sample titles include Mr., Mrs., Dr., and so on.\nd. Periods internal to a sequence of letters with no adjacent\nwhitespace are not sentence boundaries (for example,\nwww.aptex.com, or e.g).\ne. Periods followed by certain kinds of punctuation (notably comma\nand more periods) are probably not sentence boundaries.\nYour task here is to write a program that given the name of a text\nfile is able to write its content with each sentence on a separate\nline. Test your program with given_text.txt.\n'''\n\nimport re\n\ndef sentence_splitter(file_name):\n file = open(file_name, 'r')\n text = file.read()\n\n # We first remove the newlines that were already there \n # by subtituting \\n with an empty string.\n sentences = re.sub(r'\\n', '', text)\n\n # Now we add a newline after each period only if that period is not\n # preceded by 'Mr', 'Mrs' or 'Dr' and is followed by a space and an\n # uppercase letter\n sentences = re.sub(r'(? 
eps]\n\n\ndef get_change_points(Theta, eps, T=None, P=None):\n # calculate histogram of change points of T adjacency matrices\n T = T or Theta.shape[0]\n P = P or Theta.shape[1]\n # difference between consecutive adjacency matrices\n Delta_Theta = np.diff(Theta, axis=0)\n return [len(get_edges(G, eps, P)) for G in Delta_Theta]\n\n\ndef plot_data_with_cps(data, cps, ymin, ymax):\n plt.plot(data, alpha=0.5)\n for cp in cps:\n plt.plot([cp, cp], [ymin, ymax], 'k-')\n plt.axis([0, len(data), ymin, ymax], 'k-')\n plt.show()\n\n\n# BELOW IS UNTESTED\n# def evalFit(Theta, X):\n# \"\"\" Reports model fit chacteristics of a given estimated dynamic\n# graphical model.\n\n# Inputs:\n# Theta -- Sparse estimate of precision\n# X -- raw data\n\n# Outputs:\n# Lt -- vector of likelihood for each timepoint\n# bic -- complexity adjusted measure of estimation performance\n# sparsity -- vector of solution sparsity (for each timepoint)\n# \"\"\"\n\n# T = Theta.shape[0]\n# P = Theta.shape[1]\n\n# S = np.zeros((T, P, P))\n# # Init metrics, track for each time-point\n# bic = sparsity = np.zeros(T)\n# for t in range(0, T):\n# sparsity[t] = get_dof(Theta, thresh)\n# # Single sample outer product ala empirical covariance\n# S[t] = np.linalg.outer(X[t, :], X[t, :])\n\n# Lt = getLike(Theta, S)\n\n# # This may work with a moving average smoother\n# # but needs to be updated to take into account whole dataset\n# # for t in range(0, T):\n# # According to standard BIC\n# # bic[t] = (-(2 * Lt) + (sparsity[t] *\n# # np.log(2 * M + 1)))\n\n# return (Lt, bic, sparsity)\n\n# def getLike(Theta, S, thresh=0.00001):\n# \"\"\" Finds likelihood and risk of estimated covariance given a set of\n# empirical (unregularised) covariance matrices\"\"\"\n\n# # A threshold for counting sparsity\n# T = Theta.shape[0]\n# Lt = np.zeros(T)\n\n# # I think this is correct up to a factor of 2\n# for t in range(0, T):\n# # The likelihood is calculated at each time point\n# Lt[t] = np.log(np.linalg.det(Theta[t])) - 
np.trace(\n# np.dot(Theta[t], S[t]))\n\n# return Lt\n\n# def get_dof(Theta, thresh, P=None):\n# \"\"\" This works, checked (28/3/2017)\n# get edges of adjacency matrix Theta\n# Can probably just use len(get_edges(Theta))?\n# \"\"\"\n\n# P = P or Theta.shape[0]\n\n# count = 0\n# for i in range(P - 1):\n# for j in range(i + 1, P):\n# if Theta[i, j] > thresh:\n# count = count + 1\n \n# #Count diagonals\n# count = count + P\n\n# return count","sub_path":"graphtime/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"627361148","text":"from ixnetwork_restpy.base import Base\nfrom ixnetwork_restpy.files import Files\n\n\nclass L2VPNFrameRelayCW(Base):\n __slots__ = ()\n _SDM_NAME = \"l2VPNFrameRelayCW\"\n _SDM_ATT_MAP = {\n \"ControlWordReserved\": \"l2VPNFrameRelayCW.controlWord.reserved-1\",\n \"ControlWordBbit\": \"l2VPNFrameRelayCW.controlWord.bbit-2\",\n \"ControlWordFbit\": \"l2VPNFrameRelayCW.controlWord.fbit-3\",\n \"ControlWordDbit\": \"l2VPNFrameRelayCW.controlWord.dbit-4\",\n \"ControlWordCbit\": \"l2VPNFrameRelayCW.controlWord.cbit-5\",\n \"ControlWordZero\": \"l2VPNFrameRelayCW.controlWord.zero-6\",\n \"ControlWordLength\": \"l2VPNFrameRelayCW.controlWord.length-7\",\n \"ControlWordSequenceNumber\": \"l2VPNFrameRelayCW.controlWord.sequenceNumber-8\",\n }\n\n def __init__(self, parent, list_op=False):\n super(L2VPNFrameRelayCW, self).__init__(parent, list_op)\n\n @property\n def ControlWordReserved(self):\n \"\"\"\n Display Name: CW Rsvd\n Default Value: 0\n Value Format: decimal\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(\n self, self._get_attribute(self._SDM_ATT_MAP[\"ControlWordReserved\"])\n )\n\n @property\n def ControlWordBbit(self):\n \"\"\"\n Display Name: CW B Bit\n Default Value: 0\n Value Format: decimal\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(\n self, self._get_attribute(self._SDM_ATT_MAP[\"ControlWordBbit\"])\n )\n\n @property\n def ControlWordFbit(self):\n \"\"\"\n Display Name: CW F Bit\n Default Value: 0\n Value Format: decimal\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(\n self, self._get_attribute(self._SDM_ATT_MAP[\"ControlWordFbit\"])\n )\n\n @property\n def ControlWordDbit(self):\n \"\"\"\n Display Name: CW D Bit\n Default Value: 0\n Value Format: decimal\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(\n self, self._get_attribute(self._SDM_ATT_MAP[\"ControlWordDbit\"])\n 
)\n\n @property\n def ControlWordCbit(self):\n \"\"\"\n Display Name: CW C Bit\n Default Value: 0\n Value Format: decimal\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(\n self, self._get_attribute(self._SDM_ATT_MAP[\"ControlWordCbit\"])\n )\n\n @property\n def ControlWordZero(self):\n \"\"\"\n Display Name: CW Zero\n Default Value: 0\n Value Format: decimal\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(\n self, self._get_attribute(self._SDM_ATT_MAP[\"ControlWordZero\"])\n )\n\n @property\n def ControlWordLength(self):\n \"\"\"\n Display Name: CW Length\n Default Value: 0\n Value Format: decimal\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(\n self, self._get_attribute(self._SDM_ATT_MAP[\"ControlWordLength\"])\n )\n\n @property\n def ControlWordSequenceNumber(self):\n \"\"\"\n Display Name: CW Sequence Number\n Default Value: 0\n Value Format: decimal\n \"\"\"\n from ixnetwork_restpy.multivalue import Multivalue\n\n return Multivalue(\n self, self._get_attribute(self._SDM_ATT_MAP[\"ControlWordSequenceNumber\"])\n )\n\n def add(self):\n return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))\n","sub_path":"ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/l2VPNFrameRelayCW_template.py","file_name":"l2VPNFrameRelayCW_template.py","file_ext":"py","file_size_in_byte":3644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"20605414","text":"#!/usr/bin/env python\n'''\nProject: pageSwitch.py\nAuthor: Spencer Rathbun\nDate: 8/25/2011\n\nSummary: Add commands to switch trays on the heidelberg printers. Note that using the Kodak print file downloader will append a command to the header that causes the printer to ignore tray switching commands!\n\n'''\nimport argparse, os\nfrom glob import glob\ndef main(**kwargs):\n\tpage = False\n\tchecksOnPage = False\n\tlinesOnPage = []\n\n\tfor f in kwargs['infile']:\n\t\tfor myfile in glob(f):\n\t\t\toutput = open(os.path.splitext(myfile)[0]+kwargs['out'], 'wb')\n\t\t\twith open(myfile, 'rb') as df:\n\t\t\t\tfor line in df:\n\t\t\t\t\tif line.find('%%Page:') != -1:\n\t\t\t\t\t\tpage = True\n\t\t\t\t\telif line.find('%%PageTrailer') != -1:\n\t\t\t\t\t\tpage = False\n\n\t\t\t\t\tif page:\n\t\t\t\t\t\tif line.find('%%BeginBinary:') != -1:\n\t\t\t\t\t\t\tchecksOnPage = True\n\t\t\t\t\t\tlinesOnPage.append(line)\n\t\t\t\t\telse:\n\t\t\t\t\t\tif linesOnPage:\n\t\t\t\t\t\t\t'''letterhead is used for standard pages, but check pages are printed on plain'''\n\t\t\t\t\t\t\tif checksOnPage:\n\t\t\t\t\t\t\t\tlinesOnPage.insert(linesOnPage.index(\"%%BeginPageSetup\\r\\n\")+1, \"<< /MediaColor (white) /MediaType (plain)>> setpagedevice\\r\\n\")\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tlinesOnPage.insert(linesOnPage.index(\"%%BeginPageSetup\\r\\n\")+1, \"<< /MediaColor (white) /MediaType (letterhead)>> setpagedevice\\r\\n\")\n\t\t\t\t\t\t\tfor entry in linesOnPage:\n\t\t\t\t\t\t\t\toutput.write(entry)\n\t\t\t\t\t\t\tlinesOnPage = []\n\t\t\t\t\t\t\tchecksOnPage = False\n\t\t\t\t\t\toutput.write(line)\n\t\t\toutput.close()\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser(description='Parse over a postscript file and set the MediaType for each page.', version='%(prog)s 1.1')\n\tparser.add_argument('infile', nargs='+', type=str, help='input file')\n\tparser.add_argument('--out', type=str, default='_tray_switch.ps', help='name of output 
file')\n\targs = parser.parse_args()\n\tmain(**vars(args))\n","sub_path":"pageSwitch.py","file_name":"pageSwitch.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"303977046","text":"from __future__ import unicode_literals\n\nfrom django.test import override_settings\n\nfrom rest_framework import status\n\nfrom mayan.apps.documents.tests import DocumentTestMixin, TEST_HYBRID_DOCUMENT\nfrom mayan.apps.rest_api.tests import BaseAPITestCase\n\nfrom ..permissions import permission_content_view\n\nfrom .literals import TEST_DOCUMENT_CONTENT\n\n\n@override_settings(DOCUMENT_PARSING_AUTO_PARSING=True)\nclass DocumentParsingAPITestCase(DocumentTestMixin, BaseAPITestCase):\n test_document_filename = TEST_HYBRID_DOCUMENT\n\n def _request_document_page_content_view(self):\n return self.get(\n viewname='rest_api:document-page-content-view', kwargs={\n 'document_pk': self.test_document.pk,\n 'version_pk': self.test_document.latest_version.pk,\n 'page_pk': self.test_document.latest_version.pages.first().pk\n }\n )\n\n def test_get_document_version_page_content_no_access(self):\n response = self._request_document_page_content_view()\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)\n\n def test_get_document_version_page_content_with_access(self):\n self.grant_access(\n permission=permission_content_view, obj=self.test_document\n )\n\n response = self._request_document_page_content_view()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n self.assertTrue(\n TEST_DOCUMENT_CONTENT in response.data['content']\n )\n","sub_path":"mayan/apps/document_parsing/tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"188720988","text":"# -*- coding: utf-8 -*-\n\nfrom collections import OrderedDict\nfrom pages import MainPage, AddReviewPage, ReviewPage\nfrom tests.asserts import CustomAssertions\nfrom tests.base import BaseTestCase\nfrom tests.components import RatingsBlock\nfrom tests.utils import wait_url_ends_with, wait_text_change, login\n\n\nclass LoginTest(BaseTestCase):\n def setUp(self):\n self.create_driver()\n self.page = AddReviewPage(self.driver)\n self.page.open()\n\n def test(self):\n self.page = MainPage(self.driver)\n self.page.open()\n self.page.login(self.LOGIN, self.PASSWORD)\n\n menu_bar = self.page.menu_bar\n self.assertEqual(menu_bar.email_value, self.LOGIN)\n\n def tearDown(self):\n try:\n self.page.logout()\n finally:\n self.driver.quit()\n\n\nclass LogoutTest(BaseTestCase, CustomAssertions):\n def setUp(self):\n self.create_driver()\n self.page = MainPage(self.driver)\n self.page.open()\n self.page.login(self.LOGIN, self.PASSWORD)\n\n def test(self):\n self.page.logout()\n self.assertElementExists(self.driver, self.page.menu_bar.OPEN_LOGIN_FORM_BUTTON_XPATH)\n\n def tearDown(self):\n self.driver.quit()\n\n\nclass AverageRatingTest(BaseTestCase):\n def setUp(self):\n self.create_driver()\n self.page = AddReviewPage(self.driver)\n self.page.open()\n\n def test(self):\n ratings = [\n {\"name\": RatingsBlock.DESIGN_RATING_NAME, \"rating\": 5},\n {\"name\": RatingsBlock.COMFORT_RATING_NAME, \"rating\": 4},\n {\"name\": RatingsBlock.CONTROL_RATING_NAME, \"rating\": 3},\n {\"name\": RatingsBlock.ERGONOMICS_RATING_NAME, \"rating\": 3},\n {\"name\": RatingsBlock.RELIABILITY_RATING_NAME, \"rating\": 2},\n {\"name\": RatingsBlock.SERVICE_RATING_NAME, \"rating\": 1}\n ]\n\n average_rating = float(sum([x[\"rating\"] for x in ratings])) / float(len(ratings))\n\n self.page.set_ratings(ratings)\n wait_text_change(self.driver, self.page.ratings.AVERAGE_RATING_XPATH)\n self.assertAlmostEqual(average_rating, self.page.ratings.average_rating, places=1)\n\n def 
tearDown(self):\n self.driver.quit()\n\n\nclass AddReviewErrorsTest(BaseTestCase):\n\n # Not full Car ratings\n RATINGS = [\n {\"name\": RatingsBlock.DESIGN_RATING_NAME, \"rating\": 5},\n {\"name\": RatingsBlock.COMFORT_RATING_NAME, \"rating\": 4},\n {\"name\": RatingsBlock.CONTROL_RATING_NAME, \"rating\": 3},\n {\"name\": RatingsBlock.ERGONOMICS_RATING_NAME, \"rating\": 3},\n {\"name\": RatingsBlock.RELIABILITY_RATING_NAME, \"rating\": 2},\n {\"name\": RatingsBlock.SERVICE_RATING_NAME, \"rating\": 2}\n ]\n\n # Car options\n BRAND = \"Audi\"\n MODEL = \"100\"\n YEAR = \"1996\"\n MODIFICATION = \"1.6 AT\"\n RUN_CURRENT = \"400\"\n\n ADVANTAGES_TEXT = \"Advantages\" * 40\n COMMON_TEXT = \"Common\" * 40\n PROBLEMS_TEXT = \"Problems\" * 40\n\n def setUp(self):\n self.create_driver()\n login(self.driver, self.LOGIN, self.PASSWORD)\n wait_url_ends_with(self.driver, \"/?from=authpopup\")\n self.page = AddReviewPage(self.driver)\n self.page.open()\n\n def testRatings(self):\n self.page.set_ratings(self.RATINGS[:-1])\n self.page.add_review()\n self.assertFalse(self.page.ratings.is_rating_valid(\"Обслуживание и ремонт\"))\n\n self.page.set_ratings([self.RATINGS[-1]])\n self.page.add_review()\n self.assertTrue(self.page.ratings.is_all_ratings_valid())\n\n def testCarParams(self):\n options = OrderedDict([('Марка', self.BRAND),\n ('Модель', self.MODEL),\n ('Год производства', self.YEAR)])\n self.page.select_car_options(options)\n self.page.add_review()\n self.assertTrue(self.page.car_select.is_option_invalid('Модификация'))\n self.assertTrue(self.page.car_select.is_option_invalid('Кузов'))\n self.assertTrue(self.page.car_select.is_option_invalid('Объем двигателя'))\n self.assertTrue(self.page.car_select.is_option_invalid('КПП'))\n\n def testCurrentRun(self):\n self.page.add_review()\n self.assertTrue(self.page.car_select.is_run_current_invalid())\n\n self.page.set_run_current(\"123\")\n self.page.add_review()\n 
self.assertFalse(self.page.car_select.is_run_current_invalid())\n\n def testTextReview(self):\n self.page.add_review()\n self.assertTrue(self.page.review_inputs.is_advantages_field_invalid())\n self.assertTrue(self.page.review_inputs.is_problems_invalid())\n self.assertTrue(self.page.review_inputs.is_common_field_invalid())\n\n self.page.review_inputs.set_common_text(self.COMMON_TEXT)\n self.page.review_inputs.set_advantages_text(self.ADVANTAGES_TEXT)\n self.page.review_inputs.set_problems_text(self.PROBLEMS_TEXT)\n\n self.page.add_review()\n self.assertFalse(self.page.review_inputs.is_advantages_field_invalid())\n self.assertFalse(self.page.review_inputs.is_problems_invalid())\n self.assertFalse(self.page.review_inputs.is_common_field_invalid())\n\n def tearDown(self):\n self.driver.quit()\n\n\nclass CarSelectionTest(BaseTestCase):\n BRAND = \"Audi\"\n MODEL = \"100\"\n YEAR = \"1996\"\n MODIFICATION = \"1.6 AT\"\n RUN_CURRENT = \"123321\"\n RESULT_CURRENT = \"123 321\"\n\n def setUp(self):\n self.create_driver()\n self.page = AddReviewPage(self.driver)\n self.page.open()\n\n def test(self):\n options = OrderedDict([(\"Марка\", self.BRAND),\n (\"Модель\", self.MODEL),\n (\"Год производства\", self.YEAR),\n (\"Модификация\", self.MODIFICATION)])\n\n self.page.select_car_options(options)\n self.page.set_run_current(self.RUN_CURRENT)\n\n select = self.page.car_select\n\n self.assertEqual(self.BRAND, select.get_current_value(\"Марка\"))\n self.assertEqual(self.MODEL, select.get_current_value(\"Модель\"))\n self.assertEqual(self.YEAR, select.get_current_value(\"Год производства\"))\n self.assertEqual(self.MODIFICATION, select.get_current_value(\"Модификация\"))\n self.assertEqual(self.RESULT_CURRENT, select.run_current)\n\n def tearDown(self):\n self.driver.quit()\n\n\nclass ReviewTextInputTest(BaseTestCase):\n ADVANTAGES_TEXT = \"Advantages\" * 40\n COMMON_TEXT = \"Common\" * 40\n PROBLEMS_TEXT = \"Problems\" * 40\n\n def setUp(self):\n self.create_driver()\n 
self.page = AddReviewPage(self.driver)\n self.page.open()\n\n def test(self):\n reviews = self.page.review_inputs\n\n self.page.set_texts(self.COMMON_TEXT, self.ADVANTAGES_TEXT, self.PROBLEMS_TEXT)\n self.assertEqual(self.COMMON_TEXT, reviews.common_text)\n self.assertEqual(self.ADVANTAGES_TEXT, reviews.advantages_text)\n self.assertEqual(self.PROBLEMS_TEXT, reviews.problems_text)\n\n def tearDown(self):\n self.driver.quit()\n\n\nclass AddReviewTest(BaseTestCase):\n # Car text review\n ADVANTAGES_TEXT = \"Advantages\" * 40\n COMMON_TEXT = \"Common\" * 40\n PROBLEMS_TEXT = \"Problems\" * 40\n\n # Car ratings\n RATINGS = [\n {\"name\": RatingsBlock.DESIGN_RATING_NAME, \"rating\": 5},\n {\"name\": RatingsBlock.COMFORT_RATING_NAME, \"rating\": 4},\n {\"name\": RatingsBlock.CONTROL_RATING_NAME, \"rating\": 3},\n {\"name\": RatingsBlock.ERGONOMICS_RATING_NAME, \"rating\": 3},\n {\"name\": RatingsBlock.RELIABILITY_RATING_NAME, \"rating\": 2},\n {\"name\": RatingsBlock.SERVICE_RATING_NAME, \"rating\": 2}\n ]\n\n # Car options\n BRAND = \"Audi\"\n MODEL = \"100\"\n YEAR = \"1996\"\n MODIFICATION = \"1.6 AT\"\n RUN_CURRENT = \"400\"\n\n REVIEW_TITLE = BRAND + \" \" + MODEL + \" \" + MODIFICATION + \" \" + YEAR + u\" г.\"\n\n def setUp(self):\n self.create_driver()\n login(self.driver, self.LOGIN, self.PASSWORD)\n wait_url_ends_with(self.driver, \"/?from=authpopup\")\n self.add_review_page = AddReviewPage(self.driver)\n self.add_review_page.open()\n\n def test(self):\n self.add_review_page.set_ratings(self.RATINGS)\n\n options = OrderedDict([(\"Марка\", self.BRAND),\n (\"Модель\", self.MODEL),\n (\"Год производства\", self.YEAR),\n (\"Модификация\", self.MODIFICATION)])\n\n self.add_review_page.select_car_options(options)\n self.add_review_page.car_select.wait_option_enabled(\"Привод\")\n self.add_review_page.set_run_current(self.RUN_CURRENT)\n self.add_review_page.set_texts(self.COMMON_TEXT, self.ADVANTAGES_TEXT, self.PROBLEMS_TEXT)\n\n self.add_review_page.add_review()\n 
self.add_review_page.show_review()\n\n self.review_page = ReviewPage(self.driver)\n average_rating = round(float(sum([x[\"rating\"] for x in self.RATINGS])) / float(len(self.RATINGS)), 1)\n self.assertEqual(average_rating, self.review_page.review_avg_rating)\n self.assertEqual(self.RUN_CURRENT, self.review_page.run_current)\n self.assertEqual(self.REVIEW_TITLE, self.review_page.review_title)\n self.assertEquals(self.COMMON_TEXT, self.review_page.review_text.common_text)\n self.assertEquals(self.ADVANTAGES_TEXT, self.review_page.review_text.advantages_text)\n self.assertEquals(self.PROBLEMS_TEXT, self.review_page.review_text.problems_text)\n\n def tearDown(self):\n try:\n self.review_page.remove_review()\n self.review_page.logout()\n finally:\n self.driver.quit()\n","sub_path":"tests/review_test.py","file_name":"review_test.py","file_ext":"py","file_size_in_byte":9841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"44487025","text":"import sys, socket, select, time\nfrom optparse import OptionParser\n\nXBOX_PORT = 5050\nXBOX_PING = \"dd00000a000000000000000400000002\"\nXBOX_POWER = \"dd02001300000010\"\n\nhelp_text = \"xbox-remote-power.py -a -i \"\n\npy3 = sys.version_info[0] > 2\n\ndef main():\n parser = OptionParser()\n parser.add_option('-a', '--address', dest='ip_addr', help=\"IP Address of Xbox One\", default='')\n parser.add_option('-i', '--id', dest='live_id', help=\"Live ID of Xbox One\", default='')\n (opts, args) = parser.parse_args()\n \n if not opts.ip_addr:\n opts.ip_addr = user_input(\"Enter the IP address: \")\n\n ping = False\n if not opts.live_id:\n print(\"No Live ID given, do you want to attempt to ping the Xbox for it?\")\n result = \"\"\n while result not in (\"y\", \"n\"):\n result = user_input(\"(y/n): \").lower()\n if result == \"y\":\n ping = True\n elif result == \"n\":\n opts.live_id = user_input(\"Enter the Live ID: \")\n\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.setblocking(0)\n s.bind((\"\", 0))\n s.connect((opts.ip_addr, XBOX_PORT))\n\n if ping:\n print(\"Attempting to ping Xbox for Live ID...\")\n s.send(bytearray.fromhex(XBOX_PING))\n\n ready = select.select([s], [], [], 5)\n if ready[0]:\n data = s.recv(1024)\n opts.live_id = data[199:215]\n else:\n print(\"Failed to ping Xbox, please enter Live ID manually\")\n opts.live_id = user_input(\"Enter the Live ID: \")\n\n if isinstance(opts.live_id, str):\n live_id = opts.live_id.encode()\n else:\n live_id = opts.live_id\n\n power_packet = bytearray.fromhex(XBOX_POWER) + live_id + b'\\x00'\n print(\"Sending power on packets to \" + opts.ip_addr)\n for i in range(0, 5):\n s.send(power_packet)\n time.sleep(1)\n print(\"Xbox should turn on now\")\n\n s.send(bytearray.fromhex(XBOX_PING))\n ready = select.select([s], [], [], 5)\n if ready[0]:\n data = s.recv(1024)\n opts.live_id = data[199:215]\n print(\"Ping successful!\")\n print(\"Live ID = \" + 
live_id.decode(\"utf-8\"))\n print(\"\")\n print(\"******************************************\")\n print(\"* Xbox running - Streaming now possible! *\")\n print(\"******************************************\")\n print(\"\")\n else:\n print(\"Failed to ping Xbox - please try again! :(\")\n print(\"\")\n \n s.close()\n\ndef user_input(text):\n response = \"\"\n\n while response == \"\":\n if py3:\n response = input(text)\n else:\n response = raw_input(text)\n\n return response\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"xbox-remote-power.py","file_name":"xbox-remote-power.py","file_ext":"py","file_size_in_byte":2719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"53863609","text":"import unittest\nimport time\n\nfrom django.test import LiveServerTestCase\nfrom selenium import webdriver\n\n\nMAX_WAIT = 10\n\n\nclass NewVisitorTest(LiveServerTestCase):\n\n def setUp(self):\n self.browser = webdriver.Firefox()\n\n def tearDown(self):\n self.browser.quit()\n\n def test_list_of_forms_loads(self):\n # User goes to url page loads.\n\n self.browser.get(self.live_server_url + '/application-forms/')\n self.assertIn('Application Information', self.browser.title)\n\n # User sees a dropdown menu\n drop_down = self.browser.find_element_by_class_name('dropdown')\n self.assertEqual('Select Your Application', drop_down.text)\n\n # User presses button and 4 dropdown items appear\n expected_items = ['flow measurement', 'level measurement',\n 'pressure','temperature', 'valves']\n\n drop_down_items = self.browser.find_elements_by_class_name('dropdown-item')\n drop_down_true = all(item.get_attribute('innerHTML').lower() in expected_items for item in drop_down_items)\n self.assertEqual(drop_down_true, True)\n\n # User presses on Level Measurement\n level_item = drop_down_items[1]\n self.assertEqual(level_item.get_attribute('innerHTML').lower(), expected_items[1])\n time.sleep(1)\n drop_down.click()\n time.sleep(1)\n level_item.click()\n time.sleep(1)\n\n # User is redirected to new page\n self.assertEqual(self.browser.current_url, self.live_server_url + '/application-forms/level/')\n\n\n # User\n","sub_path":"functional_tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"111856483","text":"# -*- coding: utf-8 -*-\n\"\"\"Module providing caravan site details editor\"\"\"\n\nfrom Acquisition import aq_inner\nfrom five import grok\nfrom plone import api\nfrom plone.directives import form\nfrom z3c.form import button\nfrom zope.component import getUtility\n\nfrom rms.caravansites.interfaces import ICaravanSiteDetails\nfrom rms.caravansites.tool import ICaravanSiteTool\nfrom rms.membership.workspace import IWorkspace\n\nfrom rms.caravansites import MessageFactory as _\n\n\nclass DetailsEditor(form.SchemaEditForm):\n grok.context(IWorkspace)\n grok.require('cmf.ModifyPortalContent')\n grok.name('details-editor')\n\n schema = ICaravanSiteDetails\n ignoreContext = False\n css_class = 'app-ws-form ws-form'\n label = _(u\"Edit caravansite details\")\n\n @property\n def traverse_subpath(self):\n return self.subpath\n\n def publishTraverse(self, request, name):\n if not hasattr(self, 'subpath'):\n self.subpath = []\n self.subpath.append(name)\n return self\n\n def next_url(self):\n context = aq_inner(self.context)\n return context.absolute_url()\n\n def rvs(self):\n uid = self.traverse_subpath[0]\n item = api.content.get(UID=uid)\n return item\n\n @button.buttonAndHandler(_(u\"Save\"), name=\"save\")\n def handleApply(self, action):\n data, errors = self.extractData()\n if errors:\n self.status = self.formErrorsMessage\n return\n return self.applyChanges(data)\n\n @button.buttonAndHandler(_(u\"cancel\"))\n def handleCancel(self, action):\n msg = _(u\"Image edit has been cancelled.\")\n api.portal.show_message(message=msg, request=self.request)\n return self.request.response.redirect(self.next_url())\n\n def applyChanges(self, data):\n tool = getUtility(ICaravanSiteTool)\n rvs_uid = self.traverse_subpath[0]\n records = tool.read(rvs_uid, key='details')\n for record in records:\n key = record['id']\n try:\n record['value'] = data[key]\n except KeyError:\n continue\n tool.update(rvs_uid, records, key='details')\n msg = _(u\"The 
caravan site has successfully been updated\")\n api.portal.show_message(message=msg, request=self.request)\n return self.request.response.redirect(self.next_url())\n\n def getContent(self):\n uid = self.traverse_subpath[0]\n tool = getUtility(ICaravanSiteTool)\n record = tool.read(uid, key='details')\n data = {}\n for item in record:\n key = item['id']\n data[key] = item['value']\n return data\n","sub_path":"src/rms.caravansites/rms/caravansites/details.py","file_name":"details.py","file_ext":"py","file_size_in_byte":2633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"206189268","text":"import math\nlst = [2, 4, 9, 16, 25]\nnew_lst = []\nfor i in lst:\n new_lst.append(math.sqrt(i))\nprint(new_lst)\n\nnew_list2 = list(map(math.sqrt, lst))\nprint(new_list2)\n\nnew_list3 = [math.sqrt(i) for i in lst]\nprint(new_list3)\n","sub_path":"62-new_list.py","file_name":"62-new_list.py","file_ext":"py","file_size_in_byte":225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"355969581","text":"import numpy as np\nsequence=[]\nelectrodes=range(1,65)\nelectrodes=np.reshape(electrodes,(8,8))\nprint(electrodes)\n# GRADIENT\n# vertical and horizontal\nfor i in range (8):\n a = electrodes[i,0]\n b = electrodes[i,-1]\n for j in range (1,6):\n m = electrodes[i,j]\n n = electrodes[i,j+1]\n quadripole = [a,b,m,n]\n sequence.append(quadripole) \nfor i in range (8):\n a = electrodes[0,i]\n b = electrodes[-1,i]\n for j in range (1,6):\n m = electrodes [j,i]\n n = electrodes [j+1,i]\n quadripole = [a,b,m,n]\n sequence.append(quadripole)\n\n# diagonals\nfor i in range (-4,5):\n diagonal = np.diag(electrodes,k=i)\n print(diagonal)\n diagonal_NumElec = len(diagonal)\n a = diagonal[0]\n b = diagonal[-1]\n for j in range (1,diagonal_NumElec-2):\n quadripole = [a,b,diagonal[j],diagonal[j+1]]\n sequence.append(quadripole)\n \nelectrodes_flip = np.fliplr(electrodes) # flip electrodes matrix to get other diagonals\nprint(electrodes_flip)\n\nfor i in range (-4,5):\n diagonal = np.diag(electrodes_flip,k=i)\n print(diagonal)\n diagonal_NumElec = len(diagonal)\n a = diagonal[0]\n b = diagonal[-1]\n print(diagonal)\n for j in range (1,diagonal_NumElec-2):\n quadripole = [a,b,diagonal[j],diagonal[j+1]]\n sequence.append(quadripole)\n\n\"\"\" WENNER \"\"\"\n\n#Hoz\nfor i in range(8):\n line = electrodes[i,:]\n print(\"hor\",i,line)\n for j in range(5):\n quadripole =[line[j],line[j+3],line[j+1],line[j+2]]\n sequence.append(quadripole)\n \n \n\n#Ver\nfor i in range(8):\n line = electrodes[:,i]\n print(\"vert\",i,line)\n for j in range(5):\n quadripole =[line[j],line[j+3],line[j+1],line[j+2]]\n sequence.append(quadripole)\n\n#diagonals\nfor i in range(-3,4): # skipping shortest diagonals (already in Grad) \n diagonal = np.diag(electrodes,k=i)\n print(\"W_diag\",i,diagonal)\n diagonal_NumElec = len(diagonal)\n for j in range(diagonal_NumElec-3):\n quadripole = [diagonal[j],diagonal[j+3],diagonal[j+1],diagonal[j+2]]\n sequence.append(quadripole)\nfor i in 
range(-3,4):\n diagonal = np.diag(electrodes_flip,k=i)\n print(\"W_diag_flip\",i,diagonal)\n diagonal_NumElec = len(diagonal)\n for j in range(diagonal_NumElec-3):\n quadripole = [diagonal[j],diagonal[j+3],diagonal[j+1],diagonal[j+2]]\n sequence.append(quadripole) \n\nSeqDir = np.array(sequence)\nAd, Bd, Md, Nd = SeqDir[:,0], SeqDir[:,1], SeqDir[:,2], SeqDir[:,3]\nSeqRec = np.column_stack((Md, Nd, Ad, Bd))\nSeq = np.vstack((SeqDir,SeqRec))\n#A,B,M,N = sequence[:,0],sequence[:,1],sequence[:,2],sequence[:,3]\n#sequenceRec = [M,N,A,B]\nprint(len(Seq))\nnp.savetxt(\"Seq.txt\", Seq, fmt = '%i %i %i %i')\n","sub_path":"sequences/Rhi.py","file_name":"Rhi.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"279005415","text":"import turtle\nimport datetime\n\n#生日快乐\ndef love():\n def func(x, y):\n main()\n # turtle.title('领导专用程序')\n lv=turtle.Turtle()\n lv.hideturtle()\n lv.getscreen().bgcolor('light blue')\n lv.color('yellow','red')\n lv.pensize(1)\n lv.speed(1)\n lv.up()\n lv.goto(0,-150)\n #开始画爱心\n lv.down()\n lv.begin_fill()\n lv.goto(0, -150)\n lv.goto(-175.12, -8.59)\n lv.left(140)\n pos = []\n for i in range(19):\n lv.right(10)\n lv.forward(20)\n pos.append((-lv.pos()[0], lv.pos()[1]))\n for item in pos[::-1]:\n lv.goto(item)\n lv.goto(175.12, -8.59)\n lv.goto(0, -150)\n lv.left(50)\n lv.end_fill()\n #写字\n lv.up()\n lv.goto(0, 80)\n lv.down()\n lv.write(\"胡 可 仪\",font=(u\"方正舒体\",36,\"normal\"),align=\"center\")\n lv.up()\n lv.goto(0, 0)\n lv.down()\n lv.write(\"生日快乐!\",font=(u\"方正舒体\",48,\"normal\"),align=\"center\")\n lv.up()\n lv.goto(100, -210)\n lv.down()\n lv.write(\"点我点我快点我\",font=(u\"华文琥珀\",26,\"bold\"),align=\"right\")\n lv.up()\n lv.goto(160, -190)\n lv.resizemode('user')\n lv.shapesize(4, 4, 10) #调整小乌龟大小,以便覆盖“点我”文字\n lv.color('red', 'red')\n lv.onclick(func)\n lv.showturtle()\n\n\ndef main():\n pass\n\nif __name__ == '__main__':\n # if datetime.date.today() == datetime.date(2020, 02, ): # YYYY年,MM月,DD日\n love()\n # else:\n # main()\n","sub_path":"动画/心型动画.py","file_name":"心型动画.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"198733487","text":"INVALID_REQUEST = 'invalid request'\nPERMISSION_DENIED = 'permission denied'\nUNKNOWN = 'unknown'\nINTERNAL_SERVER_ERROR = 'internal error'\nTOO_MANY_REQUESTS = 'too many requests'\nAUTH_ERROR = 'authorization error'\nUNSUPPORTED_TYPE = 'unsupported type'\n\n\nclass CTRBaseError(Exception):\n def __init__(self, code, message, type_='fatal'):\n super().__init__()\n self.code = code or UNKNOWN\n self.message = message or 'Something went wrong.'\n self.type_ = type_\n\n @property\n def json(self):\n return {'type': self.type_,\n 'code': self.code,\n 'message': self.message}\n\n\nclass CTRInvalidJWTError(CTRBaseError):\n def __init__(self):\n super().__init__(\n PERMISSION_DENIED,\n 'Invalid Authorization Bearer JWT.'\n )\n\n\nclass CTRUnexpectedResponseError(CTRBaseError):\n def __init__(self, error):\n if error and error.get('error_description'):\n message = f'Microsoft Defender for Endpoint returned unexpected ' \\\n f'error. Details: {error[\"error_description\"]}'\n else:\n message = 'Something went wrong.'\n\n super().__init__(\n UNKNOWN,\n message=str(message)\n )\n\n\nclass CTRBadRequestError(CTRBaseError):\n def __init__(self, error=None):\n message = 'Invalid request to Microsoft Defender for Endpoint.'\n if error:\n message += f' {error}'\n super().__init__(\n INVALID_REQUEST,\n message\n )\n\n\nclass CTRInternalServerError(CTRBaseError):\n def __init__(self):\n super().__init__(\n INTERNAL_SERVER_ERROR,\n 'Microsoft Defender for Endpoint internal error.'\n )\n\n\nclass CTRTooManyRequestsError(CTRBaseError):\n def __init__(self, error=None):\n if '/advancedqueries/run' in error.url:\n message = f'Advanced Hunting API rate limit has been exceeded. ' \\\n f'{error.json()[\"error\"]}'\n else:\n message = 'Too many requests to Microsoft Defender for Endpoint ' \\\n 'have been made. 
Please, try again later.'\n super().__init__(\n TOO_MANY_REQUESTS,\n message\n )\n\n\nclass CTRSSLError(CTRBaseError):\n def __init__(self, error):\n error = error.args[0].reason.args[0]\n message = getattr(error, 'verify_message', error.args[0]).capitalize()\n super().__init__(\n UNKNOWN,\n f'Unable to verify SSL certificate: {message}'\n )\n\n\nclass AuthorizationError(CTRBaseError):\n def __init__(self, error):\n\n super().__init__(\n AUTH_ERROR,\n f\"Authorization failed: {error}\"\n )\n\n\nclass UnsupportedTypeError(CTRBaseError):\n def __init__(self, type_):\n\n super().__init__(\n UNSUPPORTED_TYPE,\n f'Unsupported observable type {type_}'\n )\n","sub_path":"api/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":2922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"243720616","text":"from django import forms\nfrom localflavor.gb.forms import GBPostcodeField\nfrom aliss.models import DigestSelection, Postcode, Category, ALISSUser\n\nclass DigestSelectionForm(forms.ModelForm):\n class Meta:\n model = DigestSelection\n fields = [\n 'postcode',\n 'category'\n ]\n\n postcode = forms.ModelChoiceField(\n queryset=Postcode.objects.all(),\n to_field_name=\"pk\",\n required=True\n )\n\n category = forms.ModelChoiceField(\n queryset=Category.objects.all(),\n to_field_name=\"slug\",\n required=False\n )\n\n def clean(self):\n cleaned_data = super(DigestSelectionForm, self).clean()\n\n postal_string = cleaned_data.get(\"postcode\")\n category_slug = cleaned_data.get(\"category\")\n\n return cleaned_data\n","sub_path":"aliss/forms/digest_selection.py","file_name":"digest_selection.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"58163087","text":"from otree.api import (\n models,\n widgets,\n BaseConstants,\n BaseSubsession,\n BaseGroup,\n BasePlayer,\n Currency as c,\n currency_range,\n)\n\n\nauthor = 'Philipp Chapkovski, '\n\ndoc = \"\"\"\nSorter app that guarantees the proper matching for further trust game.\n\"\"\"\n\n\nclass Constants(BaseConstants):\n name_in_url = 'sorter'\n players_per_group = None\n num_rounds = 1\n\n\nclass Subsession(BaseSubsession):\n @property\n def cities(self):\n return [self.session.config.get('city1'), self.session.config.get('city2')]\n\n\n\nclass Group(BaseGroup):\n pass\n\n\nclass Player(BasePlayer):\n city = models.StringField()\n\n\n","sub_path":"newtrustproj-master/sorter/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"395279748","text":"from msilib.schema import File\nimport random\n\n__author__ = 'Jacob'\n\n\ndef is_even(byte):\n \"\"\"\n Checks if byte is binary even\n :param byte: One byte of file loaded to memory\n :return: control bit. if even: 0, else: 1\n \"\"\"\n even_flag = 0\n\n for i in range(0, 8):\n even_flag += (byte >> i) & 1\n\n return even_flag % 2\n\n\ndef get_control_bit(file):\n \"\"\"\n Calculates control bit of file\n :param file: file loaded to memory\n :return: control bit. if even: 0, else: 1\n \"\"\"\n control_bit = 0\n\n for byte in file:\n control_bit += is_even(byte)\n\n return control_bit % 2\n\n\n\n\n\n","sub_path":"lab1/utils/control_bit.py","file_name":"control_bit.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"582373280","text":"from celery import Celery\n\n# cli\n# celery status\n# celery purge\n# celery purge -f\n\n\napp = Celery(broker='amqp://guest@localhost//')\napp = Celery(broker='redis://localhost:6379/0')\n\n\n@app.task()\ndef add(x, y):\n return x + y\n\n\nr = add.delay()\nr = add.apply_async(args=[1, 2], eta=datetime(2014, 6, 12, 0, 0))\nr = add.apply_async(args=[1, 2], countdown=10)\nr = add.apply_async(args=[2, 3], queues='email')\n\n\n\n# inspect\n\nfrom celery.task.control import revoke, inspect, discard_all\n\ni = inspect()\ni.scheduled()\ni.active()\ni.registered()\n\n# revoke task by id\ntask_id = 'foo'\nrevoke(task_id, terminate=True)\nr = add.apply_async(args=[1, 2])\nr.revoke()\n\n\n# run worker from script\nargv = ['worker', '--loglevel=DEBUG']\napp.worker_main(argv)\n\n\n# canvas\n# chain, group, chord\n\n\n# config\nCELERYD_LOG_COLOR = False\n\n# disable prefecthing\nCELERYD_PREFETCH_MULTIPLIER = 1\nCELERYD_CONCURRENCY = 1\nCELERY_ACKS_LATE = True\n\nCELERY_RDB_PORT = 6899\n\n\n# debugging\nrdb.set_trace()\n","sub_path":"python/celery_exp.py","file_name":"celery_exp.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"604757992","text":"import argparse\r\nimport os\r\nimport sys\r\nimport time\r\n\r\nfrom importlib import import_module\r\nfrom pathlib import Path\r\nfrom typing import List, Type\r\n\r\nimport moderngl\r\nfrom moderngl_window.context.base import WindowConfig, BaseWindow\r\nfrom moderngl_window.timers.clock import Timer\r\n\r\nIGNORE_DIRS = [\r\n '__pycache__',\r\n 'base',\r\n]\r\n\r\nOPTIONS_TRUE = ['yes', 'on', 'true', 't', 'y', '1']\r\nOPTIONS_FALSE = ['no', 'off', 'false', 'f', 'n', '0']\r\nOPTIONS_ALL = OPTIONS_TRUE + OPTIONS_FALSE\r\n\r\n\r\nclass ContextRefs:\r\n WINDOW = None\r\n CONTEXT = None\r\n\r\n\r\ndef activate_context(context: moderngl.Context, window: BaseWindow = None):\r\n \"\"\"Set the currently active window\"\"\"\r\n ContextRefs.WINDOW = window\r\n ContextRefs.CONTEXT = context\r\n\r\n\r\ndef window():\r\n \"\"\"Obtain the active window\"\"\"\r\n if ContextRefs.WINDOW:\r\n return ContextRefs.WINDOW\r\n\r\n raise ValueError(\"No active window and context. Call activate_window.\")\r\n\r\n\r\ndef ctx():\r\n \"\"\"Obtain the active context\"\"\"\r\n if ContextRefs.CONTEXT:\r\n return ContextRefs.CONTEXT\r\n\r\n raise ValueError(\"No active window and context. 
Call activate_window.\")\r\n\r\n\r\ndef run_window_config(config_cls: WindowConfig, timer=None, args=None) -> None:\r\n \"\"\"\r\n Run an WindowConfig entering a blocking main loop\r\n\r\n Args:\r\n config_cls: The WindowConfig class to render\r\n args: Override sys.args\r\n \"\"\"\r\n values = parse_args(args)\r\n window_cls = get_local_window_cls(values.window)\r\n\r\n # Calculate window size\r\n size = values.size or config_cls.window_size\r\n size = size[0] * values.size_mult, size[1] * values.size_mult\r\n\r\n window = window_cls(\r\n title=config_cls.title,\r\n size=size,\r\n fullscreen=values.fullscreen,\r\n resizable=config_cls.resizable,\r\n gl_version=config_cls.gl_version,\r\n aspect_ratio=config_cls.aspect_ratio,\r\n vsync=values.vsync,\r\n samples=values.samples,\r\n cursor=values.cursor,\r\n )\r\n window.print_context_info()\r\n activate_context(window.ctx, window=window)\r\n window.config = config_cls(ctx=window.ctx, wnd=window)\r\n\r\n timer = Timer()\r\n timer.start()\r\n\r\n while not window.is_closing:\r\n current_time, delta = timer.next_frame()\r\n\r\n window.ctx.screen.use()\r\n window.ctx.screen.clear()\r\n window.render(current_time, delta)\r\n window.swap_buffers()\r\n\r\n _, duration = timer.stop()\r\n window.destroy()\r\n print(\"Duration: {0:.2f}s @ {1:.2f} FPS\".format(duration, window.frames / duration))\r\n\r\n\r\ndef get_window_cls(window: str = None) -> Type[BaseWindow]:\r\n \"\"\"\r\n Attept to obtain a window class using the full dotted\r\n python path. This can be used to import custom or modified\r\n window classes.\r\n\r\n Args:\r\n window (str): Name of the window\r\n\r\n Returns:\r\n A reference to the requested window class. 
Raises exception if not found.\r\n \"\"\"\r\n print(\"Attempting to load window class:\", window)\r\n return import_string(window)\r\n\r\n\r\ndef get_local_window_cls(window: str = None) -> Type[BaseWindow]:\r\n \"\"\"\r\n Attept to obtain a window class in the moderngl_window package\r\n using short window names such as `pyqt5` or `glfw`.\r\n\r\n Args:\r\n window (str): Name of the window\r\n\r\n Returns:\r\n A reference to the requested window class. Raises exception if not found.\r\n \"\"\"\r\n window = os.environ.get('MODERNGL_WINDOW') or window\r\n if not window:\r\n window = 'pyglet'\r\n\r\n return get_window_cls('moderngl_window.context.{}.Window'.format(window))\r\n\r\n\r\ndef parse_args(args=None):\r\n \"\"\"Parse arguments from sys.argv\"\"\"\r\n parser = argparse.ArgumentParser()\r\n\r\n parser.add_argument(\r\n '-wnd', '--window',\r\n choices=find_window_classes(),\r\n help='Name for the window type to use',\r\n )\r\n parser.add_argument(\r\n '-fs', '--fullscreen',\r\n action=\"store_true\",\r\n help='Open the window in fullscreen mode',\r\n )\r\n parser.add_argument(\r\n '-vs', '--vsync',\r\n type=valid_bool,\r\n default=\"1\",\r\n help=\"Enable or disable vsync\",\r\n )\r\n parser.add_argument(\r\n '-s', '--samples',\r\n type=int,\r\n default=4,\r\n help=\"Specify the desired number of samples to use for multisampling\",\r\n )\r\n parser.add_argument(\r\n '-c', '--cursor',\r\n type=valid_bool,\r\n default=\"true\",\r\n help=\"Enable or disable displaying the mouse cursor\",\r\n )\r\n parser.add_argument(\r\n '--size',\r\n type=valid_window_size,\r\n help=\"Window size\",\r\n )\r\n parser.add_argument(\r\n '--size_mult',\r\n type=valid_window_size_multiplier,\r\n default=1.0,\r\n help=\"Multiplier for the window size making it easy scale the window\",\r\n )\r\n\r\n return parser.parse_args(args or sys.argv[1:])\r\n\r\n\r\ndef find_window_classes() -> List[str]:\r\n \"\"\"\r\n Find available window packages\r\n\r\n Returns:\r\n A list of avaialble 
window packages\r\n \"\"\"\r\n return [\r\n path.parts[-1] for path in Path(__file__).parent.joinpath('context').iterdir()\r\n if path.is_dir() and path.parts[-1] not in IGNORE_DIRS\r\n ]\r\n\r\n\r\ndef import_string(dotted_path):\r\n \"\"\"\r\n Import a dotted module path and return the attribute/class designated by the\r\n last name in the path. Raise ImportError if the import failed.\r\n\r\n Args:\r\n dotted_path: The path to attempt importing\r\n\r\n Returns:\r\n Imported class/attribute\r\n \"\"\"\r\n try:\r\n module_path, class_name = dotted_path.rsplit('.', 1)\r\n except ValueError as err:\r\n raise ImportError(\"%s doesn't look like a module path\" % dotted_path) from err\r\n\r\n module = import_module(module_path)\r\n\r\n try:\r\n return getattr(module, class_name)\r\n except AttributeError as err:\r\n raise ImportError('Module \"%s\" does not define a \"%s\" attribute/class' % (\r\n module_path, class_name)) from err\r\n\r\n\r\ndef valid_bool(value):\r\n \"\"\"Validator for bool values\"\"\"\r\n value = value.lower()\r\n\r\n if value in OPTIONS_TRUE:\r\n return True\r\n\r\n if value in OPTIONS_FALSE:\r\n return False\r\n\r\n raise argparse.ArgumentTypeError('Boolean value expected. Options: {}'.format(OPTIONS_ALL))\r\n\r\n\r\ndef valid_window_size(value):\r\n \"\"\"\r\n Validator for window size parameter.\r\n\r\n Valid format is \"[int]x[int]\". For example \"1920x1080\".\r\n \"\"\"\r\n try:\r\n width, height = value.split('x')\r\n return int(width), int(height)\r\n except ValueError:\r\n pass\r\n\r\n raise argparse.ArgumentTypeError(\r\n \"Valid size format: int]x[int]. 
Example '1920x1080'\",\r\n )\r\n\r\n\r\ndef valid_window_size_multiplier(value):\r\n \"\"\"\r\n Validates window size multiplier\r\n\r\n Must be an integer or float creater than 0\r\n \"\"\"\r\n try:\r\n val = float(value)\r\n if val > 0:\r\n return val\r\n except ValueError:\r\n pass\r\n\r\n raise argparse.ArgumentTypeError(\r\n \"Must be a positive int or float\",\r\n )\r\n","sub_path":"moderngl_window/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"9776235","text":"import uuid\nimport zmq\nimport time\nimport socket\n\nfrom .ast_def.expressions import *\nfrom .passive_object import ExpPassiveObject\nfrom . import global_conf\n\n\nclass ActorConnector:\n actor_id: str\n\n messages_socket: zmq.Socket\n return_socket: zmq.Socket\n write_socket: zmq.Socket\n\n def __init__(self, ):\n self.actor_id = str(uuid.uuid4())\n\n context = zmq.Context()\n self.messages_socket = context.socket(zmq.SUB)\n self.messages_socket.connect(f'tcp://127.0.0.1:{global_conf.env_read_port}')\n self.messages_socket.subscribe(f'message:{self.actor_id}')\n # messages_socket.subscribe('')\n\n self.return_socket = context.socket(zmq.SUB)\n self.return_socket.connect(f'tcp://127.0.0.1:{global_conf.env_read_port}')\n self.return_socket.subscribe(f'return:{self.actor_id}')\n\n self.write_socket = context.socket(zmq.PUB)\n self.write_socket.connect(f'tcp://127.0.0.1:{global_conf.env_write_port}')\n\n time.sleep(0.2) # ensure connection established\n self.write_socket.send_multipart([\n 'create:{}'.format(self.actor_id).encode('ascii'),\n b'ACK'\n ])\n\n def receive_message(self):\n topic, data = self.messages_socket.recv_multipart()\n data = eval(data)\n return_data = {\n 'return': data.get('return'),\n 'return_env': data.get('return_env')\n }\n return data['name'], data['args'], return_data\n\n def receive_return_value(self):\n topic, result = self.return_socket.recv_multipart()\n return eval(result.decode('ascii'))\n\n def return_result(self, return_data, result):\n self.write_socket.send_multipart([\n 'return:{return}:{return_env}'.format(**return_data).encode('ascii'),\n str(result).encode('ascii')\n ])\n\n def send_message(self, actor_id, env_name, name, args, return_to: str = None):\n self.write_socket.send_multipart([\n 'message:{}:{}'.format(actor_id, env_name).encode('ascii'),\n str({\n 'name': name,\n 'args': args,\n 'return': return_to,\n 'return_env': global_conf.env_name\n }).encode('ascii')\n ])\n\n\ndef 
send_initial_message(actor_id, name, args):\n context = zmq.Context()\n\n write_socket = context.socket(zmq.PUB)\n write_socket.connect(f'tcp://127.0.0.1:{global_conf.env_write_port}')\n time.sleep(0.2)\n write_socket.send_multipart([\n 'main:{}'.format(actor_id).encode('ascii'),\n b'ACK'\n ])\n write_socket.send_multipart([\n 'message:{}:{}'.format(actor_id, global_conf.env_name).encode('ascii'),\n str({'name': name, 'args': args, 'return': None}).encode('ascii')\n ])\n\n\ndef setup_env_connection(port):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect(('127.0.0.1', int(port)))\n s.send(b'init')\n resp = s.recv(1024).decode('ascii')\n\n env_name, read, write = resp.split(':')\n\n global_conf.env_name = env_name\n global_conf.env_read_port = int(read)\n global_conf.env_write_port = int(write)\n print('Connected to env', env_name)\n\n s.send(b'ACK')\n mains = s.recv(1024).decode('ascii')\n s.close()\n\n return eval(mains)\n","sub_path":"evaluation/connector.py","file_name":"connector.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"339684728","text":"#! python3\n#simple old school rpg, improved from earlier versions\nimport shelve, os, random\n\nkilled = 0\nmonster = 'none'\ndamaged = 0\n\nprint('Enter you name:')\nname = str(input())\nprint(' ')\nprint('Hello and welcome to the game ' + name + '!')\nprint('This is a simple old school RPG where you input simple commands to play')\nprint('Type help at any point in time to get help')\nprint(' ')\nprint('Continue?')\ninput()\n\nwhile True: #loading system\n try: #loads savefile\n shelfFile = shelve.open(r'.\\RPG Game Data\\\\' + name + '\\\\' + name) #checks if savefolder exists\n playerhealth = shelfFile['playerhealth'] #checks if save exists\n playerinventory = shelfFile['playerinventory']\n playerstamina = shelfFile['playerstamina']\n playerweapons = shelfFile['playerweapons']\n print(' ')\n break #loading done\n except FileNotFoundError: #no save folder? no problem\n os.makedirs(r'.\\RPG Game Data\\\\' + name)\n except KeyError: #creates new character\n startplayerinventory = {'Gold': { 'Amount': 10}}\n startplayerweapons = {'Rusty Sword': {'Damage': 10, 'Stamina': 30}}\n startplayerhealth = 100\n startplayerstamina = 100\n shelfFile['playerhealth'] = startplayerhealth #saves the info so the loop can continue\n shelfFile['playerinventory'] = startplayerinventory\n shelfFile['playerweapons'] = startplayerweapons\n shelfFile['playerstamina'] = startplayerstamina\n\ndef showinventory(playerinv, playerwep): #show inventory function\n print('Your inventory:')\n for k, v in playerinv.items(): #picks item from playerinventory\n print(str(k) + ': ' + str(v.get('Amount')))\n print(' ')\n print('Your Weapons:')\n for k, v in playerwep.items():\n print(k + ': Damage: ' + str(v.get('Damage')) + ' Stamina: ' + str(v.get('Stamina')))\n\ndef playerattack(playerinv, response, stamina):\n try:\n rattack = random.randint(playerinv[response, response + 20])\n global damaged\n damaged = rattack\n except Exception:\n print('Invalid action, try 
again')\n\ndef encounters():\n global number\n number = random.randint(1, 4)\n if number == 1:\n print('You encountered a dragon!')\n elif number == 2:\n print('You encountered a troll!')\n elif number == 3:\n print('You encountered a wyvern!')\n elif number == 4:\n print('You encountered a bandit!')\n else:\n print('Error!')\n\ndef monsters():\n print('type stuff here in future')\n\nwhile True:\n encounters()\n action = str(input())\n print(' ')\n if action == 'inventory':\n showinventory(playerinventory, playerweapons)\n elif action in playerinventory.items() == True: \n playerattack(playerinventory, action, playerstamina) #write item, attack\n else:\n print('Invalid action, try again')\n print(' ')\n","sub_path":"advanced stuff/RPG GAME.py","file_name":"RPG GAME.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"582648951","text":"import os\nimport re\nimport collections\nfrom collections import Counter\n\nquantity_words_to_show = 10\n\ndef load_data(filepath):\n if not os.path.exists(filepath):\n return None\n with open(filepath, 'r',encoding = 'utf-8') as file_handler:\n data = file_handler.read()\n return (data)\n\ndef get_most_frequent_words():\n action = load_data(r'filepath')\n words = re.findall(r'\\w+', action)\n resulting_count = collections.Counter(words)\n word_counts = Counter(resulting_count)\n top_ten = word_counts.most_common(quantity_words_to_show)\n return (top_ten)\n\n\n\nif __name__ == '__main__':\n result = get_most_frequent_words()\n print (result)","sub_path":"lang_frequency.py","file_name":"lang_frequency.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"478621262","text":"#==================================================================================\n# \tProvides a simple interface for reading and controlling the\n# GPIO pins on the Raspberry Pi model B, B+ and Raspberry Pi 2 from a Web Browser.\n# \n# \t\t\t\tAuthor: Matt Thomas\n# \t\t\t\t\t2015\n#==================================================================================\nimport sys\nimport RPi.GPIO as GPIO\nGPIO.setmode(GPIO.BOARD)\n\nhelp = sys.argv\n\npin = [3, 5, 7, 8, 10, 11, 12, 13, 15, 16, 18, 19, 21, 22, 23, 24, 26, 29, 31, 32, 33, 35, 36, 37, 38, 40 ]\n\npinIn = help[2]\nmode = help[1]\n\nif int(mode) == 1:\n\tGPIO.setup(pin[int(pinIn)] , GPIO.OUT)\n\tGPIO.output(pin[int(pinIn)] , True)\n\t\n\nif int(mode) == 0:\n\tGPIO.setup(pin[int(pinIn)] , GPIO.OUT)\n\tGPIO.output(pin[int(pinIn)] , False)\n","sub_path":"pinwrite.py","file_name":"pinwrite.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"87959879","text":"#!/usr/bin/python\n\nimport numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as plt\n\ndef hoLeeExample3(inds,t_max=1.0,tau_max=2.0,r0=0.05,sig=0.01,verbose=False):\n return hoLeeExample([[foo[0]]*3 for foo in inds],t_max=t_max,tau_max=tau_max,r0=r0,sig=sig,verbose=verbose)\n\ndef hoLeeExample2(inds,t_max=1.0,tau_max=2.0,r0=0.05,sig=0.01,verbose=False):\n return hoLeeExample([[foo[0],foo[1],foo[1]] for foo in inds],t_max=t_max,tau_max=tau_max,r0=r0,sig=sig,verbose=verbose)\n\ndef hoLeeExample(inds,t_max=1.0,tau_max=2.0,r0=0.05,sig=0.01,verbose=False):\n \n '''\n Compute the Ho Lee Example in Beck-Tempone-Szepessy-Zouraris\n '''\n \n thi = lambda tau: 0.1*(1-np.exp(-1*tau))\n f0 = lambda tau: r0-sig*sig*0.5*tau*tau+thi(tau)\n\n if verbose:\n print('Evaluating the Ho Lee example.')\n print('r0: %f, vol %f , t_max %f , tau_max %f'%(r0,sig,t_max,tau_max))\n print('Evaluating with the following indices:')\n for ind in inds:\n print(ind)\n\n # largest values of the discretisation numbers\n N_t = max([foo[0] for foo in inds])\n N_tau_1 = max([foo[1] for foo in inds])\n N_tau_2 = max([foo[2] for foo in inds])\n\n N_t = 2**(N_t)+1\n N_tau_1 = 2**(N_tau_1)+1\n N_tau_2 = 2**(N_tau_2)+1\n\n if verbose:\n print('Meshes constructed.')\n print('The number of mesh points in time: %d'%(N_t))\n print('Mesh points in maturity: %d before t_max, %d after'%(N_tau_2,N_tau_1))\n\n times = np.linspace(0,t_max,N_t)\n taus_1 = np.linspace(0,t_max,N_tau_2)\n taus_2 = np.linspace(t_max,tau_max,N_tau_1)\n\n taus = np.concatenate((taus_1[:-1],taus_2))\n\n # initial values\n\n dt = times[1]-times[0]\n Ws = np.concatenate((np.zeros(1),np.sqrt(dt)*np.cumsum(sp.randn(N_t-1))))\n if verbose:\n plt.figure()\n plt.plot(times,Ws)\n plt.xlabel('$t$')\n plt.ylabel('$W_t$')\n plt.grid(1)\n\n rv = []\n \n for ind in inds:\n if verbose:\n print('Evaluating the following index:')\n print(ind)\n t_jump = 2**(max([foo[0] for foo in inds])-ind[0])\n tau_jump_1 = 
2**(max([foo[1] for foo in inds])-ind[1])\n tau_jump_2 = 2**(max([foo[2] for foo in inds])-ind[2])\n if verbose:\n print('Jumps in each of the categories: %d , %d , %d'%(t_jump,tau_jump_1,tau_jump_2))\n tau_eff = np.concatenate((taus_1[0:-1:tau_jump_2],taus_2[0::tau_jump_1]))\n t_eff = times[::t_jump]\n f_eff = np.zeros((len(t_eff),len(tau_eff)+2))\n W_eff = Ws[0::t_jump]\n dt_eff = t_eff[1]-t_eff[0]\n if verbose:\n plt.figure()\n plt.plot(tau_eff,f_eff[0,:-2]+f0(tau_eff),'r-')\n # Time stepping\n lstar = 0\n for j in range(1,len(f_eff)):\n if verbose:\n print('Time step No %d, t=%.4f. tau_n=%.4f'%(j,t_eff[j],tau_eff[lstar]))\n #print('Time step No %d , t=%f'%(j,t_eff[j]))\n f_eff[j,lstar:] = 1*f_eff[j-1,lstar:]\n f_eff[j,lstar:-2] += sig*sig*(tau_eff[lstar:]-t_eff[j-1])*dt_eff\n f_eff[j,lstar:-2] += sig*(W_eff[j]-W_eff[j-1])\n if verbose:\n plt.plot(tau_eff[lstar:],f_eff[j,lstar:-2]+f0(tau_eff[lstar:]),'b-')\n f_eff[j,-2] += f_eff[j-1,lstar]\n while tau_eff[lstar+1]<= t_eff[j]:\n lstar += 1\n f_eff[j,-2] = (f_eff[j-1,lstar]+f0(times[j-1]))*dt_eff\n # the last component unchanged\n if verbose:\n plt.plot(tau_eff[lstar:],f_eff[-1,lstar:-2]+f0(tau_eff[lstar:]),'r--')\n plt.plot(tau_eff[lstar:],r0-0.5*sig*sig*(tau_eff[lstar:]-t_max)**2+thi(tau_eff[lstar:]),'k-.')\n # plot the short rate\n lstar = 0\n tPlot = 1*t_eff\n fttPlot = 0*t_eff\n for j in range(0,len(f_eff)):\n fttPlot[j] = f_eff[j,lstar]\n while tau_eff[lstar+1]<= t_eff[j]:\n lstar += 1\n plt.plot(tPlot,fttPlot+f0(tPlot),'r-')\n plt.xlabel('$\\\\tau$')\n plt.ylabel('$f(t,\\\\tau)$')\n plt.grid(1)\n\n rv.append(1.0-f_eff[-1,-2])\n if verbose:\n print('The discount term equals %f'%(rv[-1]))\n tv = 0.0\n lstar = 0\n while tau_eff[lstar+1]<= t_max:\n lstar += 1\n if verbose:\n print('The underlying term equals %f'%(np.sum(f_eff[-1,lstar:-3])*(tau_eff[-1]-tau_eff[-2])))\n #print('dtau term %f'%((tau_eff[-1]-tau_eff[-2])))\n #print('average forward curve %f'%(np.mean(f_eff[-1,lstar:-3])))\n rv[-1] *= 
np.sum(f_eff[-1,lstar:-3])*(tau_eff[-1]-tau_eff[-2])\n if verbose:\n print('The quantity of interest is %f'%(rv[-1]))\n \n return rv\n \n","sub_path":"tests/HJM/HJM.py","file_name":"HJM.py","file_ext":"py","file_size_in_byte":4751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"285136289","text":"#!/usr/bin/env python\n# coding: utf8\n#\n# Copyright (c) 2020 Centre National d'Etudes Spatiales (CNES).\n#\n# This file is part of PANDORA\n#\n# https://github.com/CNES/Pandora_pandora\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\"\"\"\nThis module contains functions allowing to save the results and the configuration of Pandora pipeline.\n\"\"\"\n\nimport json\nimport errno\nimport os\nimport rasterio\nimport xarray as xr\nfrom typing import Dict\n\nfrom .output_tree_design import get_out_dir, get_out_file_path\n\n\ndef write_data_array(data_array: xr.DataArray, filename: str,\n dtype: rasterio.dtypes = rasterio.dtypes.float32) -> None:\n \"\"\"\n Write a xarray.DataArray in a tiff file\n\n :param data_array: data\n :type data_array: 2D xarray.DataArray (row, col) or 3D xarray.DataArray (row, col, indicator)\n :param filename: output filename\n :type filename: string\n :param dtype: band types\n :type dtype: GDALDataType\n \"\"\"\n if len(data_array.shape) == 2:\n row, col = data_array.shape\n with rasterio.open(filename, mode='w+', driver='GTiff', width=col, height=row, count=1,\n dtype=dtype) as source_ds:\n source_ds.write(data_array.data, 1)\n\n else:\n row, col, depth = data_array.shape\n with rasterio.open(filename, mode='w+', driver='GTiff', width=col, height=row, count=depth, dtype=dtype) as source_ds:\n for d in range(1, depth + 1):\n source_ds.write(data_array.data[:, :, d-1], d)\n\n\ndef mkdir_p(path: str) -> 
None:\n \"\"\"\n Create a directory without complaining if it already exists.\n \"\"\"\n try:\n os.makedirs(path)\n except OSError as exc: # requires Python > 2.5\n if exc.errno == errno.EEXIST and os.path.isdir(path):\n pass\n else:\n raise\n\n\ndef save_results(ref: xr.Dataset, sec: xr.Dataset, output: str) -> None:\n \"\"\"\n Save results in the output directory\n\n :param ref: reference dataset, which contains the variables :\n - disparity_map : the disparity map in the geometry of the reference image 2D DataArray (row, col)\n - confidence_measure : the confidence measure in the geometry of the reference image 3D DataArray (row, col, indicator)\n - validity_mask : the validity mask in the geometry of the reference image 2D DataArray (row, col)\n :type ref: xr.Dataset\n :param sec: secondary dataset. If there is no validation step, the secondary Dataset will be empty.\n If a validation step is configured, the dataset will contain the variables :\n - disparity_map : the disparity map in the geometry of the secondary image 2D DataArray (row, col)\n - confidence_measure : the confidence in the geometry of the secondary image 3D DataArray (row, col, indicator)\n - validity_mask : the validity mask in the geometry of the reference image 2D DataArray (row, col)\n :type sec: xr.Dataset\n :param output: output directory\n :type output: string\n \"\"\"\n # Create the output dir\n mkdir_p(output)\n\n # Save the reference results\n write_data_array(ref['disparity_map'], os.path.join(output, get_out_file_path('ref_disparity.tif')))\n write_data_array(ref['confidence_measure'], os.path.join(output, get_out_file_path('ref_confidence_measure.tif')))\n write_data_array(ref['validity_mask'], os.path.join(output, get_out_file_path('ref_validity_mask.tif')),\n dtype=rasterio.dtypes.uint16)\n\n # If a validation step is configured, save the secondary results\n if len(sec.sizes) != 0:\n write_data_array(sec['disparity_map'], os.path.join(output, 
get_out_file_path('sec_disparity.tif')))\n write_data_array(sec['confidence_measure'], os.path.join(output, get_out_file_path('sec_confidence_measure.tif')))\n write_data_array(sec['validity_mask'], os.path.join(output, get_out_file_path('sec_validity_mask.tif')),\n dtype=rasterio.dtypes.uint16)\n\n\ndef save_config(output: str, user_cfg: Dict) -> None:\n \"\"\"\n Save the user configuration in json file\n\n :param output: Path to output directory\n :type output: string\n :param user_cfg: user configuration\n :type user_cfg: dict\n \"\"\"\n \n # Create the output dir\n mkdir_p(os.path.join(output, get_out_dir('config.json')))\n\n # Save user configuration in json file\n with open(os.path.join(output, get_out_file_path('config.json')), 'w') as f:\n json.dump(user_cfg, f, indent=2)\n","sub_path":"pandora/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":5037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"336620848","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCreated on 2018.03.09\nFinished on 2018.04.13\n@author: Wang Yuntao\n\"\"\"\n\nimport re\nimport os\nimport time\nimport json\nimport utils\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\n\"\"\"\n function:\n __init__(self, _user_name=None, _password=None, _browser_type=\"Chrome\", \n is_headless=False) __init__\n sign_in(self) Facebook登录\n make_post(self) 发布状态\n page_refresh(self, _refresh_times=0) 页面下拉刷新\n get_myself_info(self) 获取当前登录账��的信息 user_name, user_id, homepage_url\n enter_homepage_self(self) 进入当前账户的个人主页 (方便对用户好友和照片的获取)\n get_user_id(self, _user_homepage_url) 获取用户id\n get_friends_number(self) 获取当前账户的好友个数\n get_friends_list(self, _friends_number=None) 获取当前账户的好友列表 (列表存储各好友的user_name, user_id, homepage_url)\n search_users(self, _keyword, user_number) 获取当前搜索条件下的用户列表 (列表存储各用户的user_name, homepage_url, location, user_id)\n \n get_photos_list(self) 获取照片的href,方便对原图的链接,发表时间等进行获取\n get_photo_info(self, _photo_href) 获取照片的链接,发布时间,发布位置,尺寸与对应的文字说明\n get_photos_info_list(self, _photos_href_list) 批量获取照片的链接,发布时间,发布位置,尺寸与对应的文字说明\n download_photos_one(self, _homepage_url) 下载单个用户的图片\n download_photos_batch(self, _homepage_url_list) 批量下载多个用户的图片\n params_modify(self, post_class_name, \n bottom_xpath_search, bottom_xpath_other, \n main_container_class_name, \n myself_id_class_name) 用于对可变参数进行修改\n \n Note:\n 实际使用中还需要根据Facebook当前的页面架构进行相应调整\n\"\"\"\n\n\nclass Facebook:\n def __init__(self, _email=None, _password=None, _browser_type=\"Chrome\", _is_headless=False, _speed_mode=\"Normal\"):\n \"\"\"\n 构造函数\n :param _email: Facebook登录所需邮箱\n :param _password: Facebook登录对应的密码\n :param _browser_type: 浏览器类型 (Chrome | Firefox)\n :param _is_headless: 是否适用无头浏览器\n :param _speed_mode: 运行速度模式选择 (Extreme | Fast | Normal | 
Slow)\n Return:\n browser_state:\n 0 - init fail\n 1 - init success\n \"\"\"\n # the variables which are fixed\n self.url = \"https://www.facebook.com/\" # facebook页面url\n self.email = _email # 帐户邮箱\n self.password = _password # 账户密码\n self.soup_type = \"html.parser\" # beautifulsoup解析类型\n\n # some identifier\n self.browser_state = None # 浏览器选择状态\n self.login_state = None # 登录状态\n\n # the variable about the current login account\n self.user_name = None # 当前登录账号的用户昵称\n self.user_id = None # 当前登录账号的用户ID\n self.homepage_url = None # 当前登录账号的主页url\n self.friends_number = 0 # 当前登录账号的好友数量\n\n # some parameters of webdriver\n self.cookie = None # 当前登录账号的cookie\n self.session_id = None # 会话id,方便在当前打开窗口继续运行\n self.executor_url = None # 会话的命令执行器连接\n self.cookies_path = \"json_(\" + _email + \").json\" # 用于保存用户cookies的文件\n\n # the initialization of list\n self.user_info_friends = list() # 好友信息列表 (user_name, user_id, homepage_url)\n self.user_info_search = list() # 通过搜索得到的用户信息列表 (user_name, homepage_url)\n\n # the variables which are static\n self.clearfix_flag = \"clearfix\" # 网页消除浮动标识\n self.user_cover_class_name = \"cover\" # 用户封面对应的class name\n self.bottom_class_name = \"uiHeaderTitle\" # 用于确定图片、视频下载时有无下拉到最底的class name\n self.bottom_xpath_search = \\\n \"//*[@id=\\\"browse_end_of_results_footer\\\"]/div/div\" # 用户搜索时对应的bottom标识\n self.bottom_xpath_other = \\\n \"//*[@id=\\\"timeline-medley\\\"]/div/div[2]/div[1]/div/div\" # 照片好友信息遍历时的bottom标识\n self.full_screen_id = \"fbPhotoSnowliftFullScreenSwitch\" # 全屏操作对应的id\n self.main_container_class_name = \"homeSideNav\" # 用户获取当前登录账户信息的class name\n self.myself_id_class_name = \"data-nav-item-id\" # 用户id对应的字段名\n self.friends_list_class_name = \"uiProfileBlockContent\"\n self.friends_number_id_name = \"pagelet_timeline_medley_friends\" # 用于获取好友数量的id name\n self.homepage_url_postfix_1 = \"?fref=pb&hc_location=friends_tab\" # 一类URL的后缀\n self.homepage_url_postfix_2 = \"&fref=pb&hc_location=friends_tab\" # 二类URL的后缀\n 
self.browse_results_container = \"//*[@id=\\\"BrowseResultsContainer\\\"]/div[1]\"\n\n # the variables which may be variant regularly\n self.post_class_name = \"_3jk\" # 状态发布所需class name\n\n # 用户搜索所需class name\n self.user_search_class_name = None\n self.user_name_class_name = None\n\n # the selection of browser\n if _browser_type == \"Chrome\":\n try:\n options = webdriver.ChromeOptions()\n if _is_headless is True:\n options.set_headless()\n options.add_argument(\"--disable - gpu\")\n else:\n self.driver = webdriver.Chrome(options=options)\n self.browser_state = 1\n except AttributeError:\n self.browser_state = 0\n\n if _browser_type == \"Firefox\":\n try:\n options = webdriver.FirefoxOptions()\n if _is_headless is True:\n options.set_headless()\n options.add_argument(\"--disable - gpu\")\n else:\n self.driver = webdriver.Firefox(options=options)\n self.browser_state = 1\n except AttributeError:\n self.browser_state = 0\n\n # the run speed mode selection\n self.timeout = utils.get_timeout(_speed_mode)\n\n def params_modify(self, cookies_path, post_class_name, bottom_xpath_search, bottom_xpath_other, main_container_class_name,\n myself_id_class_name):\n self.cookies_path = cookies_path\n self.post_class_name = post_class_name\n self.bottom_xpath_search = bottom_xpath_search\n self.bottom_xpath_other = bottom_xpath_other\n self.main_container_class_name = main_container_class_name\n self.myself_id_class_name = myself_id_class_name\n\n def login_with_account(self):\n \"\"\"\n facebook login with username and password\n :return: a status code —— True: Success, False: False\n Note:\n 如果facebook账号登录成功,则当前页面的url为:https://www.facebook.com\n 如果facebook账号登录失败,则当前页面的url为:https://www.facebook.com/login.php?login_attempt=1&lwv=100\n \"\"\"\n self.driver.get(self.url)\n try:\n # username\n email_element = WebDriverWait(self.driver, timeout=5).until(\n EC.presence_of_element_located((By.ID, \"email\")))\n email_element.clear()\n email_element.send_keys(self.user_name)\n 
time.sleep(1)\n\n # password\n password_element = WebDriverWait(self.driver, timeout=5).until(\n EC.presence_of_element_located((By.ID, \"pass\")))\n password_element.clear()\n password_element.send_keys(self.password)\n time.sleep(1)\n\n # click\n login_element = WebDriverWait(self.driver, timeout=5).until(\n EC.presence_of_element_located((By.ID, \"loginbutton\")))\n login_element.click()\n except:\n pass\n\n def login_with_cookies(self):\n \"\"\"\n facebook login with cookies\n :return: a status code —— True: Success, False: False\n Note:\n 如果facebook账号登录成功,则当前页面的url为:https://www.facebook.com\n 如果facebook账号登录失败,则当前页面的url为:https://www.facebook.com/login.php?login_attempt=1&lwv=100\n \"\"\"\n if os.path.exists(self.cookies_path):\n with open(self.cookies_path, 'r', encoding='utf-8') as file:\n list_cookies = json.loads(file.read())\n if len(list_cookies) != 0:\n self.driver.get(self.url)\n for cookie in list_cookies:\n try:\n self.driver.add_cookie({\n \"domain\": cookie[\"domain\"],\n \"name\": cookie[\"name\"],\n \"value\": cookie[\"value\"],\n \"path\": cookie[\"path\"],\n \"expiry\": cookie[\"expiry\"]\n })\n except KeyError:\n pass\n\n self.driver.get(self.url)\n\n def sign_in(self):\n \"\"\"\n facebook login via webdriver, cookies login first, if no cookies, login with account and save the cookies\n :return: a status code —— True: Success, False: False\n Note:\n 如果facebook账号登录成功,则当前页面的url为:https://www.facebook.com\n 如果facebook账号登录失败,则当前页面的url为:https://www.facebook.com/login.php?login_attempt=1&lwv=100\n \"\"\"\n try:\n self.login_with_cookies()\n except:\n self.login_with_account()\n\n # status judgement\n current_page_url = self.driver.current_url\n if current_page_url != self.url:\n self.login_state = 0\n else:\n self.login_state = 1\n self.save_cookie()\n\n def save_cookie(self):\n # 获取cookie并通过json模块将dict转化成str\n dict_cookies = self.driver.get_cookies()\n json_cookies = json.dumps(dict_cookies)\n # 登录完成后,将cookie保存到本地文件\n if 
os.path.exists(self.cookies_path):\n pass\n else:\n with open(self.cookies_path, \"w\") as file:\n file.write(json_cookies)\n\n def make_post(self):\n current_url = self.driver.current_url\n if current_url != self.url:\n self.enter_homepage_self()\n else:\n pass\n post_element = self.driver.find_element_by_class_name(self.post_class_name)\n post_element.click()\n\n def page_refresh_to_bottom(self, item, timeout=3, poll_frequency=0.5):\n \"\"\"\n 页面刷新\n :param item: 下拉页类型,分为用户搜索和照片搜索两类\n :param timeout: 模拟下拉的时间延迟\n :param poll_frequency: 模拟下拉的时间频率\n :return: NULL\n \"\"\"\n if item == \"users\":\n xpath = self.bottom_xpath_search\n else:\n xpath = self.bottom_xpath_other\n\n while True:\n try:\n WebDriverWait(self.driver, timeout=timeout, poll_frequency=poll_frequency).until(\n EC.presence_of_element_located((By.XPATH, xpath)))\n break\n except:\n self.driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')\n\n def page_refresh(self, _refresh_times=0):\n \"\"\"\n 页面刷新\n :param _refresh_times: 刷新次数\n :return: NULL\n \"\"\"\n for i in range(_refresh_times):\n self.driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')\n try:\n bottom_element = WebDriverWait(self.driver, timeout=3).until(\n EC.presence_of_element_located((By.XPATH, self.bottom_xpath_search)))\n except:\n try:\n bottom_element = WebDriverWait(self.driver, timeout=3).until(\n EC.presence_of_element_located((By.XPATH, self.bottom_xpath_other)))\n except:\n bottom_element = None\n\n if bottom_element is not None:\n break\n\n def get_myself_info(self):\n \"\"\"\n 获取当前登录账户的信息\n :return:\n user_name: 用户名\n user_id: 用户id\n homepage_url: 用户主页\n \"\"\"\n self.get(self.url)\n page_source = self.driver.page_source\n soup = BeautifulSoup(page_source, self.soup_type)\n\n main_container = soup.find(class_=self.main_container_class_name)\n id_class = main_container.li\n user_id = id_class.get(self.myself_id_class_name)\n user_info_class = main_container.find_all(\"a\")\n user_name = 
user_info_class[1].get(\"title\")\n homepage_url = user_info_class[1].get(\"href\")\n homepage_url = homepage_url.split(\"?\")[0]\n\n self.user_name, self.user_id, self.homepage_url = user_name, user_id, homepage_url\n\n def enter_homepage_self(self):\n \"\"\"\n 进入个人主页,facebook登录后页面仍停留在https://www.facebook.com,需要进一步跳转到个人主页,获取到主页url,\n 方便对好友列表,照片的获取\n :return:\n \"\"\"\n if self.user_id is None:\n self.get_myself_info()\n\n self.get(self.homepage_url)\n\n def get_user_id(self, user_homepage_url):\n \"\"\"\n 根据用户的主页url获取其user id\n :param user_homepage_url: 用户的主页url\n :return: user id\n \"\"\"\n if utils.url_type_judge(user_homepage_url) == 1:\n self.driver.get(user_homepage_url)\n page = self.driver.page_source\n soup = BeautifulSoup(page, self.soup_type)\n cover = soup.find(class_=self.user_cover_class_name)\n user_id = cover.a.get(\"data-referrerid\")\n else:\n user_id = user_homepage_url.split(\"id=\")[-1]\n\n return user_id\n\n def get_friends_number(self):\n \"\"\"\n 获取当前登录账户的好友数量\n :return:\n self.friends_number: 当前登录账户的好友数量\n \"\"\"\n friends_page_url = utils.get_jump_url(self.homepage_url, \"friends\")\n self.get(friends_page_url)\n page_source = self.driver.page_source\n soup = BeautifulSoup(page_source, self.soup_type)\n\n friends_table = self.driver.find_element_by_id(self.friends_number_id_name)\n friends_table_class_name = friends_table.get_attribute(\"class\")\n\n block = soup.find(class_=friends_table_class_name)\n content = block.find_all(\"div\")\n content_text = content[5].a.text\n pattern = re.compile(r\"\\d+\\.?\\d*\")\n\n self.friends_number = int(pattern.findall(content_text)[0])\n\n def get_friends_list(self, _friends_number=None):\n \"\"\"\n 获取当前登录账户的好友列表\n :param _friends_number: 待检索的好友数量\n :return:\n self.user_info_friends: 好友用户信息 [user_name, user_id, homepage_url]\n \"\"\"\n if len(self.user_info_friends) == 0:\n self.get_friends_number()\n if _friends_number is None or _friends_number > self.friends_number:\n 
self.page_refresh_to_bottom(\"friends\")\n else:\n refresh_times = _friends_number // 20\n self.page_refresh(refresh_times)\n page_source = self.driver.page_source\n soup = BeautifulSoup(page_source, self.soup_type)\n\n # 获取好友url列表\n contents = soup.find_all(class_=self.friends_list_class_name)\n for content in contents:\n homepage_url = content.a.get(\"href\")\n if utils.url_type_judge(homepage_url) == 1:\n homepage_url = homepage_url.replace(self.homepage_url_postfix_1, \"\")\n if utils.url_type_judge(homepage_url) == 2:\n homepage_url = homepage_url.replace(self.homepage_url_postfix_2, \"\")\n user_name = content.a.text\n pattern = re.compile(r\"id=\\d+\")\n user_id = pattern.findall(content.a.get(\"data-hovercard\"))[0].split(\"id=\")[-1]\n\n self.user_info_friends.append([user_name, user_id, homepage_url])\n else:\n pass\n\n def get_user_info(self, item):\n data_be_str = item.div.get(\"data-bt\")\n user_id = utils.str2dict(data_be_str)[\"id\"]\n\n # 获取user homepage url\n user_info = item.find(class_=self.clearfix_flag)\n user_homepage_url = user_info.a.get(\"href\")\n\n user_name_block = user_info.div.find(class_=self.clearfix_flag).find_all(\"div\")\n # user_name_class_name = user_name_block[-1].a.get(\"class\")[0]\n user_name = user_name_block[-1].a.text\n\n about_items = user_info.find_all(\"div\")\n about_class = about_items[11].find_all(\"div\")\n\n try:\n about = about_class[5].text\n except:\n about = None\n\n return [user_name, user_id, user_homepage_url, about]\n\n def get_class_name_for_search(self):\n page_source = self.driver.page_source\n soup = BeautifulSoup(page_source, self.soup_type)\n\n element = self.driver.find_element_by_xpath(self.browse_results_container)\n user_search_class_name = element.get_attribute(\"class\")\n item = soup.find(class_=user_search_class_name)\n user_info = item.find(class_=self.clearfix_flag)\n user_name_block = user_info.div.find(class_=self.clearfix_flag).find_all(\"div\")\n user_name_class_name = 
user_name_block[-1].a.get(\"class\")[0]\n\n self.user_search_class_name = user_search_class_name\n self.user_name_class_name = user_name_class_name\n\n def search_users(self, _keyword=\"wahaha\", user_number=None):\n \"\"\"\n 根据关键字进行用户搜索\n :param _keyword: 待检索关键字\n :param user_number: 需要检索的用户数量\n :return:\n self.user_info_search: 用户信息列表 [user_name, user_id, location, homepage_url]\n \"\"\"\n user_info_search = list()\n search_url = \"https://www.facebook.com/search/str/\" + _keyword + \"/keywords_users\"\n self.get(search_url)\n page_source = self.driver.page_source\n soup = BeautifulSoup(page_source, self.soup_type)\n empty_flag = soup.find(id=\"empty_result_error\")\n if empty_flag is None:\n # 页面刷新\n if user_number is None:\n self.page_refresh_to_bottom(\"users\")\n else:\n refresh_times = user_number // 5\n self.page_refresh(refresh_times)\n\n # 页面解析\n page_source = self.driver.page_source\n soup = BeautifulSoup(page_source, self.soup_type)\n\n if self.user_search_class_name is None:\n self.get_class_name_for_search()\n\n items = soup.find_all(class_=self.user_search_class_name)\n\n # 列表填充\n if user_number is None:\n for item in items:\n user_info_search.append(self.get_user_info(item))\n else:\n index = 0\n while index < user_number:\n user_info_search.append(self.get_user_info(items[index]))\n index += 1\n else:\n pass\n\n return user_info_search\n\n def get_photos_href_list(self, _homepage_url):\n \"\"\"\n 获取照片\n :param _homepage_url: 待访问的用户主页链接\n :return:\n photos_href_list: 图像链接列表\n \"\"\"\n photos_url = utils.get_jump_url(_homepage_url, \"photos\")\n self.get(photos_url)\n page = self.driver.page_source\n soup = BeautifulSoup(page, self.soup_type)\n try:\n bottom_element = self.driver.find_element_by_xpath(self.bottom_xpath_other)\n except:\n bottom_element = None\n\n photos_href_list = list()\n while bottom_element is None:\n self.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n page = self.driver.page_source\n soup = 
BeautifulSoup(page, self.soup_type)\n try:\n bottom_element = self.driver.find_element_by_xpath(self.bottom_xpath_other)\n except:\n bottom_element = None\n\n if bottom_element is not None:\n break\n\n for data in soup.find_all(class_=\"uiMediaThumb\"):\n photos_href_list.append(data.get(\"href\"))\n\n return photos_href_list\n\n def get_photo_info(self, _photo_href):\n \"\"\"\n 根据图像的链接对其信息进行获取\n :param _photo_href: 图像链接\n :return:\n link: 原始图像对应的链接\n date: 图像发布对应的时间\n location: 图像发布对应的位置\n text: 图像发布对应的文本内容\n width: 图像的实际宽度\n height: 图像的实际高度\n \"\"\"\n self.get(_photo_href)\n page = self.driver.page_source\n soup = BeautifulSoup(page, self.soup_type)\n\n date = self.get_photo_publish_date(soup)\n location = self.get_photo_publish_location(soup)\n text = self.get_photo_publish_text(soup)\n\n full_screen_element = WebDriverWait(self.driver, 5).until(\n EC.presence_of_element_located((By.ID, self.full_screen_id)))\n full_screen_element.click()\n page = self.driver.page_source\n soup = BeautifulSoup(page, self.soup_type)\n\n link = self.get_photo_link(soup)\n width, height = self.get_photo_size(soup)\n\n return link, date, location, text, width, height\n\n def get_photos_info_list(self, _photos_href_list):\n photos_info_list = list()\n for photo_href in _photos_href_list:\n link, date, location, text, width, height = self.get_photo_info(photo_href)\n photos_info_list.append([link, date, location, text, width, height])\n\n return photos_info_list\n\n def download_photos_one(self, homepage_url, folder_name=\"./\",\n start_date=None, end_date=None, keyword=\"\",\n width_left=0, width_right=5000, height_left=0, height_right=5000):\n \"\"\"\n 单个用户的照片下载\n :param homepage_url: 用户主页\n :param folder_name: 待保存文件夹路径\n\n 以下为筛选条件\n :param start_date: 待下载图片的起始日期 (default: None)\n :param end_date: 待下载图片的终止日期 (default: None)\n :param keyword: 待下载图片对应的文字中包含的关键字 (default: \"\")\n :param width_left: 图片宽度下界 (default: 0)\n :param width_right: 图片宽度上界 (default: 5000)\n :param height_left: 
图片高度下界 (default: 0)\n :param height_right: 图片高度上界 (default: 5000)\n :return: NULL\n Note:\n photo info:\n link, date, location, text, width, height\n \"\"\"\n utils.folder_make(folder_name)\n photos_href_list = self.get_photos_href_list(homepage_url)\n photos_info_list = self.get_photos_info_list(photos_href_list)\n\n if start_date is None and end_date is None:\n for photo_info in photos_info_list:\n utils.download_photos(photo_info[0], folder_name)\n else:\n start_date_unix = utils.get_unix_stamp(start_date)\n end_date_unix = utils.get_unix_stamp(end_date)\n for photo_info in photos_info_list:\n unix_time = photo_info[1]\n if start_date_unix < unix_time < end_date_unix \\\n and keyword in photo_info[3]:\n if width_left < photo_info[4] < width_right and height_left < photo_info[5] < height_right:\n utils.download_photos(photo_info[0], folder_name)\n else:\n pass\n else:\n pass\n\n def download_photos_batch(self, user_info_list,\n start_date=None, end_date=None, keyword=\"\",\n width_left=0, width_right=5000, height_left=0, height_right=5000):\n \"\"\"\n 多个用户照片下载\n :param user_info_list: 用户信息列表\n user_name, user_id, user_homepage_url, about\n 以下为筛选条件\n :param start_date: 待下载图片的起始日期 (default: None)\n :param end_date: 待下载图片的终止日期 (default: None)\n :param keyword: 待下载图片对应的文字中包含的关键字 (default: \"\")\n :param width_left: 图片宽度下界 (default: 0)\n :param width_right: 图片宽度上界 (default: 5000)\n :param height_left: 图片高度下界 (default: 0)\n :param height_right: 图片高度上界 (default: 5000)\n :return: NULL\n \"\"\"\n for user_info in user_info_list:\n folder_name = user_info[1]\n homepage_url = user_info[2]\n self.download_photos_one(homepage_url, folder_name=folder_name,\n start_date=start_date, end_date=end_date, keyword=keyword,\n width_left=width_left, width_right=width_right,\n height_left=height_left, height_right=height_right)\n print(\"Download completed.\")\n\n def get(self, url):\n \"\"\"\n 页面跳转,为避免多余跳转,先对当前页面的url进行判断,若url相同则不再跳转\n :param url: 待跳转的url\n :return: NULL\n \"\"\"\n 
current_url = self.driver.current_url\n if url == current_url:\n pass\n else:\n self.driver.get(url)\n\n @staticmethod\n def get_photo_link(soup):\n spotlight = soup.find(class_=\"spotlight\")\n _link = spotlight.get(\"src\") # 图片链接\n\n return _link\n\n @staticmethod\n def get_photo_size(soup):\n spotlight = soup.find(class_=\"spotlight\")\n style = spotlight.get(\"style\") # 图片尺寸字符串\n _width, _height = utils.get_size(style) # 获取图像的宽和高\n\n return _width, _height\n\n @staticmethod\n def get_photo_publish_date(soup):\n publish_time = soup.find(\"span\", {\"id\": \"fbPhotoSnowliftTimestamp\"})\n if publish_time is None:\n _date = None\n else:\n _date = publish_time.a.abbr.get(\"data-utime\") # 图片发表的时间 (Unix时间戳)\n\n return _date\n\n @staticmethod\n def get_photo_publish_location(soup):\n location_object = soup.find(class_=\"fbPhotosImplicitLocLink\") # 图片发表的位置信息\n if location_object is None:\n _location = None\n else:\n _location = location_object.text\n\n return _location\n\n @staticmethod\n def get_photo_publish_text(soup):\n text_object = soup.find(\"span\", {\"class\": \"hasCaption\"}) # 图片发表时对应的文字说明\n if text_object is None:\n _text = []\n else:\n _text = text_object.text\n\n return _text\n\n\nif __name__ == \"__main__\":\n email, password = utils.get_account(\"account.csv\", 0)\n fb = Facebook(email, password, \"Chrome\", False)\n if fb.browser_state == 1:\n fb.sign_in()\n fb.enter_homepage_self()\n fb.make_post()\n cookies = fb.cookies\n\n else:\n print(\"Initialization failed.\")\n","sub_path":"facebook.py","file_name":"facebook.py","file_ext":"py","file_size_in_byte":29527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"433427911","text":"\"\"\"\nAdapted from: https://github.com/udacity/deep-reinforcement-learning/blob/master/dqn/solution/model.py\n\nThe code was modified to add one more hidden layer as suggested by the paper: \nBudget Constrained Bidding by Model-free Reinforcement Learning in Display Advertising\n(https://arxiv.org/pdf/1802.08365.pdf)\n\"\"\"\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport os\nimport random\n\n\nclass Network(nn.Module):\n \"\"\"Actor (Policy) Model.\"\"\"\n\n def __init__(self, state_size, action_size, fc1_units=100, \n fc2_units=100, fc3_units=100):\n \"\"\"Initialize parameters and build model.\n Params\n ======\n state_size (int): Dimension of each state\n action_size (int): Dimension of each action\n seed (int): Random seed\n fc1_units (int): Number of nodes in first hidden layer\n fc2_units (int): Number of nodes in second hidden layer\n \"\"\"\n super(Network, self).__init__()\n set_seed()\n self.fc1 = nn.Linear(state_size, fc1_units)\n self.fc2 = nn.Linear(fc1_units, fc2_units)\n self.fc3 = nn.Linear(fc2_units, fc3_units)\n self.fc4 = nn.Linear(fc3_units, action_size)\n\n def forward(self, state):\n \"\"\"Build a network that maps state -> action values.\"\"\"\n x = F.relu(self.fc1(state))\n x = F.relu(self.fc2(x))\n x = F.relu(self.fc3(x))\n return self.fc4(x)\n\ndef set_seed():\n os.environ['PYTHONHASHSEED'] = str(0)\n random.seed(0)\n np.random.seed(0)\n torch.manual_seed(0)\n torch.cuda.manual_seed_all(0)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n","sub_path":"src/rtb_agent/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"301988961","text":"# -*- coding: utf-8 -*-\nimport pywinauto\nfrom abc import ABCMeta, abstractmethod\n\nclass VoiroAuto_Meta(metaclass=ABCMeta):\n @abstractmethod\n def talk(self, text):\n raise NotImplemented()\n\nclass Voiro_Pywinauto(VoiroAuto_Meta):\n def search_child_byclassname(self, class_name, uiaElementInfo, target_all = False):\n target = []\n # 全ての子要素検索\n for childElement in uiaElementInfo.children():\n # ClassNameの一致確認\n if childElement.class_name == class_name:\n if target_all == False:\n return childElement\n else:\n target.append(childElement)\n if target_all == False:\n # 無かったらFalse\n return False\n else:\n return target\n\n\n def search_child_byname(self, name, uiaElementInfo):\n # 全ての子要素検索\n for childElement in uiaElementInfo.children():\n # Nameの一致確認\n if childElement.name == name:\n return childElement\n # 無かったらFalse\n return False\n\n def talk(self, speakPhrase):\n # デスクトップのエレメント\n parentUIAElement = pywinauto.uia_element_info.UIAElementInfo()\n # voiceroidを捜索する\n voiceroid2 = self.search_child_byname(\"VOICEROID2\",parentUIAElement)\n # *がついている場合\n if voiceroid2 == False:\n voiceroid2 = self.search_child_byname(\"VOICEROID2*\",parentUIAElement)\n\n # テキスト要素のElementInfoを取得\n TextEditViewEle = self.search_child_byclassname(\"TextEditView\",voiceroid2)\n textBoxEle = self.search_child_byclassname(\"TextBox\",TextEditViewEle)\n\n # コントロール取得\n textBoxEditControl = pywinauto.controls.uia_controls.EditWrapper(textBoxEle)\n\n # テキスト登録\n textBoxEditControl.set_edit_text(speakPhrase)\n\n\n # ボタン取得\n buttonsEle = self.search_child_byclassname(\"Button\",TextEditViewEle,target_all = True)\n # 再生ボタンを探す\n playButtonEle = \"\"\n for buttonEle in buttonsEle:\n # テキストブロックを捜索\n textBlockEle = self.search_child_byclassname(\"TextBlock\",buttonEle)\n if textBlockEle.name == \"再生\":\n playButtonEle = buttonEle\n break\n\n # ボタンコントロール取得\n playButtonControl = pywinauto.controls.uia_controls.ButtonWrapper(playButtonEle)\n\n # 再生ボタン押下\n 
playButtonControl.click()\n\n\n\n #----- In development functions ------------------\n def look_children(self, obj):\n for child in obj.children():\n print(child.class_name)\n #print(child.handle(),end=\" \")\n #print(child.automation_id())\n\n\n def get_voiro2(self):\n # デスクトップのエレメント\n parentUIAElement = pywinauto.uia_element_info.UIAElementInfo()\n # voiceroidを捜索する\n voiceroid2 = self.search_child_byname(\"VOICEROID2\",parentUIAElement)\n # *がついている場合\n if voiceroid2 == False:\n voiceroid2 = self.search_child_byname(\"VOICEROID2*\",parentUIAElement)\n return voiceroid2\n\n def get_tuning_tab(self, tabName, voiro2=None):\n if voiro2==None:\n voiro2 = self.get_voiro2()\n tuning_tab = self.search_child_byclassname(\"TabControl\", voiro2, target_all=True)[1]\n target = self.search_child_byname(tabName, tuning_tab)\n return target\n\nif __name__==\"__main__\":\n pass\n","sub_path":"voiro_auto.py","file_name":"voiro_auto.py","file_ext":"py","file_size_in_byte":3639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"217473609","text":"# -*- coding:utf-8 -*-\r\n\r\n__author__ = 'ken'\r\n\r\nimport time\r\n\r\ncurrent_time = time.strftime(\"%Y%m%d%H%M%S\", time.localtime()) # 获取当前的时间戳\r\n\r\n\"\"\"定义测试需要用到的数据\"\"\"\r\nparm = {\r\n 'platformName': 'iOS',\r\n 'paltformVersion': '11.2.2',\r\n 'deviceName': 'iPhone 8',\r\n 'UDID': '69db97834cc4c58922171fe542285219eae4f3a9',\r\n 'packageName': 'com.oceanwing.BatteryCam', # 定义apk包名\r\n 'noReset': true,\r\n 'appActivity': 'com.oceanwing.battery.cam.main.SplashActivity', # 定义此启动的activity\r\n 'apk_path': 'D:\\python_project\\Eufy\\data\\eufy_v1.0.5.apk', # 定义apk存放路径\r\n 'screenshot_path': \"D:\\\\python_project\\\\Eufy\\\\screenshot\\\\\", # 定义截图路径\r\n # 'login_user': '894772205@qq.com', # 定义登录时用到的账号\r\n # 'login_pwd': '123456789', # 定义登录时用到的密码\r\n 'login_user': 'yuanhui.li@oceanwing.com',\r\n 'login_pwd': '123456789',\r\n 'sign_email': 'ken@{}.com'.format(current_time), # 定义注册时用的邮箱\r\n 'sign_pwd': '123456789', # 定义注册时用的密码\r\n 'grant_email': '894772205@qq.com', # 定义邀请用户邮箱\r\n 'grant_pwd': '123456789', # 定义邀请用户登录密码\r\n 'report_path': r'D:\\python_project\\Eufy\\report', # 定义个报告存放的路径,支持相对路径\r\n}\r\n\r\n\"\"\"\r\n# htc M8w\r\n'paltformVersion': '4.4.2',\r\n'deviceName': 'HC45XWM00171',\r\n\"\"\"\r\n\r\n\"\"\"\r\n# ZTE AXON 7 mini\r\n'paltformVersion': '6.0.1',\r\n'deviceName': '968d6b8',\r\n\"\"\"\r\n\r\n\"\"\"\r\n# 三星S7\r\n'paltformVersion': '7.0',\r\n'deviceName': 'bce6916b',\r\n\"\"\"\r\n","sub_path":"data/custom.py","file_name":"custom.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"226019645","text":"import cv2\nimport numpy as np\nimport random\nimport math\nimport imgFunctions as img\nimport time\nimport map as mp\nimport collision as col\nimport driver as driver\nimport bot as bt\nimport concurrent.futures\n\ncounter = 1\n\n# lists and dicts required to construct bot objects and graph\nbots = list()\ngraph = {}\nexcp_Set = set()\n\nshortestPathtoNodes = {} # dict to hold shortest path to each node from each node\nPathCost = {} # dict to hold path cost from node to node\n\n#Path_list = []\n\n# parameters required for the GUI\nBOT_COUNT = 5\nRADI = 50\nGRID_SIZE = 24\ndS = 0.1\nWINDOW_SIZE = 700 # square window, height = width\nCELL_SIZE = WINDOW_SIZE/GRID_SIZE \n\nbackg_H = 0\nbackg_W = 0\nbot_H = 0\nbot_W = 0\n\n# boolean flag to signify destination is reached\ndest_reached = False\n\npaused = False\nset_dest = False\n\n\"\"\" draw bot images in the overlay canvas\n return : overlay(4 dims with the alpha layer)\"\"\"\n\n# function to draw bots on the canvas\ndef draw_bots(bots):\n # create a overlay layer to draw all the robots with the alpha\n overlay = np.zeros((backg_H, backg_W, 4), dtype=\"uint8\")\n for bot in bots:\n x = bot.curr_x*CELL_SIZE\n y = bot.curr_y*CELL_SIZE\n\n x = (0 if (x < 0) else ((backg_W - CELL_SIZE)\n if x > (backg_W - CELL_SIZE) else x))\n y = (0 if (y < 0) else ((backg_H - CELL_SIZE)\n if y > (backg_H - CELL_SIZE) else y))\n\n #print(bot.curr_x, bot.curr_y, x, y)\n angle = bot.angle\n x_start = int(x)\n y_start = int(y)\n\n # set the state of the bot acording to the neighbour bots distatnce\n bot.getState(bots)\n\n # add the additional status color bar to the basic bot png\n if (bot.state == 0) :\n addon = bot.bot_imgs['blue']\n else:\n addon = bot.bot_imgs['red']\n\n # ---------------------Draw destination lines and rectangles -----------------------\n\n #color = (125, 0, 100)\n\n if bot.dest_x != -1:\n cv2.line(overlay, (int(bot.x), int(bot.y)),\n (bot.dest_x, bot.dest_y), (0, 200, 200, 255), 
2)\n\n\n cv2.rectangle(overlay, (bot.dest_x-int(CELL_SIZE), bot.dest_y-int(CELL_SIZE)),\n (bot.dest_x+int(CELL_SIZE), bot.dest_y+int(CELL_SIZE)), color, 2)\n\n bot_img = cv2.add(bot.bot_imgs['bot'], addon)\n bot_img = img.rotate_image(bot_img, angle)\n roi = overlay[y_start:y_start+bot_W, x_start:x_start+bot_W] # region of interest\n overlay[y_start:y_start+bot_W, x_start:x_start+bot_W] = roi + bot_img\n\n return overlay\n\n\nif __name__ == \"__main__\":\n\n \n # load backgroug image according to the grid size\n backg_H, backg_W, background = img.loadBackground(GRID_SIZE, WINDOW_SIZE)\n bot_H, bot_W, bot_pngs = img.loadBotImgs(\n GRID_SIZE, WINDOW_SIZE) # load all pngs of the bot to a dict\n bot_png = bot_pngs['bot'] # get the bot image\n\n #print(backg_H, backg_W)\n #print(bot_H, bot_W)\n\n cv2.namedWindow(\"image\")\n# cv2.setMouseCallback(\"image\", mosueEvent)\n\n # hard coded order schedulee\n inputSchedule = {'0':[(1,6)], '1':[(22,6)], '2':[(11,15)], '3':[(12,6)], '4':[(22,6), (11,15)]\n \n }\n\n\n ''' initializes the graph and the grid '''\n \n # adding shelves\n mp.add_shelves(excp_Set,(3,3), (9,9)) # Rack A\n mp.add_shelves(excp_Set,(3,12), (10,18)) # Rack C\n mp.add_shelves(excp_Set,(13,3), (21,9)) # Rack B\n mp.add_shelves(excp_Set,(13,12), (21,18)) # Rack D\n\n # mp.add_shelves(excp_Set,(8,0), (21,1)) # run-off area near delivery station\n\n # mp.add_shelves(excp_Set,(2,3), (3,10)) # run-off area near rack A\n # mp.add_shelves(excp_Set,(2,12), (3,19)) # run-off area near rack C\n # mp.add_shelves(excp_Set,(13,3), (14,10)) # run-off area near rack B\n # mp.add_shelves(excp_Set,(13,12), (14,19)) # run-off area near rack D\n\n # mp.add_nodes(excp_Set, (2,6))\n # mp.add_nodes(excp_Set, (2,7))\n # mp.add_nodes(excp_Set, (2,15))\n # mp.add_nodes(excp_Set, (2,16))\n # mp.add_nodes(excp_Set, (13,6))\n # mp.add_nodes(excp_Set, (13,7))\n # mp.add_nodes(excp_Set, (13,15))\n # mp.add_nodes(excp_Set, (13,16))\n\n \n\n # creates the graph\n mp.generate_graph(graph, 
excp_Set, 31)\n \n \n ''' Left side roads along columns '''\n # for (1,3) --> (1,9)\n for i in range(3,10):\n # path towards increasing Y \n # remove backward link from (1,10) to (1,9)\n del graph[(1,i)][(1,i-1)]\n del graph[(1,i)][(0,i)]\n del graph[(1,i)][(2,i)]\n\n # for (0,2) --> (0,9)\n for i in range(2,10):\n # path towards decreasing Y\n # remove forward link from (0,1) to (0,2) \n del graph[(0,i)][(0,i+1)]\n del graph[(0,i)][(1,i)]\n\n\n # for (1,12) --> (1,18)\n for i in range(12,19):\n # path towards increasing Y\n # remove backward link from (1,19) to (1,18)\n del graph[(1,i)][(1,i-1)]\n del graph[(1,i)][(0,i)]\n del graph[(1,i)][(2,i)]\n\n # for (0,12) --> (0,19)\n for i in range(12,20):\n # path towards decreasing Y\n # remove forward link from (0,11) to (0,12)\n del graph[(0,i)][(0,i+1)]\n del graph[(0,i)][(1,i)]\n \n ''' Bottom side roads along rows '''\n # for (1,20) --> (10,20)\n for i in range(1,11):\n # path towards decreasing X\n # remove forward link from (0,20) to (1,20) \n del graph[(i,20)][(i+1,20)]\n del graph[(i,20)][(i,19)]\n del graph[(i,20)][(i,21)]\n \n # for (13,20) --> (22,20)\n for i in range(13,23):\n # path towards decreasing X\n # remove forward link from (12,20) to (13,20)\n del graph[(i,20)][(i+1,20)]\n del graph[(i,20)][(i,19)]\n del graph[(i,20)][(i,21)]\n\n # for (2,19) --> (10,19)\n for i in range(2,11):\n # path towards increasing X\n # remove backward link from (11,19) to (10,19) \n del graph[(i,19)][(i-1,19)]\n del graph[(i,19)][(i,20)]\n #del graph[(1,20)][(1,21)]\n \n # for (13,19) --> (22,19)\n for i in range(13,23):\n # path towards increasing X\n # remove backward link (23,19) to (22,19)\n del graph[(i,19)][(i-1,19)]\n del graph[(i,19)][(i,20)]\n #del graph[(1,20)][(1,21)]\n \n ''' Right side roads along columns '''\n\n # for (22,3) --> (22,9)\n for i in range(3,10):\n # path towards decreasing Y\n # remove forward link from (22,2) to (22,3)\n del graph[(22,i)][(22,i+1)]\n del graph[(22,i)][(23,i)]\n\n # for (22,12) 
--> (22,18)\n for i in range(12,19):\n # path towards decreasing Y\n # remove forward link from (22,11) to (22,12)\n del graph[(22,i)][(22,i+1)]\n del graph[(22,i)][(23,i)]\n\n\n # for (23,2) --> (23,9)\n for i in range(2,10):\n # path towards increasing Y\n # remove backward link from (23,10) to (23,9)\n del graph[(23,i)][(23,i-1)]\n del graph[(23,i)][(22,i)]\n del graph[(23,i)][(24,i)]\n \n \n\n # for (23,12) --> (23,19)\n for i in range(12,20):\n # path towards increasing Y\n # remove backward link from (23,20) to (23,19) \n del graph[(23,i)][(23,i-1)]\n del graph[(23,i)][(22,i)]\n del graph[(23,i)][(24,i)]\n\n ''' Top side roads along row '''\n\n # for (4,1) --> (10,1)\n for i in range(4,11):\n # path towards increasing X\n # remove backward link from (11,1) to (10,1)\n del graph[(i,1)][(i-1,1)]\n del graph[(i,1)][(i,2)]\n if i>=2:\n del graph[(i,1)][(i,0)]\n \n # for (13,1) --> (22,1)\n for i in range(13,23):\n # path towards increasing X\n # remove backward link from (23,1) to (22,1) \n del graph[(i,1)][(i-1,1)]\n del graph[(i,1)][(i,0)]\n del graph[(i,1)][(i,2)]\n\n # for (4,2) --> (10,2)\n for i in range(4,11):\n # path towards decreasing X\n # remove forward link from (3,2) to (4,2)\n del graph[(i,2)][(i+1,2)]\n del graph[(i,2)][(i,1)]\n del graph[(i,2)][(i,3)]\n \n # for (13,2) --> (21,2)\n for i in range(13,22):\n # path towards decreasing X\n # remove forward link from (12,2) to (13,2)\n del graph[(i,2)][(i+1,2)]\n del graph[(i,2)][(i,1)]\n del graph[(i,2)][(i,3)]\n\n ''' Center channel roads vertical '''\n\n # for (11,3) --> (11,9)\n for i in range(3,10):\n # path towards increasing Y\n # remove backward link from (11,10) to (11,9)\n del graph[(11,i)][(11,i-1)]\n del graph[(11,i)][(12,i)]\n del graph[(11,i)][(10,i)]\n\n # for (11,12) --> (11,19)\n for i in range(12,20):\n # path towards increasing Y\n del graph[(11,i)][(11,i-1)]\n del graph[(11,i)][(12,i)]\n del graph[(11,i)][(10,i)]\n\n # for (12,3) --> (12,9)\n for i in range(3,10):\n # path towards 
decreasing Y\n # remove forward link from (12,2) to (12,3)\n del graph[(12,i)][(12,i+1)]\n del graph[(12,i)][(11,i)]\n del graph[(12,i)][(13,i)]\n\n # for (12,12) --> (12,18)\n for i in range(12,19):\n # path towards decreasing Y\n # remove forward link from (12,11) to (12,12)\n del graph[(12,i)][(12,i+1)]\n del graph[(12,i)][(11,i)]\n del graph[(12,i)][(13,i)]\n \n ''' center channel roads horizontal '''\n # for (2,10) to (10,10)\n for i in range(2,11):\n # path towards decreasing Y\n # remove forward link from (1,10) to (2,10)\n del graph[(i,10)][(i+1,10)]\n del graph[(i,10)][(i,11)]\n del graph[(i,10)][(i,9)]\n\n # for (13,10) to (21,10)\n for i in range(13,22):\n # path towards decreasing Y\n # remove forward link from (12,10) to (13,10)\n del graph[(i,10)][(i+1,10)]\n del graph[(i,10)][(i,11)]\n del graph[(i,10)][(i,9)]\n\n # for (2,11) to (10,11)\n for i in range(2,11):\n # path towards increasing Y\n # remove forward link from (11,11) to (10,11)\n del graph[(i,11)][(i-1,11)]\n del graph[(i,11)][(i,12)]\n del graph[(i,11)][(i,10)]\n\n # for (13,11) to (21,11)\n for i in range(13,22):\n # path towards increasing Y\n # remove forward link from (22,11) to (21,11)\n del graph[(i,11)][(i-1,11)]\n del graph[(i,11)][(i,12)]\n del graph[(i,11)][(i,10)]\n\n\n\n ''' Junction points '''\n del graph[(1,2)][(0,2)]\n\n del graph[(1,19)][(0,19)]\n\n del graph[(22,2)][(23,2)]\n\n del graph[(22,19)][(23,19)]\n\n ''' removing additional points '''\n del graph[(1,10)][(1,9)]\n del graph[(0,1)][(0,2)]\n del graph[(1,19)][(1,18)]\n del graph[(0,11)][(0,12)]\n del graph[(0,20)][(1,20)]\n del graph[(12,20)][(13,20)]\n #del graph[(11,19)][(10,19)]\n #del graph[(23,19)][(22,19)]\n del graph[(22,2)][(22,3)]\n del graph[(22,11)][(22,12)]\n del graph[(23,10)][(23,9)]\n del graph[(23,20)][(23,19)]\n del graph[(11,1)][(10,1)]\n del graph[(23,1)][(22,1)]\n del graph[(3,2)][(4,2)]\n del graph[(12,2)][(13,2)]\n del graph[(11,10)][(11,9)]\n #del graph[(11,19)][(11,18)]\n del 
graph[(12,2)][(12,3)]\n del graph[(12,11)][(12,12)]\n del graph[(1,10)][(2,10)]\n del graph[(12,10)][(13,10)]\n del graph[(11,11)][(10,11)]\n del graph[(22,11)][(21,11)]\n\n # del graph[(2,0)][(2,1)]\n # del graph[(2,1)][(2,0)]\n\n # del graph[(3,0)][(3,1)]\n # del graph[(3,1)][(3,0)]\n\n # del graph[(4,0)][(4,1)]\n # del graph[(4,1)][(4,0)]\n\n # del graph[(5,0)][(5,1)]\n # del graph[(5,1)][(5,0)]\n\n # del graph[(4,0)][(4,1)]\n # del graph[(4,1)][(4,0)]\n\n # destination array\n #racks = [[(0,0),(4,10)], [(1,0),(13,19)], [(2,0), (20,20)]]\n #Path_list = [[]for _ in range(BOT_COUNT)]\n \n while True:\n\n \n # creates bots images at the initial run\n if len(bots) == 0:\n for i in range(BOT_COUNT):\n imgs = bot_pngs.copy()\n bot = bt.Bot(i)\n \n #bot.setPos(i, 0)\n # each bot is positioned initially in a place where (x,y) coordinates are even\n # the mapping function used is as follows;\n # (x,y) ---> (2x,2y)\n bot.init_x = i\n bot.init_y = 0\n bot.curr_x = bot.init_x\n bot.curr_y = bot.init_y\n bot.angle = 180\n bot.waitFlag = False\n bot.setImgs(imgs)\n bots.append(bot)\n \n else: \n #col.collision(bots)\n #update(bots)\n\n for bot in bots:\n \n # check if the OrderComplete flag is up and inputSchedule dict is not empty\n if bot.OrderComplete and bool(inputSchedule) :\n # calculates the path\n driver.setPath(graph, shortestPathtoNodes, PathCost, bot, inputSchedule)\n\n # calls start function activate bots\n driver.start(bots) \n\n \n ''' ------------Draw bots ------------------------------ '''\n # get a overlay that contains the vector with aplha which has the current orientation of bots\n overlay = draw_bots(bots)\n # mask the background with the overlay\n masked_backg = cv2.bitwise_and(background, background, mask=cv2.bitwise_not(overlay[:, :, 3]))\n # add the overlay and the background\n finalImg = cv2.add(overlay[:, :, :3], masked_backg)\n\n # ------------Draw rect on selected cell --------------\n # x_cell, y_cell, CELL_SIZE = getCell(mouse_pos[0], 
mouse_pos[1])\n \n # color = (125, 0, 100) if mouse_state == cv2.EVENT_LBUTTONDOWN else (125, 255, 0)\n # cv2.rectangle(finalImg, (x_cell, y_cell),\n # (x_cell+CELL_SIZE, y_cell+CELL_SIZE), color, 2)\n\n #input(\"Press Enter to continue....\") \n cv2.imshow('image', finalImg)\n\n key = cv2.waitKey(5)\n\n if key == 27:\n break\n elif key == 32:\n paused = not paused","sub_path":"Software/Simulator/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":14184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"455266379","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n########################################################################################################################\n# Copyright © 2019-2020 Pi-Yueh Chuang, Lorena A. Barba, and G2 Integrated Solutions, LLC.\n# All Rights Reserved.\n#\n# Contributors: Pi-Yueh Chuang \n# J. Tracy Thorleifson \n#\n# Licensed under the BSD-3-Clause License (the \"License\").\n# You may not use this file except in compliance with the License.\n# You may obtain a copy of the License at: https://opensource.org/licenses/BSD-3-Clause\n#\n# BSD-3-Clause License:\n#\n# Redistribution and use in source and binary forms, with or without modification, are permitted provided\n# that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the\n# following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the\n# following disclaimer in the documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or\n# promote products derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES,\n# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE\n# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n########################################################################################################################\n\n\"\"\"\nWrite to NetCDF4 file with CF convention\n\"\"\"\n\nimport os\nimport sys\nimport numpy\nimport netCDF4\nimport datetime\nimport argparse\n\n\ndef get_state_interpolator(state, field=0):\n \"\"\"\n Get a Scipy interpolation object for a field on a AMR grid.\n \"\"\"\n import scipy.interpolate\n\n # the underlying patch in this state object\n p = state.patch\n\n # x, y arrays and also dx, dy for checking\n x, dx = numpy.linspace(p.lower_global[0]+p.delta[0]/2.,\n p.upper_global[0]-p.delta[0]/2.,\n p.num_cells_global[0], retstep=True)\n y, dy = numpy.linspace(p.lower_global[1]+p.delta[1]/2.,\n p.upper_global[1]-p.delta[1]/2.,\n p.num_cells_global[1], retstep=True)\n assert numpy.abs(dx-p.delta[0]) < 1e-6, \"{} {}\".format(dx, p.delta[0])\n assert numpy.abs(dy-p.delta[1]) < 1e-6, \"{} {}\".format(dy, p.delta[1])\n\n # get the interpolation object\n kx = ky = 3\n\n if x.size <= 3:\n kx = x.size - 1\n\n if y.size <= 3:\n ky = y.size - 1\n\n interp = scipy.interpolate.RectBivariateSpline(\n x, y, state.q[field, :, :],\n [p.lower_global[0], p.upper_global[0], p.lower_global[1], p.upper_global[1]],\n kx=kx, ky=ky)\n\n return interp\n\ndef interpolate(solution, x_target, y_target,\n field=0, shift=[0., 0.], level=1,\n clip=True, clip_less=1e-7, nodatavalue=-9999.):\n \"\"\"\n Do the interpolation.\n \"\"\"\n\n # allocate space 
for interpolated results\n values = numpy.zeros((y_target.size, x_target.size), dtype=numpy.float64)\n\n # loop through all AMR grids\n for state in solution.states:\n\n p = state.patch\n\n # only do subsequent jobs if this is at the target level\n if p.level != level:\n continue\n\n # get the indices of the target coordinates that are inside this patch\n xid = numpy.where((x_target>=p.lower_global[0])&(x_target<=p.upper_global[0]))[0]\n yid = numpy.where((y_target>=p.lower_global[1])&(y_target<=p.upper_global[1]))[0]\n\n # get interpolation object\n interpolator = get_state_interpolator(state, field)\n\n # if any target coordinate located in thie patch, do interpolation\n if xid.size and yid.size:\n values[yid[:, None], xid[None, :]] = \\\n interpolator(x_target[xid]-shift[0], y_target[yid]-shift[1]).T\n\n # apply nodatavalue to a threshold\n if clip:\n values[values:0.02\" \"distance:>=:50000\r\n\r\nOutput options: Required.\r\n- display: prints to stdout\r\n- csv_file: exports data to a csv\r\n\r\nFilters options: Optional. Input as: option:operation:value e.g. diameter:>=:0.042\r\n- is_hazardous:[=]:bool\r\n- diameter:[>=|=|<=]:float\r\n- distance:[>=|=|<=]:float\r\n\r\nReturn objects options: Optional, defaults to NEO if not specified.\r\n- NEO\r\n- Path\r\n\r\nFilename: Optional, used for specifying a filename for a csv to load data from. 
By default project looks for a csv in: data/neo_data.csv.\r\n\"\"\"\r\n\r\nimport argparse\r\nimport pathlib\r\nimport sys\r\nfrom datetime import datetime\r\n\r\nfrom exceptions import UnsupportedFeature\r\nfrom database import NEODatabase\r\nfrom search import Query, NEOSearcher\r\nfrom writer import OutputFormat, NEOWriter\r\n\r\nPROJECT_ROOT = pathlib.Path(__file__).parent.absolute()\r\n\r\n\r\ndef verify_date(datetime_str):\r\n \"\"\"\r\n Function that verifies datetime strings in YYYY-MM-DD format are valid dates.\r\n\r\n :param datetime_str: String representing datetime in %Y-%m-%d format\r\n :return: str: String representing datetime in %Y-%m-%d format\r\n \"\"\"\r\n try:\r\n date_time_obj = datetime.strptime(datetime_str, \"%Y-%m-%d\")\r\n return datetime_str\r\n except ValueError:\r\n error_message = f'Not a valid date: \"{datetime_str}\"'\r\n raise argparse.ArgumentTypeError(error_message)\r\n\r\n\r\ndef verify_output_choice(choice):\r\n \"\"\"\r\n Function that verifies output choice is a supported OutputFormat.\r\n\r\n :param choice: String representing an OutputFormat\r\n :return: str: String representing an OutputFormat\r\n \"\"\"\r\n options = OutputFormat.list()\r\n\r\n if choice not in options:\r\n error_message = f'Not a valid output option: \"{choice}\"'\r\n raise argparse.ArgumentTypeError(error_message)\r\n\r\n return options[options.index(choice)]\r\n\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser(description='Near Earth Objects (NEOs) Database')\r\n parser.add_argument('output', choices=OutputFormat.list(), type=verify_output_choice,\r\n help='Select option for how to output the search results.')\r\n parser.add_argument('-r', '--return_object', choices=['NEO', 'Path'],\r\n default='NEO', type=str,\r\n help='Select entity data to return.')\r\n parser.add_argument('-d', '--date', type=verify_date, help='YYYY-MM-DD format to find NEOs on the given date')\r\n parser.add_argument('-s', '--start_date', type=verify_date,\r\n 
help='YYYY-MM-DD format to find NEOs on the provided start date')\r\n parser.add_argument('-e', '--end_date', type=verify_date,\r\n help='YYYY-MM-DD format to find NEOs up to the end date')\r\n parser.add_argument('-n', '--number', type=int, help='Int representing max number of NEOs to return')\r\n parser.add_argument('-f', '--filename', type=str, help='Name of input csv data file')\r\n parser.add_argument('--filter', nargs='+', help='Select filter options with filter value: '\r\n 'is_hazardous:[=]:bool, '\r\n 'diameter:[>=|=|<=]:float, '\r\n 'distance:[>=|=|<=]:float.'\r\n 'Input as: [option:operation:value] '\r\n 'e.g. diameter:>=:0.042')\r\n\r\n args = parser.parse_args()\r\n var_args = vars(args)\r\n\r\n # Load Data\r\n if args.filename:\r\n filename = args.filename\r\n else:\r\n filename = f'{PROJECT_ROOT}/data/neo_data.csv'\r\n\r\n db = NEODatabase(filename=filename)\r\n\r\n try:\r\n db.load_data()\r\n except FileNotFoundError as e:\r\n print(f'File {var_args.get(\"filename\")} not found, please try another file name.')\r\n sys.exit()\r\n except Exception as e:\r\n print(Exception)\r\n sys.exit()\r\n\r\n # Build Query\r\n query_selectors = Query(**var_args).build_query()\r\n\r\n # Get Results\r\n try:\r\n results = NEOSearcher(db).get_objects(query_selectors)\r\n except UnsupportedFeature as e:\r\n print('Unsupported Feature; Write unsuccessful')\r\n sys.exit()\r\n\r\n # Output Results\r\n try:\r\n result = NEOWriter().write(\r\n data=results,\r\n format=args.output,\r\n )\r\n except Exception as e:\r\n print(e)\r\n print('Write unsuccessful')\r\n sys.exit()\r\n\r\n if result:\r\n print('Write successful.')\r\n else:\r\n print('Write unsuccessful.')\r\n\r\n","sub_path":"starter/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"436303833","text":"import sys\nimport socket\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver_address = ('127.0.0.1', 31002)\nsock.connect(server_address)\n\ntry:\n msg = open('/home/bella/PROGJAR_05111740000117/Tugas1/test.txt','rb')\n\n message = msg.read(1024)\n sock.sendall(message.encode())\n\n amount_received = 0\n amount_expected = len(message)\n while amount_received < amount_expected:\n data = sock.recv(1024).decode()\n amount_received += len(data)\n print('received data')\n\nfinally:\n print('closing socket')\n sock.close()","sub_path":"Tugas1/Tugas1a/client2.py","file_name":"client2.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"522973362","text":"''' move zeros to the end'''\n\n\ndef move_zeros(nums):\n i = 0\n j = i + 1\n while i < len(nums) - 1 and j < len(nums):\n if nums[i] == 0:\n j = i + 1\n while nums[j] == 0:\n j = j + 1\n if j == len(nums):\n return\n nums[i], nums[j] = nums[j], nums[i]\n\n else:\n i = i + 1\n\nnums = [4, 2, 4, 0, 0, 3, 0, 5, 1, 0]\nprint(nums)\nmove_zeros(nums)\nprint(nums)\nnums = [0, 0]\nprint(nums)\nmove_zeros(nums)\nprint(nums)\n\n\ndef move_zeros2(nums):\n zi = 0\n for i in range(len(nums)):\n nums[i], nums[zi] = nums[zi], nums[i]\n zi += 1\n\nimport timeit\nprint(timeit.timeit('move_zeros(nums)', globals=globals()))\nprint(timeit.timeit('move_zeros2(nums)', globals=globals()))\n","sub_path":"Interview Questions/Facebook/move_zeros.py","file_name":"move_zeros.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"461978958","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# For training \nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch import optim\nimport dlc_practical_prologue as prologue\nimport time\n\n\n##### Global parameters #####\n\n#For reproductibility\nSEED = 123 \ntorch.manual_seed(SEED)\n\nif torch.cuda.is_available(): \n DEVICE = torch.device('cuda')\nelse:\n DEVICE = torch.device('cpu')\n\n# Training parameters\nN = 1000 #Dataset size (train and test)\nBATCH_SIZE = 25 #Batch size for stochastic optimization\nEPOCHS = 200 # Number of epochs for one round of training\n\n#Learing rate evolution (multiply LEARNING_RATE by GAMMA every LR_STEP epochs)\nLEARNING_RATE = 1e-3 \nLR_STEP = int(0.5 * EPOCHS)\nGAMMA = 0.1\n\n# Auxiliary and main losses ponderation \nAUX_LOSS = 0.5\n\n##### Helper functions ####\n\ndef accuracy(model_output, test_target):\n \"\"\"Return the accuracy of the model output.\"\"\"\n nb_samples = model_output.shape[0]\n output_int = torch.zeros(nb_samples)\n \n # Convert probability to decision\n output_int = torch.argmax(model_output, 1)\n nb_errors = (output_int - test_target).type(torch.BoolTensor).sum().item()\n \n return (nb_samples - nb_errors) / nb_samples\n\ndef accuracyMnist(model_output, test_target):\n \"\"\"Return the accuracy of the predicted digits of a Digit Net.\"\"\"\n nb_samples = model_output.shape[0]\n model_class = model_output.argmax(dim=1)\n nb_errors = (model_class - test_target).type(torch.BoolTensor).sum().item()\n \n return (nb_samples - nb_errors) / nb_samples\n\n\ndef nb_param(model):\n \"\"\"Return the number of trained parameters of the input model.\"\"\"\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n##### Neural Nets Definition ####\n\nclass FCNet(nn.Module):\n \"\"\"Naive fully connected net.\"\"\"\n def __init__(self):\n super(FCNet, self).__init__()\n self.fc1 = nn.Linear(392,200)\n self.fc2 = nn.Linear(200,20)\n self.fc3 = 
nn.Linear(20,2)\n \n self.drop = nn.Dropout(0.25)\n self.activ = F.relu\n\n def forward(self, x):\n x = self.fc1(x.view(x.size(0),-1))\n x = self.activ(x)\n x = self.drop(x)\n x = self.fc2(x)\n x1, x2 = x[:, 0:10], x[:, 10:20]\n x = self.activ(x)\n x = self.fc3(x)\n \n return x, x1, x2\n\nclass ConvNet(nn.Module):\n \"\"\"Naive convolutional net.\"\"\"\n def __init__(self):\n super(ConvNet, self).__init__()\n self.conv1 = nn.Conv2d(2, 12, kernel_size=3) #(1,14,14) to (12,12,12)\n self.conv2 = nn.Conv2d(12, 32, kernel_size=3) #(12,12,12) to (32,10,10)\n self.max_pool1 = nn.MaxPool2d(kernel_size=2, stride=2) #(32,10,10) to (32,5,5)\n self.fc1 = nn.Linear(800, 100)\n self.fc2 = nn.Linear(100, 20)\n self.fc3 = nn.Linear(20, 2)\n self.drop = nn.Dropout(0.5)\n \n def forward(self, x): \n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.relu(x)\n x = self.max_pool1(x)\n x = self.drop(x.view(x.size(0), -1))\n x = self.fc1(x)\n x = F.relu(x)\n x = self.drop(x)\n x = self.fc2(x)\n x1, x2 = x[:, 0:10], x[:, 10:20]\n x = F.relu(x)\n x = self.fc3(x)\n \n return x, x1, x2\n\nclass DigitNet(nn.Module):\n \"\"\"Inspired by LeNet5, dropout 0.5 and 2 fc layers.\"\"\"\n def __init__(self):\n super(DigitNet, self).__init__()\n self.conv1 = nn.Conv2d(1, 12, kernel_size=3) #(1,14,14) to (12,12,12)\n self.conv2 = nn.Conv2d(12, 32, kernel_size=3) #(12,12,12) to (32,10,10)\n self.max_pool1 = nn.MaxPool2d(kernel_size=2, stride=2) #(32,10,10) to (32,5,5)\n self.fc1 = nn.Linear(800, 100)\n self.fc2 = nn.Linear(100, 10)\n self.drop = nn.Dropout(0.5)\n \n def forward(self, x): \n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.relu(x)\n x = self.max_pool1(x)\n x = self.drop(x.view(x.size(0), -1))\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n \n return x\n \n\nclass ConvSepNet(nn.Module):\n \"\"\"Run DigitNet in parrallel on each chanel and combine at the\n end with two fully connected layers (20->10->2). No Dropout in the f.c. 
layers.\n \"\"\"\n def __init__(self):\n super(ConvSepNet, self).__init__()\n self.mnistNet = DigitNet()\n self.fc1 = nn.Linear(20,10)\n self.fc2 = nn.Linear(10,2)\n\n\n def forward(self, x):\n x1, x2 = x[:,0:1,:,:], x[:,1:2,:,:]\n x1 = self.mnistNet(x1)\n x2 = self.mnistNet(x2) \n x = torch.cat((x1, x2), 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n \n return x, x1, x2\n \n\nclass FinalDigitNet(nn.Module):\n \"\"\"Inspired by LeNet5, dropout 0.5 and 3 fc layers\"\"\"\n def __init__(self):\n super(FinalDigitNet, self).__init__()\n self.conv1 = nn.Conv2d(1, 12, kernel_size=3) #(1,14,14) to (12,12,12)\n self.conv2 = nn.Conv2d(12, 32, kernel_size=3) #(12,12,12) to (32,10,10)\n self.max_pool1 = nn.MaxPool2d(kernel_size=2, stride=2) #(32,10,10) to (32,5,5)\n self.fc1 = nn.Linear(800, 400)\n self.fc2 = nn.Linear(400, 100)\n self.fc3 = nn.Linear(100, 10)\n self.drop = nn.Dropout(0.5)\n \n def forward(self, x): \n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.relu(x)\n x = self.max_pool1(x)\n x = self.drop(x.view(x.size(0), -1))\n x = self.fc1(x)\n x = F.relu(x)\n x = self.drop(x)\n x = self.fc2(x)\n x = F.relu(x)\n x = self.drop(x)\n x = self.fc3(x)\n \n return x\n \nclass FinalNet(nn.Module):\n \"\"\"DigitNet with two fully connected layers (20->10->2). 
No Dropout\"\"\"\n def __init__(self):\n super(FinalNet, self).__init__()\n self.mnistNet = FinalDigitNet()\n self.fc1 = nn.Linear(20,10)\n self.fc2 = nn.Linear(10,2)\n\n\n def forward(self, x):\n x1, x2 = x[:,0:1,:,:], x[:,1:2,:,:]\n x1 = self.mnistNet(x1)\n x2 = self.mnistNet(x2) \n x = torch.cat((x1, x2), 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.fc2(x)\n \n return x, x1, x2\n\n##### Training routine ####\n \ndef train_routine(model, train_input, train_target, train_classes, test_input, test_target, test_classes):\n \"\"\"Train a model and compute its performance on train and test data.\"\"\"\n \n # Loss\n criterion = nn.CrossEntropyLoss().to(DEVICE)\n \n # Optimizer\n optimizer = optim.Adam(model.parameters(), LEARNING_RATE)\n \n scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=LR_STEP, gamma = GAMMA)\n \n # Start timer\n t0 = time.time() \n \n # Training the model\n model.train(True)\n \n for e in range(EPOCHS):\n \n print('\\rTraining {}... (Epoch {}/{})'.format(model.__class__.__name__, e+1, EPOCHS), end=\"\")\n \n # Ponderation of the main loss => (1-f): ponderation of the auxiliray loss. 
\n f = AUX_LOSS\n\n for inputs, targets, classes in zip(train_input.split(BATCH_SIZE), \\\n train_target.split(BATCH_SIZE), \\\n train_classes.split(BATCH_SIZE)):\n \n output, aux1, aux2 = model(inputs)\n\n loss = (1-f) * criterion(output, targets) + f * (criterion(aux1, classes[:,0]) + criterion(aux2, classes[:,1]))\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n # Updtate learning rate\n scheduler.step()\n \n\n # End timer\n t1 = time.time() \n dt = t1-t0\n \n # Evaluating model performance on train and test data\n model.train(False)\n tr_output, tr_aux1, tr_aux2 = model(train_input)\n te_output, te_aux1, te_aux2 = model(test_input)\n \n tr_acc = accuracy(tr_output, train_target)\n te_acc = accuracy(te_output, test_target)\n \n tr_acc_mnist = 0.5*(accuracyMnist(tr_aux1, train_classes[:,0]) + \\\n accuracyMnist(tr_aux2, train_classes[:,1]))\n te_acc_mnist = 0.5*(accuracyMnist(te_aux1, test_classes[:,0]) + \\\n accuracyMnist(te_aux2, test_classes[:,1]))\n \n # Showing results\n print(\"\\nTraining time : {:.2f}s\\n\".format(dt) + \\\n \"Main performance:\\n\" + \\\n \" -Train accuracy : {:.2f}%\\n\".format(100 * tr_acc) + \\\n \" -Test accuracy : {:.2f}%\\n\".format(100 * te_acc) + \\\n \"Auxiliary performance:\\n\" + \\\n \" -Train digit accuracy : {:.2f}%\\n\".format(100 * tr_acc_mnist) + \\\n \" -Test digit accuracy : {:.2f}%\\n\".format(100 * te_acc_mnist) + \\\n \"-----------------------------------\")\n\n\nif __name__ == '__main__':\n \n # Display information about training procedure\n \n print('Train and test dataset size: {}\\n'.format(N) + \\\n 'Number of epochs: {}\\n'.format(EPOCHS) + \\\n 'Batch size for stochastic optimization: {}\\n'.format(BATCH_SIZE) + \\\n 'Learning rate: {} (multiplied by {} after {} epochs)\\n'.format(LEARNING_RATE, GAMMA, LR_STEP) + \\\n 'Device used for training: {}\\n'.format(DEVICE) + \\\n 'Weight of auxiliary loss: f={}'.format(AUX_LOSS))\n\n \n # Load data and move it to DEVICE\n print('Loading the 
data...')\n train_input, train_target, train_classes, test_input, test_target, test_classes = prologue.generate_pair_sets(N)\n train_input, train_target, train_classes = train_input.to(DEVICE), train_target.to(DEVICE), train_classes.to(DEVICE)\n test_input, test_target, test_classes = test_input.to(DEVICE), test_target.to(DEVICE), test_classes.to(DEVICE)\n print('Data loaded.') \n \n \n # Model constructions\n print('Constructing the models:')\n myFCNet = FCNet().to(DEVICE)\n myConvNet = ConvNet().to(DEVICE)\n myConvSepNet = ConvSepNet().to(DEVICE)\n myFinalNet = FinalNet().to(DEVICE)\n print(' -FCNet: {} parameters\\n'.format(nb_param(myFCNet)) + \\\n ' -ConvNet: {} parameters\\n'.format(nb_param(myConvNet)) + \\\n ' -ConvSepNet: {} parameters\\n'.format(nb_param(myConvSepNet)) + \\\n ' -FinalNet: {} parameters\\n'.format(nb_param(myFinalNet)))\n\n # Training \n\n train_routine(myFCNet, train_input, train_target, train_classes, test_input, test_target, test_classes)\n train_routine(myConvNet, train_input, train_target, train_classes, test_input, test_target, test_classes)\n train_routine(myConvSepNet, train_input, train_target, train_classes, test_input, test_target, test_classes)\n train_routine(myFinalNet, train_input, train_target, train_classes, test_input, test_target, test_classes)\n\n\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":10733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"23951836","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport localflavor.us.models\nfrom django.conf import settings\nimport django.contrib.auth.models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('auth', '0006_require_contenttypes_0002'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='FoodOffer',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('timestamp', models.DateTimeField(auto_now=True)),\n ('address', models.TextField(max_length=1000)),\n ('description', models.TextField(max_length=2000)),\n ('picture', models.ImageField(upload_to=b'/food/')),\n ('price', models.DecimalField(max_digits=5, decimal_places=2)),\n ('max_people', models.PositiveSmallIntegerField()),\n ('offer_datetime', models.DateTimeField()),\n ],\n ),\n migrations.CreateModel(\n name='FoodRequest',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('timestamp', models.DateTimeField(auto_now=True)),\n ('accepted', models.BooleanField(default=False)),\n ('offer', models.ForeignKey(to='foodoffers.FoodOffer')),\n ],\n ),\n migrations.CreateModel(\n name='User',\n fields=[\n ('user_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),\n ('zip_code', localflavor.us.models.USZipCodeField(max_length=10)),\n ('prof_pic', models.ImageField(upload_to=b'/profiles/')),\n ],\n options={\n 'abstract': False,\n 'verbose_name': 'user',\n 'verbose_name_plural': 'users',\n },\n bases=('auth.user',),\n managers=[\n ('objects', django.contrib.auth.models.UserManager()),\n ],\n ),\n migrations.AddField(\n model_name='foodrequest',\n name='requester',\n field=models.ForeignKey(to='foodoffers.User'),\n ),\n migrations.AddField(\n model_name='foodoffer',\n name='user',\n 
field=models.ForeignKey(to='foodoffers.User'),\n ),\n ]\n","sub_path":"foober/foodoffers/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"500656176","text":"\"\"\"\nConfiguration file\n\"\"\"\nimport numpy as np\n\n# General config\n_VERBOSE_LEVEL = 1\n\n# Scripts config\n# 8 speeds between (0.8, 1.2); remove the speed with value 1\n_SPEEDS = np.delete(np.linspace(0.8, 1.2, 9), 4)\n\n# 8 semitones between (-200, 200); remove the semitone with value 0\n_SEMITONES = np.delete(np.linspace(-200, 200, 9), 4)\n\n_NOISES = ['preprocessing/noises/ambiance.wav',\n 'preprocessing/noises/crowd.wav',\n 'preprocessing/noises/street.wav',\n 'preprocessing/noises/driving.wav']\n\n\n# Core config\n_EARLY_STOP_RANGE = None\n_DATA_CSV = None\n\n\nclass Config:\n def __init__(self):\n # General config\n self.verbose_level = _VERBOSE_LEVEL\n # Core config\n self.early_stop_range = _EARLY_STOP_RANGE\n self.data_csv = _DATA_CSV\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"384774286","text":"import os\nimport random\nimport operator\nfrom treat_data.read_file import ReadFiles\nfrom treat_data.write_infiles import WriteInFiles\n\n__author__ = 'Arthur Fortes'\n\n\nclass CrossFoldValidation(object):\n def __init__(self, database, dir_folds, n_folds=10):\n self.database = database\n self.dir_folds = dir_folds\n self.n_folds = n_folds\n self.data = ReadFiles(self.database)\n self.separate_data = list()\n\n self.read_interactions()\n self.create_data_folds()\n self.write_data()\n\n def read_interactions(self):\n self.data.read_without_dict()\n\n def create_data_folds(self):\n random.shuffle(self.data.list_interaction)\n percent = int(float(self.data.num_interactions)/float(self.n_folds))\n\n last = -1\n for n in xrange(self.n_folds):\n initial = 1 + last\n final = (n + 1) * percent\n if n < (self.n_folds - 1):\n self.separate_data.append(self.data.list_interaction[initial:final])\n else:\n self.separate_data.append(self.data.list_interaction[initial:])\n last = final\n\n def write_data(self):\n self.dir_folds += 'folds//'\n\n if not os.path.exists(self.dir_folds):\n os.mkdir(self.dir_folds)\n\n select_fold = list()\n for n in xrange(self.n_folds):\n\n fold_dir = self.dir_folds + str(n) + \"//\"\n if not os.path.exists(fold_dir):\n os.mkdir(fold_dir)\n\n test_fold = n\n\n if test_fold not in select_fold:\n select_fold.append(test_fold)\n final_train_data = list()\n\n for f in xrange(self.n_folds):\n if f != test_fold:\n final_train_data += self.separate_data[f]\n\n final_train_data = sorted(final_train_data, key=operator.itemgetter(0, 1))\n final_test_data = sorted(self.separate_data[n], key=operator.itemgetter(0, 1))\n\n file_write_train = fold_dir + \"train.dat\"\n file_write_test = fold_dir + \"test.dat\"\n WriteInFiles(final_train_data, file_write_train).write_train_and_test(final_test_data, 
file_write_test)\n","sub_path":"split_base/n_cross_fold_validation.py","file_name":"n_cross_fold_validation.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"158869622","text":"import datetime\nfrom core.database import session, TeamInvite\nfrom core.errors import ValidationError\nfrom . import teams, users\nfrom sqlalchemy.sql.expression import and_\nfrom itsdangerous import URLSafeSerializer\nfrom config import Config\n\n\nconfig = Config()\n\n\ndef get(team_slug, username):\n return TeamInvite.query.filter(and_(TeamInvite.team_slug == team_slug, TeamInvite.username == username)).first()\n\n\ndef has_accepted_invite(team_slug, username):\n return TeamInvite.query.filter(and_(TeamInvite.team_slug == team_slug, TeamInvite.username == username,\n TeamInvite.accepted == True)).first() is not None\n\n\ndef create(team_slug, username):\n print(\"{}:team\".format(team_slug))\n\n if not users.get(username):\n raise ValidationError(\"That Github user is currently not a part of Source League\")\n\n if get(team_slug, username):\n raise ValidationError(\"Invitation already sent\")\n\n invite = TeamInvite()\n invite.team_slug = team_slug\n invite.username = username\n invite.created = datetime.datetime.utcnow()\n invite.updated = datetime.datetime.utcnow()\n\n session.add(invite)\n session.commit()\n\n return get(team_slug, username)\n\n\ndef accept(team_slug, username):\n invite = get(team_slug, username)\n\n if invite:\n invite.accepted = True\n invite.responded_at = datetime.datetime.utcnow()\n invite.updated = datetime.datetime.utcnow()\n\n session.merge(invite)\n session.commit()\n\n teams.add_to_team(team_slug, username)\n\n\ndef decline(team_slug, username):\n invite = get(team_slug, username)\n\n if invite:\n invite.accepted = False\n invite.responded_at = datetime.datetime.utcnow()\n invite.updated = datetime.datetime.utcnow()\n\n session.merge(invite)\n session.commit()\n\n teams.add_to_team(team_slug, username)\n\n\ndef delete_all_for_team(slug):\n team = teams.get(slug)\n\n for invite in team.invites:\n session.delete(invite)\n\n session.commit()\n\n\ndef serialize(team_invite):\n serializer = 
URLSafeSerializer(config.app_secret())\n return serializer.dumps([team_invite.team_slug, team_invite.username])\n\n\ndef deserialize(serialized):\n serializer = URLSafeSerializer(config.app_secret())\n response = serializer.loads(serialized)\n return get(response[0],response[1])\n","sub_path":"core/ops/team_invites.py","file_name":"team_invites.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"520396497","text":"from utils import get_page\r\nfrom lxml import etree\r\nfrom db import RedisClient\r\nimport random\r\nimport time\r\nclass Crawl_ip(object):\r\n def __init__(self):\r\n self.db = RedisClient()\r\n def ip_xici(self):\r\n url = 'http://www.xicidaili.com/'\r\n con = get_page(url)\r\n html = etree.HTML(con)\r\n ip_list = html.xpath('//tr/td[2]/text()')\r\n ip_port = html.xpath('//tr/td[3]/text()')\r\n for i in range(100):\r\n ip = ip_list[i] + ':' + ip_port[i]\r\n self.db.add(ip)\r\n def ip_66(self):\r\n preurl = 'http://www.66ip.cn/'\r\n for i in range(100):\r\n url = preurl+str(i)+'.html'\r\n con = get_page(url)\r\n if con:\r\n html = etree.HTML(con)\r\n ip_list = html.xpath('//tr')\r\n for i in range(2,len(ip_list)):\r\n ip = ip_list[i].xpath('td[1]/text()')[0]+\":\"+ip_list[i].xpath('td[2]/text()')[0]\r\n self.db.add(ip,10)\r\n intr = random.randint(5,15)\r\n time.sleep(intr*0.1)\r\n def run(self):\r\n self.ip_66()\r\n self.ip_xici()\r\n\r\nif __name__ == '__main__':\r\n crawl = Crawl_ip()\r\n crawl.run()\r\n\r\n\r\n\r\n","sub_path":"crawl_ip.py","file_name":"crawl_ip.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"259695835","text":"from mock import Mock\nimport fluenttest\n\n\nclass TornadoHandlerTestCase(fluenttest.TestCase):\n\n @classmethod\n def arrange(cls):\n super(TornadoHandlerTestCase, cls).arrange()\n cls.application = Mock()\n cls.application.ui_methods = {}\n\n cls.request = Mock()\n cls.request.headers = {}\n","sub_path":"tests/unit/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"229456561","text":"from conans import ConanFile, CMake, tools\nimport os\nfrom conans.errors import ConanInvalidConfiguration\nimport textwrap\n\nclass FastDDSConan(ConanFile):\n\n name = \"fast-dds\"\n license = \"Apache-2.0\"\n homepage = \"https://fast-dds.docs.eprosima.com/\"\n url = \"https://github.com/conan-io/conan-center-index\"\n description = \"The most complete OSS DDS implementation for embedded systems.\"\n topics = (\"DDS\", \"Middleware\", \"IPC\")\n settings = \"os\", \"compiler\", \"build_type\", \"arch\"\n options = {\n \"shared\": [True, False],\n \"fPIC\": [True, False],\n \"with_ssl\": [True, False]\n }\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"with_ssl\": False\n }\n generators = \"cmake\", \"cmake_find_package\"\n _cmake = None\n exports_sources = [\"patches/**\", \"CMakeLists.txt\"]\n\n @property\n def _pkg_share(self):\n return os.path.join(\n self.package_folder,\n \"share\"\n )\n\n @property\n def _pkg_tools(self):\n return os.path.join(\n self.package_folder,\n \"tools\"\n )\n\n @property\n def _pkg_bin(self):\n return os.path.join(\n self.package_folder,\n \"bin\"\n )\n\n @property\n def _module_subfolder(self):\n return os.path.join(\n \"lib\",\n \"cmake\"\n )\n\n @property\n def _module_file_rel_path(self):\n return os.path.join(\n self._module_subfolder,\n \"conan-target-properties.cmake\"\n )\n \n @property\n def _minimum_cpp_standard(self):\n return 11\n\n @property\n def _minimum_compilers_version(self):\n return {\n \"Visual Studio\": \"16\",\n \"gcc\": \"5\",\n \"clang\": \"3.9\",\n \"apple-clang\": \"8\",\n }\n\n @staticmethod\n def _create_cmake_module_alias_targets(module_file, targets):\n content = \"\"\n for alias, aliased in targets.items():\n content += textwrap.dedent(\"\"\"\\\n if(TARGET {aliased} AND NOT TARGET {alias})\n add_library({alias} INTERFACE IMPORTED)\n set_property(TARGET {alias} PROPERTY INTERFACE_LINK_LIBRARIES {aliased})\n endif()\n \"\"\".format(alias=alias, 
aliased=aliased))\n tools.save(module_file, content)\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def _patch_sources(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n\n def configure(self):\n if self.options.shared:\n del self.options.fPIC\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def _configure_cmake(self):\n if not self._cmake:\n self._cmake = CMake(self) \n self._cmake.definitions[\"BUILD_MEMORY_TOOLS\"] = False\n self._cmake.definitions[\"NO_TLS\"] = not self.options.with_ssl\n self._cmake.definitions[\"SECURITY\"] = self.options.with_ssl\n self._cmake.definitions[\"EPROSIMA_INSTALLER_MINION\"] = False\n self._cmake.configure()\n return self._cmake\n\n def requirements(self):\n self.requires(\"tinyxml2/7.1.0\")\n self.requires(\"asio/1.18.2\")\n self.requires(\"fast-cdr/1.0.21\")\n self.requires(\"foonathan-memory/0.7.0\")\n self.requires(\"boost/1.73.0\")\n if self.options.with_ssl:\n self.requires(\"openssl/1.1.1k\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version], strip_root=True,\n destination=self._source_subfolder)\n\n def validate(self):\n if self.settings.compiler.get_safe(\"cppstd\"):\n tools.check_min_cppstd(self, self._minimum_cpp_standard)\n min_version = self._minimum_compilers_version.get(str(self.settings.compiler))\n if not min_version:\n self.output.warn(\"{} recipe lacks information about the {} compiler support.\".format(\n self.name, self.settings.compiler))\n else:\n if tools.Version(self.settings.compiler.version) < min_version:\n raise ConanInvalidConfiguration(\"{} requires C++{} support. 
The current compiler {} {} does not support it.\".format(\n self.name, self._minimum_cpp_standard, self.settings.compiler, self.settings.compiler.version))\n if self.settings.os == \"Windows\":\n if (\"MT\" in self.settings.compiler.runtime and self.options.shared):\n # This combination leads to an fast-dds error when linking\n # linking dynamic '*.dll' and static MT runtime\n raise ConanInvalidConfiguration(\"Mixing a dll {} library with a static runtime is a bad idea\".format(self.name))\n\n\n def build(self):\n self._patch_sources()\n cmake = self._configure_cmake()\n cmake.build()\n\n def package(self):\n cmake = self._configure_cmake()\n cmake.install()\n tools.rmdir(self._pkg_share)\n self.copy(\"LICENSE\", src=self._source_subfolder, dst=\"licenses\")\n tools.rename(\n src=self._pkg_tools,\n dst=os.path.join(self._pkg_bin, \"tools\")\n )\n tools.remove_files_by_mask(\n directory=os.path.join(self.package_folder, \"lib\"),\n pattern=\"*.pdb\"\n )\n tools.remove_files_by_mask(\n directory=os.path.join(self.package_folder, \"bin\"),\n pattern=\"*.pdb\"\n )\n self._create_cmake_module_alias_targets(\n os.path.join(self.package_folder, self._module_file_rel_path),\n {\"fastrtps\": \"fastdds::fastrtps\"}\n )\n\n def package_info(self):\n self.cpp_info.names[\"cmake_find_package\"] = \"fastdds\"\n self.cpp_info.names[\"cmake_find_multi_package\"] = \"fastdds\"\n # component fastrtps\n self.cpp_info.components[\"fastrtps\"].names[\"cmake_find_package\"] = \"fastrtps\"\n self.cpp_info.components[\"fastrtps\"].names[\"cmake_find_multi_package\"] = \"fastrtps\"\n self.cpp_info.components[\"fastrtps\"].libs = tools.collect_libs(self)\n self.cpp_info.components[\"fastrtps\"].requires = [\n \"fast-cdr::fast-cdr\",\n \"asio::asio\",\n \"tinyxml2::tinyxml2\",\n \"foonathan-memory::foonathan-memory\",\n \"boost::boost\"\n ]\n if self.settings.os in [\"Linux\", \"Macos\", \"Neutrino\"]:\n self.cpp_info.components[\"fastrtps\"].system_libs.append(\"pthread\")\n if 
self.settings.os == \"Linux\":\n self.cpp_info.components[\"fastrtps\"].system_libs.extend([\"rt\", \"dl\", \"atomic\"])\n elif self.settings.os == \"Windows\":\n self.cpp_info.components[\"fastrtps\"].system_libs.extend([\"iphlpapi\",\"shlwapi\"])\n if self.options.shared:\n self.cpp_info.components[\"fastrtps\"].defines.append(\"FASTRTPS_DYN_LINK\")\n if self.options.with_ssl:\n self.cpp_info.components[\"fastrtps\"].requires.append(\"openssl::openssl\")\n self.cpp_info.components[\"fastrtps\"].builddirs.append(self._module_subfolder)\n self.cpp_info.components[\"fastrtps\"].build_modules[\"cmake_find_package\"] = [self._module_file_rel_path]\n self.cpp_info.components[\"fastrtps\"].build_modules[\"cmake_find_package_multi\"] = [self._module_file_rel_path]\n # component fast-discovery\n self.cpp_info.components[\"fast-discovery-server\"].names[\"cmake_find_package\"] = \"fast-discovery-server\"\n self.cpp_info.components[\"fast-discovery-server\"].names[\"cmake_find_multi_package\"] = \"fast-discovery-server\"\n self.cpp_info.components[\"fast-discovery-server\"].bindirs = [\"bin\"]\n bin_path = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH env var for fast-dds::fast-discovery-server with : {}\".format(bin_path)),\n self.env_info.PATH.append(bin_path)\n # component tools\n self.cpp_info.components[\"tools\"].names[\"cmake_find_package\"] = \"tools\"\n self.cpp_info.components[\"tools\"].names[\"cmake_find_multi_package\"] = \"tools\"\n self.cpp_info.components[\"tools\"].bindirs = [os.path.join(\"bin\",\"tools\")]\n bin_path = os.path.join(self._pkg_bin, \"tools\")\n self.output.info(\"Appending PATH env var for fast-dds::tools with : {}\".format(bin_path)),\n self.env_info.PATH.append(bin_path)\n","sub_path":"recipes/fast-dds/all/conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":8442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"311475375","text":"\"\"\"\nBase model for multilingual models.\n\"\"\"\nfrom new import classobj\n\nfrom django.db import models\nfrom django.db.models.base import ModelBase\n\nfrom multilingual.languages import get_all\n\nfrom .fields import TranslationProxyField, TranslationRelation, TRANSLATION_FIELD_NAME\nfrom .manager import MultilingualManager\nfrom .options import MultilingualOptions\nfrom .translation import TranslationModelBase, TranslationModel\n\n# TODO: inheritance of multilingual models and translation models\n\n\nclass MultilingualModelBase(ModelBase):\n def __new__(cls, name, bases, attrs):\n ### START - Build translation model\n # At first we build translation model so we can add it to attrs\n # Purpose is to not call 'add_to_class' after model is registered\n\n # We have to copy attributes because they change during creation of a model\n trans_attrs = attrs.copy()\n\n # Make a copy of Meta, so changes in it when creating a translation model does not affect\n # creation of multilingual model\n if attrs.has_key('Meta'):\n trans_attrs['Meta'] = classobj.__new__(classobj, 'Meta', (attrs['Meta'],), attrs['Meta'].__dict__.copy())\n\n translation_name = name + \"Translation\"\n trans_attrs['multilingual_model_name'] = name\n c_trans_model = TranslationModelBase(translation_name, (TranslationModel, ), trans_attrs)\n ### END - Build translation model\n\n ### And some changes before we build multilingual model\n meta = attrs.get('Meta', None)\n abstract = getattr(meta, 'abstract', False)\n\n # Add translation model to attrs\n attrs['translation_model'] = c_trans_model\n\n if not abstract:\n # Add translation relations\n for language_code in [None] + get_all():\n field = TranslationRelation(c_trans_model, base_name=TRANSLATION_FIELD_NAME,\n language_code=language_code)\n attrs[field.name] = field\n\n # Add proxies for translated fields into attrs\n for field in (c_trans_model._meta.fields + c_trans_model._meta.many_to_many):\n if field.name in 
('id', 'language_code', 'master'):\n continue\n for language_code in get_all():\n proxy = TranslationProxyField(field.name, language_code)\n attrs[proxy.name] = proxy\n proxy = TranslationProxyField(field.name, None)\n attrs[proxy.name] = proxy\n proxy = TranslationProxyField(field.name, None, fallback=True)\n attrs[proxy.name] = proxy\n\n # Handle manager\n if not 'objects' in attrs:\n # If there is no manager, set MultilingualManager as manager\n attrs['objects'] = MultilingualManager()\n elif not isinstance(attrs['objects'], MultilingualManager):\n # Make sure that if the class specifies objects then it is a subclass of our Manager.\n\n # Don't check other managers since someone might want to have a non-multilingual manager, but assigning\n # a non-multilingual manager to objects would be a common mistake.\n raise ValueError(\"Model %s specifies translations, so its 'objects' manager must be a subclass of \"\\\n \"multilingual.Manager.\" % name)\n\n # And now just create multilingual model\n return super(MultilingualModelBase, cls).__new__(cls, name, bases, attrs)\n\n def add_to_class(cls, name, value):\n # Catch meta and change its class, it is HACK, but it is the least ugly one\n if name == '_meta':\n value = MultilingualOptions(value.meta, value.app_label)\n super(MultilingualModelBase, cls).add_to_class(name, value)\n\n\nclass MultilingualModel(models.Model):\n __metaclass__ = MultilingualModelBase\n\n class Meta:\n abstract = True\n\n def save(self, force_insert=False, force_update=False, using=None):\n \"\"\"\n Change save method to save translations when multilingual object is saved.\n \"\"\"\n super(MultilingualModel, self).save(force_insert=force_insert, force_update=force_update, using=using)\n for field in self._meta.fields:\n if not isinstance(field, TranslationRelation):\n continue\n\n # Find translation. 
Use cache name to prevent any unnecessary SQL queries.\n # If it isn't loaded, it isn't changed.\n attr_name = field.get_cache_name()\n translation = getattr(self, attr_name, None)\n\n if translation is None:\n # Translation does not exist, continue with next\n continue\n\n # Set the master ID. The master and translation could be just created.\n translation.master_id = self.pk\n translation.save()\n","sub_path":"multilingual/models/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"588475436","text":"import sys\n\nsys.stdin = open('input.txt', 'r')\n\n# 모든 쌍의 최단 경로를 찾고 그 합을 구하자.\nT = int(input())\n\nfor tc in range(1, T+1):\n inputs = list(map(int, input().split()))\n N = inputs[0]\n lines = inputs[1:]\n networks = [lines[i*N:i*N+N] for i in range(N)]\n\n dp = [[1001]*(N+1) for _ in range(N+1)]\n\n for i in range(N):\n for j in range(N):\n if networks[i][j] == 1:\n dp[i+1][j+1] = 1\n\n for k in range(1, N+1):\n for i in range(1, N+1):\n if k != i:\n for j in range(1, N+1):\n if j != i and j != k:\n dp[i][j] = min(dp[i][j], dp[i][k]+dp[k][j])\n minV = 1000*N\n for i in range(1, N+1):\n total = 0\n for j in range(1, N+1):\n value = dp[i][j]\n if 0 < value < 1001:\n total += value\n if total < minV:\n minV = total\n print(\"#{} {}\".format(tc, minV))\n","sub_path":"PYTHON/SWEXPERT/1263_사람네트워크2/1263_플로이드와샬.py","file_name":"1263_플로이드와샬.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"137551351","text":"# Given an integer, write a function to determine if it is a power of two.\n\n# Example 1:\n\n# Input: 1\n# Output: true \n# Explanation: 20 = 1\n# Example 2:\n\n# Input: 16\n# Output: true\n# Explanation: 24 = 16\n# Example 3:\n\n# Input: 218\n# Output: false\n\n\nclass solution:\n def isPowerOfTwo(self, n: int) -> bool:\n # for i in range(0,16):\n # if n==(1<1:\n # if n%2==1:\n # return False\n # else:\n # n /= 2\n # return True\n\n\n if n<1: return False\n if n==1: return True\n while n>1:\n if n%2==1: return False\n n//=2\n return True\nprint(solution.isPowerOfTwo(solution,2))\n","sub_path":"231_power_of_two.py","file_name":"231_power_of_two.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"162570805","text":"import pickle\r\nimport os\r\nimport numpy as np\r\nimport pandas as pd\r\nimport urllib.request\r\nimport json\r\nimport re\r\n\r\nfrom datetime import datetime\r\nfrom FileReadWrite import FileReadWrite\r\n\r\nhistorical_records_pkl = FileReadWrite('historical_records.pkl')\r\nuser_project_matrix_csv = FileReadWrite('user_project_matrix.csv')\r\npage = 0\r\nrecord = 0\r\n\r\nif historical_records_pkl.exists() and user_project_matrix_csv.exists():\r\n historical_records = historical_records_pkl.get_data_pkl()\r\n user_project_matrix = user_project_matrix_csv.get_data_csv()\r\nelse:\r\n historical_records = pd.DataFrame(columns = ['profile', 'project', 'type', 'whens', 'origin', 'repetitions'])\r\n user_project_matrix = pd.DataFrame(columns=['user'])\r\n\r\npage = int(historical_records.shape[0]/500)\r\nrecord = int(historical_records.shape[0]%500)\r\n\r\ndef update_records():\r\n global page\r\n global record\r\n print('filter current page')\r\n cleaned = __filter_current_page(page, record)\r\n if len(cleaned['activity']) != 0:\r\n __update_data(cleaned)\r\n\r\n while True:\r\n page = int(page) + 1\r\n print('load page', page)\r\n# for testing\r\n # if page > 6:\r\n # break\r\n\r\n page = str(page)\r\n url = 'https://scistarter.org/api/stream-page?page='+page+'&key=5255cf33e739e9ecc20b9b260cb68567fbc81f6b1bfb4808ba2c39548501f0a1523e2e97d79563645cba40a09894bfdb277779d1145a596f237ebdc166afcf50'\r\n content = __getPage(url)\r\n if len(content['activity']) == 0:\r\n break\r\n else:\r\n __update_data(content)\r\n\r\n historical_records_pkl.put_data_pkl(historical_records)\r\n user_project_matrix_csv.put_data_csv(user_project_matrix)\r\n __put_update_time()\r\n\r\n\r\ndef __getPage(url):\r\n page = urllib.request.urlopen(url)\r\n content = page.read().decode(\"utf8\")\r\n page.close()\r\n JSON_full = {'activity': []}\r\n for entry in content.splitlines():\r\n# if '\"project\": null' in entry:\r\n# continue\r\n cleaned_text = 
__extractattribute(entry)\r\n JSON_single = __JSONconverter(cleaned_text)\r\n JSON_full['activity'].append(JSON_single)\r\n\r\n # return json.dumps(JSON_full)\r\n return JSON_full\r\n\r\ndef __filter_current_page(page, record):\r\n page = str(page)\r\n cleaned_data = {'activity': []}\r\n url = 'https://scistarter.org/api/stream-page?page='+page+'&key=5255cf33e739e9ecc20b9b260cb68567fbc81f6b1bfb4808ba2c39548501f0a1523e2e97d79563645cba40a09894bfdb277779d1145a596f237ebdc166afcf50'\r\n content = __getPage(url)\r\n cleaned = content['activity'][record:]\r\n cleaned_data['activity'] = cleaned\r\n return cleaned_data\r\n\r\ndef __JSONconverter (cleaned_text):\r\n ### input: cleaned attribute string: ['\"profile\": \"c3174748ab29f73d8c6226d0c2171aeb\"', '\"when\": \"2016-07-22 14:07:43\"', '\"project\": 25']\r\n ### output: JSON for one entry\r\n data = {}\r\n for a in cleaned_text:\r\n if ('\"profile\"') in a:\r\n index = [m.start() for m in re.finditer('\"', a)]\r\n user = a[index[-2]+1:index[-1]]\r\n data['user'] = user\r\n elif ('\"project\"') in a:\r\n index = [m.start() for m in re.finditer(':', a)]\r\n project = a[index[-1]+2:]\r\n data['project'] = int(project)\r\n elif ('\"when\"') in a:\r\n index = [m.start() for m in re.finditer('\"', a)]\r\n time = a[index[-2]+1:index[-1]]\r\n data['when'] = time\r\n elif ('\"type\"') in a:\r\n index = [m.start() for m in re.finditer('\"', a)]\r\n mtype = a[index[-2]+1:index[-1]]\r\n data['type'] = mtype\r\n elif ('\"origin\"') in a:\r\n index = [m.start() for m in re.finditer('\"', a)]\r\n origin = a[index[-2]+1:index[-1]]\r\n data['origin'] = origin\r\n elif ('\"repetitions\"') in a:\r\n index = [m.start() for m in re.finditer(':', a)]\r\n rep = a[index[-1]+2:]\r\n data['repetitions'] = int(rep)\r\n else:\r\n print(\"0\")\r\n # json_data = json.dumps(data)\r\n\r\n return data\r\n\r\ndef __extractattribute (entry):\r\n ### input: single entry, such as \"'{\"origin\": \"Unspecified\", \"profile\": 
\"c3174748ab29f73d8c6226d0c2171aeb\", \"extra\": \"\", \"repetitions\": 1, \"profile_utm_campaign\": \"\", \"profile_referrer\": \"\", \"duration\": 0.0, \"profile_utm_term\": \"\", \"authenticated\": true, \"profile_origin\": \"\", \"where\": null, \"when\": \"2016-07-22 14:07:43\", \"profile_utm_medium\": \"\", \"project\": 25, \"magnitude\": 1, \"profile_utm_source\": \"\", \"profile_utm_content\": \"\", \"type\": \"Participated\"}'\"\r\n ### output: user, time, project, such as ['\"profile\": \"c3174748ab29f73d8c6226d0c2171aeb\"', '\"when\": \"2016-07-22 14:07:43\"', '\"project\": 25']\r\n attribute_list = entry.split(\", \")\r\n cleaned_text = list(filter (lambda a: ('\"profile\"' in a or '\"when\"' in a or '\"project\"'in a or '\"type\"' in a or '\"origin\"' in a or '\"repetitions\"' in a), attribute_list))\r\n return cleaned_text\r\n\r\ndef __update_data(cleaned):\r\n for entry in cleaned['activity']:\r\n user = entry['user']\r\n project = entry['project']\r\n when = entry['when']\r\n origin = entry['origin']\r\n mtype = entry['type']\r\n repetitions = entry['repetitions']\r\n\r\n # Update user project matrix\r\n if not (user in list(user_project_matrix['user'])):\r\n new_row_number = user_project_matrix.shape[0]\r\n user_project_matrix.loc[new_row_number] = [user] + list(np.zeros(user_project_matrix.shape[1]-1,dtype=int))\r\n if not (str(project) in list(user_project_matrix)):\r\n user_project_matrix[str(project)] = 0\r\n # old_value = data[data['user']==user][str(project)]\r\n user_project_matrix.loc[user_project_matrix['user']==user, str(project)] = 1\r\n\r\n # Update historical records\r\n historical_row = historical_records.shape[0]\r\n historical_records.loc[historical_row] = [user, project, mtype, when, origin, repetitions]\r\n\r\ndef __put_update_time():\r\n if(os.path.isfile('update_times.txt')):\r\n file = open('update_times.txt', 'a+')\r\n file.write('\\n'+datetime.now().strftime(\"%Y-%m-%d, %H:%M:%S\"))\r\n file.close()\r\n else:\r\n file = 
open('update_times.txt', 'w+')\r\n file.write(datetime.now().strftime(\"%Y-%m-%d, %H:%M:%S\"))\r\n file.close()\r\n\r\nif __name__ == '__main__':\r\n update_records()\r\n","sub_path":"Updater.py","file_name":"Updater.py","file_ext":"py","file_size_in_byte":6354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"609191017","text":"\"\"\"\nPython Flight Mechanics Engine (PyFME).\nCopyright (c) AeroPython Development Team.\nDistributed under the terms of the MIT License.\n\"\"\"\n\nfrom pyfme.environment.environment import Environment\nfrom pyfme.aircrafts import Component, Controller\nfrom pyfme.aircrafts.components import Aircraft\nimport numpy as np\n\n\nclass Propeller(Component):\n \"\"\"A propeller. The propeller is generating a thrust depending on the value\n of the internal controller, which is automatically created. Such controller\n may take values between 0 (minimum thrust) and 1 (maximum thrust)\n \"\"\"\n def __init__(self, r, omega, J, Ct,\n vec=np.asarray([1, 0, 0]),\n controller_name='delta_t',\n cog=np.zeros(3, dtype=np.float64),\n mass=0.0,\n inertia=np.zeros((3, 3), dtype=np.float64),\n Sw=0.0,\n parent=None):\n \"\"\"Create a new propeller\n\n Parameters\n ----------\n r : float\n Propeller radius (m)\n omega : array_like\n List of considered propeller angular velocities (RPM). The current\n rpm are linearly interpolated using the controller value, which can\n take values between 0 and 1\n J : array_like\n Advance ratio considered values. The propeller thrust value will be\n computed getting first the current advance ratio, interpolating\n later the trhust coefficient using this array and ``Ct``\n Ct : array_like\n Thrust coeff. considered values. The propeller thrust value will be\n computed getting first the current advance ratio, interpolating\n later the trhust coefficient using this array and ``J``\n vec : array_like\n Thrust direction vector\n controller_name : string\n Name of the associated controller to be automatically generated\n cog : array_like\n Local x, y, z coordinates -i.e. 
referered to the considered center\n of the aircraft- of the center of gravity (m, m, m)\n mass : float\n Mass of the component (kg)\n inertia : array_like\n 3x3 tensor of inertia of the component (kg * m2) for the upright\n aircraft.\n Current equations assume that the global aircraft has a symmetry\n plane (x_b - z_b), thus J_xy and J_yz must be null\n Sw : float\n Wetted surface (m2)\n parent : Component\n Parent component which owns the current component.\n \"\"\"\n super().__init__(cog, mass, inertia, Sw, parent=parent)\n\n # Velocities\n self.__r = r\n self.__delta_t = np.linspace(0, 1, num=len(omega))\n self.__omega = np.asarray(omega)\n self.__J = J\n self.__Ct = Ct\n self.__vec = vec\n self.controller = Controller(controller_name, 0.0, 1.0)\n\n @property\n def r(self):\n \"\"\"Propeller radius (m)\n\n Returns\n -------\n r : float\n Propeller radius (m)\n \"\"\"\n return self.__r\n\n @r.setter\n def r(self, r):\n \"\"\"Set the propeller radius (m)\n\n Parameters\n ----------\n r : float\n Propeller radius (m)\n \"\"\"\n self.__r = r\n\n @property\n def omega(self):\n \"\"\"List of considered propeller angular velocities (RPM)\n\n Returns\n -------\n omega : array_like\n List of considered propeller angular velocities (RPM). The current\n rpm are linearly interpolated using the controller value, which can\n take values between 0 and 1\n \"\"\"\n return self.__omega\n\n @omega.setter\n def omega(self, omega):\n \"\"\"Set the list of considered propeller angular velocities (RPM)\n\n Parameters\n ----------\n omega : array_like\n List of considered propeller angular velocities (RPM). The current\n rpm are linearly interpolated using the controller value, which can\n take values between 0 and 1\n \"\"\"\n self.__delta_t = np.linspace(0, 1, num=len(omega))\n self.__omega = np.asarray(omega)\n\n @property\n def J(self):\n \"\"\"Advance ratio considered values\n\n Returns\n -------\n J : array_like\n Advance ratio considered values. 
The propeller thrust value will be\n computed getting first the current advance ratio, interpolating\n later the trhust coefficient using this array and ``Ct``\n \"\"\"\n return self.__omega\n\n @J.setter\n def J(self, J):\n \"\"\"Set the advance ratio considered values\n\n Parameters\n ----------\n J : array_like\n Advance ratio considered values. The propeller thrust value will be\n computed getting first the current advance ratio, interpolating\n later the trhust coefficient using this array and ``Ct``\n \"\"\"\n self.__J = J\n\n @property\n def Ct(self):\n \"\"\"Thrust coeff. considered values\n\n Returns\n -------\n Ct : array_like\n Thrust coeff. considered values. The propeller thrust value will be\n computed getting first the current advance ratio, interpolating\n later the trhust coefficient using this array and ``J``\n \"\"\"\n return self.__Ct\n\n @Ct.setter\n def Ct(self, Ct):\n \"\"\"Set the thrust coeff. considered values\n\n Parameters\n ----------\n Ct : array_like\n Thrust coeff. considered values. 
The propeller thrust value will be\n computed getting first the current advance ratio, interpolating\n later the trhust coefficient using this array and ``J``\n \"\"\"\n self.__Ct = Ct\n\n @property\n def vec(self):\n \"\"\"Thrust direction vector\n\n Returns\n -------\n vec : array_like\n Thrust direction vector\n \"\"\"\n return self.__vec\n\n @vec.setter\n def vec(self, Ct):\n \"\"\"Set the thrust direction vector\n\n Parameters\n ----------\n vec : array_like\n Thrust direction vector\n \"\"\"\n self.__vec = vec\n\n def calculate_forces_and_moments(self):\n \"\"\"Compute the forces and moments of the global aircraft collecting all\n the subcomponents\n\n Returns\n -------\n f : array_like\n Drag, lateral and Lift forces (N)\n m : array_like\n Roll, pitch and yaw moments (N * m)\n \"\"\"\n f, m = super().calculate_forces_and_moments()\n\n aircraft = self.top_node()\n assert isinstance(aircraft, Aircraft)\n if aircraft.environment is None:\n raise Warning('aircraft.environment is None')\n return f, m\n\n # Get the airspeed (just in case we have an available aircraft)\n V = np.zeros(3, dtype=np.float64)\n # FIXME: Vectorial velocities should be considered to can model\n # other aircraft types, like helicopters\n V[0] = aircraft.TAS\n V = np.dot(V, self.__vec)\n\n delta_t = self.controller.value\n rho = aircraft.environment.rho\n omega = np.interp(delta_t, self.__delta_t, self.__omega) # rpm\n omega_RAD = (omega * 2.0 * np.pi) / 60.0 # rad/s\n\n J = (np.pi * V) / (omega_RAD * self.__r)\n Ct = np.interp(J, self.__J, self.__Ct)\n T = (2.0 / np.pi)**2 * rho * (omega_RAD * self.__r)**2 * Ct # N\n\n ff = T * self.__vec\n r = self.cog(use_subcomponents=False) - self.cog()\n mm = np.cross(r, ff)\n\n return f + ff, m + mm\n","sub_path":"src/pyfme/aircrafts/components/propeller.py","file_name":"propeller.py","file_ext":"py","file_size_in_byte":7636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"174835144","text":"from kafka import KafkaProducer\nimport time\nimport requests\nimport json\n\n\nkafka_bootstrap_servers = 'localhost:9092'\nkafka_topic_name = 'sample_topic'\n\nproducer = KafkaProducer(bootstrap_servers = kafka_bootstrap_servers,\nvalue_serializer = lambda v: json.dumps(v).encode('utf-8'))\n\njson_message = None\ncity_name = None\ntemperature = None\nhumidity = None\nopenweathermap_api_endpoint = None\nappid = None\n\ndef get_weather_detail(openweathermap_api_endpoint):\n print(openweathermap_api_endpoint)\n api_response = requests.get(openweathermap_api_endpoint)\n json_data = api_response.json()\n city_name = json_data['name']\n humidity = json_data['main']['humidity']\n temperature = json_data['main']['temp']\n json_message = {'CityName': city_name,\n \"temperature\": temperature,\n \"Humidity\": humidity,\n \"CreationTime\": time.strftime(\"%y-%m-%d %H:%M:%S\")}\n return json_message\n\n\ndef get_apikey():\n with open('weatherapikey.json') as f:\n return json.load(f)['weatherdetail']\n\n# a = requests.get(\"http://api.openweathermap.org/data/2.5/weather?q=Chennai&appid=98a1502877eff8b08da85801cf53cdc5\")\n# b = a.json()\n# print(b['name'])\n\n\nwhile True:\n city_name = 'Hyderabad'\n api_key = get_apikey()\n openweathermap_api_endpoint = \"http://api.openweathermap.org/data/2.5/weather?q=\" + city_name + \"&appid=\" + api_key\n json_message = get_weather_detail(openweathermap_api_endpoint)\n producer.send(kafka_topic_name, json_message)\n print(json_message)\n time.sleep(2)\n\n\n city_name = 'Chennai'\n api_key = get_apikey()\n openweathermap_api_endpoint = \"http://api.openweathermap.org/data/2.5/weather?q=\" + city_name + \"&appid=\" + api_key\n json_message = get_weather_detail(openweathermap_api_endpoint)\n producer.send(kafka_topic_name, json_message)\n print(json_message)\n time.sleep(2)\n\n\n city_name = 'Mumbai'\n api_key = get_apikey()\n openweathermap_api_endpoint = \"http://api.openweathermap.org/data/2.5/weather?q=\" 
+ city_name + \"&appid=\" + api_key\n json_message = get_weather_detail(openweathermap_api_endpoint)\n producer.send(kafka_topic_name, json_message)\n print(json_message)\n time.sleep(2)\n\n\n\n city_name = 'Bangalore'\n api_key = get_apikey()\n openweathermap_api_endpoint = \"http://api.openweathermap.org/data/2.5/weather?q=\" + city_name + \"&appid=\" + api_key\n json_message = get_weather_detail(openweathermap_api_endpoint)\n producer.send(kafka_topic_name, json_message)\n print(json_message)\n time.sleep(2)\n\n\n\n\n","sub_path":"WeatherDataToTopic.py","file_name":"WeatherDataToTopic.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"64256799","text":"#!/usr/bin/python3\r\n\r\n######################################################################\r\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\r\n# Python Program to row objects\t\t\t\t \t\t\t\t # \r\n# Created on : 13/06/2020\t\t\t\t\t\t\t\t\t #\r\n# Author : Vikas Bansode\t\t\t\t\t\t\t\t #\r\n#\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t #\r\n######################################################################\r\n\r\n\r\n\r\nimport sqlite3\r\nimport sys\r\n\r\ndb_filename = 'todo.db'\r\n\r\nwith sqlite3.connect(db_filename) as conn:\r\n\tconn.row_factory = sqlite3.Row\r\n\r\n\tcursor = conn.cursor()\r\n\r\n\tcursor.execute(\"\"\"\r\n\t\tselect name,description,deadline from project\r\n\t\twhere name = 'singh'\r\n\t\t\"\"\")\r\n\tname,description,deadline = cursor.fetchone()\r\n\r\n\tprint(\"Project details for {} ({})\\n due {}\".format(\r\n\t\tdescription,name,deadline))\r\n\r\n\tcursor.execute(\"\"\"\"\r\n\t\tselect id, priority, status, deadline, details from task\r\n\t\twhere project = 'singh' order by deadline\r\n\t\t\"\"\"\r\n\t\t)\r\n\r\n\tprint(\"\\nNext 5 task: \")\r\n\tfor row in cursor.fetchmany(5):\r\n\t\tprint('{:2d}[{:d}] {:<25} [{:<8}] ({})'.format(\r\n\t\t\trow['id'],row['priority'],row['details'],\r\n\t\t\trow['status'],row['deadline'],))","sub_path":"16.Database_program/09.row_objects.py","file_name":"09.row_objects.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"464628611","text":"import requests\n\ndef obtain_data(url):\n response = requests.get(url)\n if response.status_code == 200:\n print(\"OK\")\n return response.json()\n\n\n# def findSameUrl\n# docks:\n# http://docs.python-requests.org/en/master/user/quickstart/#response-content\n\nurl = \"https://jsonplaceholder.typicode.com/photos\"\n\ndata = obtain_data(url)\nprint(type(data)) # \nprint(type(data[0])) # \nprint(data[0][\"url\"]) # https://via.placeholder.com/600/92c952\n\"\"\"\n{\n 'albumId': 1,\n 'id': 1,\n 'title': 'accusamus beatae ad facilis cum similique qui sunt',\n 'url': 'https://via.placeholder.com/600/92c952',\n 'thumbnailUrl': 'https://via.placeholder.com/150/92c952'\n}\n\"\"\"\n\n\n\n\n# response = requests.get(url)\n# print(response) # ]\n# print(type(response)) # \n# print(response.status_code) # 200\n\n# print(data.json())\n","sub_path":"find-same-url/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"343098363","text":"from function_Uteis import basicos\nfrom random import randint\nimport pygame\n\n\ndef estilo_musicas_function():\n estilo_musicas = {}\n lista_keys = []\n estilo = basicos.lerArquivos('../../../musicas/','estilos m','txt')\n cont = 0\n for c in estilo:\n lista_keys.append(c.rstrip(\"\\n\"))\n estilo_musicas[f'{lista_keys[cont]}'] = []\n cont += 1\n for key in estilo_musicas.keys():\n try:\n songs = basicos.lerArquivos(f'../../../musicas/{key}/', f'{key} songs', 'txt')\n lista = []\n for song in songs:\n lista.append(song.rstrip(\"\\n\"))\n estilo_musicas[f'{key}'] = lista\n except FileNotFoundError:\n estilo_musicas[key] = []\n return estilo_musicas\n\n\ndef reproduzir_musica():\n try:\n estilo_musicas = estilo_musicas_function()\n\n print('-=' * 30)\n print(f'\\033[1;36m{\"menu estilos musicais\".center(60)}\\033[m')\n print('-=' * 30)\n\n print('-' * 30)\n for key in estilo_musicas.keys():\n print(f' * {key}')\n print('-' * 30)\n\n basicos.reproduzir_voz('Aquir estão todas as playlist já criadas')\n basicos.reproduzir_voz('selecione a playlist que voçê deseja')\n nomePlay = str(input('\\033[1;37mQual playlist voçê que selecionar? 
\\033[m')).strip().lower()\n\n if nomePlay in estilo_musicas.keys():\n NOTMusic = ''\n basicos.reproduzir_voz(f'voçê escolheu a playlist {nomePlay}')\n lista_musicas = []\n for key, musica in estilo_musicas.items():\n if key == nomePlay:\n if len(musica) > 0:\n for i, valor in enumerate(musica):\n lista_musicas.append(valor)\n print(f'{i} -> \\033[1;37m{valor}\\033[m')\n else:\n NOTMusic = 'não á musicas'\n if NOTMusic == \"não á musicas\":\n basicos.reproduzir_voz('Não á musicas nessa playlist')\n pass\n else:\n basicos.reproduzir_voz('Escolha entre o modo aleatório ou o modo manual')\n reps = str(input('\\033[1;37mmodo aleátorio | modo manual: \\033[m')).strip().lower()\n\n if 'aleátorio' in reps:\n pygame.mixer.init()\n repitido = []\n total_musicas = 0\n while 1:\n aleatorio = randint(0, (len(lista_musicas) - 1))\n if total_musicas == len(lista_musicas):\n break\n if aleatorio not in repitido:\n musica = lista_musicas[aleatorio]\n pygame.mixer.music.load(f\"../../../musicas/{nomePlay}/{musica}\")\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy() == 1:\n continue\n repitido.append(aleatorio)\n total_musicas += 1\n else:\n pygame.mixer.init()\n lista_musicas_selecionadas = []\n max = int(input('\\033[1;37mQuantas musicas voçê quer selecionar? \\033[m'))\n for c in range(0, max):\n music = int(input(f'\\033[1;37mqual musica deseja colocar na posição {c}° ? 
\\033[m'))\n lista_musicas_selecionadas.append(music)\n for c in range(0, max):\n musica = lista_musicas[lista_musicas_selecionadas[c]]\n pygame.mixer.music.load(f\"../../../musicas/{nomePlay}/{musica}\")\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy() == 1:\n continue\n else:\n basicos.reproduzir_voz('Essa playlist não existe')\n print('\\033[7mPLAYLIST ENCERRADA!\\033[m')\n print('-=' * 30)\n except Exception:\n basicos.reproduzir_voz('um erro aconteceu!, voçê não pode iniciar essa função')\n print('-=' * 20)\n\n\ndef estilosADM():\n print('==' * 20)\n print(f'\\033[1;36m{\"ADM Estilos musicais\".center(40)}\\033[m')\n print('==' * 20)\n print('--' * 20)\n print('[1] => \\033[1;37m\"ver estilos cadastrados\"\\033[m ')\n print('[2] => \\033[1;37m\"adicionar um novo estilo\"\\033[m ')\n print('[3] => \\033[1;37m\"excluir um estilo\"\\033[m ')\n print('[4] => \\033[1;37m\"Sair\"\\033[m ')\n print('--' * 20)\n\n while 1:\n\n resp = int(input('\\033[1;37mQual você que selecionar?\\033[m '))\n\n if resp == 1:\n Em = estilo_musicas_function()\n print('--' * 20)\n print(f'{\"Estilos Cadastrados\".center(40)}')\n print('--' * 20)\n for pos, key in enumerate(Em.keys()):\n print(f'{pos:.<30}', end=\"\")\n print(f'\\033[1;37m{key}\\033[m')\n elif resp == 2:\n nomePag = str(input('Nome do Estilo: ')).strip().lower()\n estilo = basicos.lerArquivos('../../../musicas/', 'estilos m', 'txt')\n Nestilo = []\n for c in estilo:\n Nestilo.append(c.rstrip(\"\\n\"))\n Nestilo.append(nomePag)\n NE = ''\n for v in Nestilo:\n NE += v + \"\\n\"\n basicos.criaArquivos('../../../musicas/','estilos m','txt', f'{NE}','w')\n elif resp == 3:\n EstExcluir = int(input('Qual Estilo deseja excluir? 
[\"-1\" para cancelar] '))\n if EstExcluir == -1:\n pass\n else:\n estilo = basicos.lerArquivos('../../../musicas/', 'estilos m', 'txt')\n Nestilo = []\n for c in estilo:\n Nestilo.append(c.rstrip(\"\\n\"))\n Nestilo.pop(EstExcluir)\n NE = ''\n for v in Nestilo:\n NE += v + \"\\n\"\n basicos.criaArquivos('../../../musicas/', 'estilos m', 'txt', f'{NE}', 'w')\n else:\n break\n","sub_path":"assistente virtual/function_Uteis/play_musica.py","file_name":"play_musica.py","file_ext":"py","file_size_in_byte":6116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"207955098","text":"class Invoice:\r\n \r\n def __init__(self):\r\n self.items = {}\r\n \r\n def addProduct(self,qnt,price,discount):\r\n self.items[\"qnt\"] = qnt\r\n self.items[\"unit_price\"] = price\r\n self.items[\"discount\"] = discount\r\n return self.items\r\n\r\n def totalImpurePrice(self, products):\r\n total_impure_price = 0\r\n for k, v in products.items():\r\n total_impure_price += float(v[\"unit_price\"]* int(v[\"qnt\"]))\r\n total_impure_price = round(total_impure_price, 2)\r\n return total_impure_price\r\n \r\n def totalDiscount(self, products):\r\n total_discount = 0\r\n for k, v in products.items():\r\n total_discount += (int(v[\"qnt\"])*float(v[\"unit_price\"]))*float(v[\"discount\"])/100\r\n total_discount = round(total_discount,2)\r\n return total_discount\r\n \r\n def totalPurePrice(self, products):\r\n totalPurePrice = self.totalImpurePrice(products)- self.totalDiscount(products)\r\n return totalPurePrice\r\n \r\n def inputAnswer(self, input_value):\r\n while True:\r\n userInput = input(input_value)\r\n if userInput in [\"y\",\"n\"]:\r\n return userInput\r\n print(\"y or n! try again\")\r\n \r\n def inputNumber(self, input_value):\r\n while True:\r\n try:\r\n userInput = input(input_value)\r\n except ValueError:\r\n print(\"not a number! try again\")\r\n else:\r\n return userInput\r\n #added\r\n def getName(self, products,indexVal):\r\n key_list = list(products.keys())\r\n val_list = list(products.values())\r\n name = key_list[indexVal]\r\n return name","sub_path":"invoice.py","file_name":"invoice.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"345791247","text":"#!/usr/bin/env python\nimport time\nimport serial\nimport rospy\nimport thread\nfrom std_msgs.msg import String\n\n\nser = serial.Serial(\n port='/dev/ttyUSB0',\n baudrate=9600,\n parity=serial.PARITY_ODD,\n stopbits=serial.STOPBITS_TWO,\n bytesize=serial.SEVENBITS\n )\n\ndef send():\n input = 'hii'\n # Python 3 users\n # input = input(\">> \")\n rospy.loginfo(\"Txed The signal\"+input)\n pub_tx.publish(input)\n ser.write(input + '\\r\\n')\n\ndef receive():\n out = ''\n out += ser.read(10)\n time.sleep(1)\n pub_rx.publish(out)\n rospy.loginfo(\"Rcvd Mssg\",out)\n\n\nif __name__ == '__main__':\n\n # configure the serial connections (the parameters differs on the device you are connecting to)\n ser.isOpen()\n rospy.init_node('XBee_2')\n pub_tx=rospy.Publisher('XBee_2_TX', String, queue_size=10)\n pub_rx=rospy.Publisher('XBee_2_RX', String, queue_size=10)\n rate=rospy.Rate(0.1)\n thread.start_new_thread(receive,())\n while not rospy.is_shutdown():\n thread.start_new_thread(send,()) \n rate.sleep() ","sub_path":"XBee_ROS/src/XBee_2.py","file_name":"XBee_2.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"203652257","text":"#!/usr/bin/env python3.5\n# -*- coding: utf-8 -*-\n# @Time : 2017/11/14 15:45\n# @Author : HoxHou\n# @File : mysql_engine.py\n# @Software: PyCharm Community Edition\n\n\n\n\nimport re\n\nimport pymysql\n\n\nclass MySQLEngine(object):\n def __init__(self):\n \"\"\"\n MySQL 数据库ORM\n :param\n \"\"\"\n self.host = \"10.10.13.120\"\n self.user = \"root\"\n self.password = \"123456\"\n self.charset = \"utf8\"\n self.db_name = \"xhg_testautomation\"\n try:\n conn = pymysql.connect(host=self.host, user=self.user, password=self.password,\n database=self.db_name, charset=self.charset, connect_timeout=100)\n except pymysql.Error:\n raise\n self.__conn = conn\n\n def my_execute(self, execute_type, sql):\n \"\"\"\n :param execute_type:\n :param sql:\n :return:\n \"\"\"\n try:\n print(sql) # 后期保存到日志模块\n self.cursor = self.__conn.cursor()\n if execute_type is 'query':\n self.cursor.execute(sql)\n data_list = self.cursor.fetchall()\n table_fields = [each[0] for each in self.cursor.description]\n result=[]\n for row in data_list:\n obj_dict = {}\n # 字典键值对\n for index, value in enumerate(row):\n obj_dict[table_fields[index]] = value\n result.append(obj_dict)\n #print(result) # 后期保存到日志模块\n return result\n elif execute_type in ('insert', 'update', 'delete'):\n try:\n result={}\n self.cursor.execute(sql)\n if execute_type == 'insert':\n insert_id = self.__conn.insert_id()\n print('新插入的id:'+str(insert_id))\n result['insert_id'] = insert_id\n self.__conn.commit()\n print(\"受影响的行:%d\" % (self.cursor.rowcount))\n result['rowcount'] = self.cursor.rowcount\n return result\n except pymysql.Error:\n self.__conn.rollback()\n raise\n finally:\n self.__conn.close()\n","sub_path":"common/db_handler/mysql_engine.py","file_name":"mysql_engine.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"608028369","text":"# 234. Palindrome Linked List\n# Given a singly linked list, determine if it is a palindrome.\n\n# Example 1:\n\n# Input: 1->2\n# Output: false\n# Example 2:\n\n# Input: 1->2->2->1\n# Output: true\n# Follow up:\n# Could you do it in O(n) time and O(1) space?\n\n# convert to a list and then reverse the last and\n# compare with regular list\n\n\ndef isPalindrome(self, head: ListNode) -> bool:\n nums = []\n curr = head\n while curr:\n nums.append(curr.val)\n curr = curr.next\n\n return nums == nums[::-1]\n\n\n# slow fast pointers\n# move slow to one past halfway\n# reverse first half of the last on the way\n# traverse reverse first half and second half\n# checking for equality\n#\ndef isPalindrome(self, head):\n rev = None\n slow = fast = head\n while fast and fast.next:\n fast = fast.next.next\n rev, rev.next, slow = slow, rev, slow.next\n if fast:\n slow = slow.next\n while rev and rev.val == slow.val:\n slow = slow.next\n rev = rev.next\n return not rev\n","sub_path":"LinkedList/PalindromeLinkedList.py","file_name":"PalindromeLinkedList.py","file_ext":"py","file_size_in_byte":1002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"610447448","text":"# -*- coding: utf-8 -*-\n'''\nCreated on Sat Feb 10 23:22:48 2018\n\n@author: Wen\n\n'''\nimport numpy as np\nimport random\n\n# the 2048 game\n\nclass Board(object):\n def __init__(self, board_size=4):\n self.board_size = board_size\n self.actions = ['L', 'U', 'R', 'D']\n # sometimes certain action is not available\n self.available_actions = self.actions\n self.new_num = [2, 4]\n self.game_over = False\n self.indices = np.arange(board_size)\n # use 2 arrays to record the state of the board\n # 1-D array is easy to choose empty cells, 2-D array easy to check game status\n self.state = np.zeros(board_size * board_size)\n self.state_square = np.zeros((board_size, board_size))\n # randomly fill two cells in the board\n self.update()\n self.update()\n self.check_all()\n \n # check whether sliding to left is available\n def check(self):\n available = False\n # the game ends only if all actions are unavailable\n # check for sliding left\n # for other directions rotate the board and call the same function\n for i in self.indices:\n # store the original values in the row for update\n values = list(self.state_square[i])\n empty_indices = [j for j in self.indices if values[j] == 0]\n # if all cells are empty then continue to check the next row\n if len(empty_indices) == self.board_size:\n continue\n num_indices = [j for j in self.indices if values[j] > 0]\n # as long as the rightmost number is in the right of the leftmost empty cell then the action is available\n if empty_indices != []:\n if min(empty_indices) < max(num_indices):\n available = True\n break\n # otherwise only can move if adjacent cells are of the same value\n # in this case if number of numbered cells has to be greater than 1\n num_len = len(num_indices)\n if num_len < 2:\n continue\n else:\n for j in range(num_len - 1):\n if values[num_indices[j]] == values[num_indices[j+1]]:\n available = True\n break\n return available\n \n # check all directions, if there is no available 
moves then game over\n def check_all(self):\n self.available_actions = []\n \n action = 'L'\n if self.check():\n self.available_actions.append(action)\n \n action = 'U'\n self.state_square = np.rot90(self.state_square, 1)\n if self.check():\n self.available_actions.append(action)\n self.state_square = np.rot90(self.state_square, 3)\n \n action = 'D'\n self.state_square = np.rot90(self.state_square, 3)\n if self.check():\n self.available_actions.append(action)\n self.state_square = np.rot90(self.state_square, 1)\n \n action = 'R'\n self.state_square = np.flip(self.state_square, 1)\n if self.check():\n self.available_actions.append(action)\n self.state_square = np.flip(self.state_square, 1)\n \n if self.available_actions == []:\n self.game_over = True\n else:\n self.game_over = False\n \n # slide the board to the left\n def slide(self):\n for i in self.indices:\n # store the original values in the row for update\n # need to make a new list, otherwise it is merely a reference\n values = list(self.state_square[i])\n empty_indices = [j for j in self.indices if values[j] == 0]\n # if all cells are empty then continue to check the next row\n if len(empty_indices) == self.board_size:\n continue\n num_indices = [j for j in self.indices if values[j] > 0]\n num_len = len(num_indices)\n \n self.state_square[i] = 0\n if num_len < 2:\n self.state_square[i][0] = values[num_indices[0]]\n else:\n j = 0\n pos = 0\n while j < num_len:\n # whether reaches the last element\n if j < num_len - 1:\n if values[num_indices[j]] == values[num_indices[j+1]]:\n self.state_square[i][pos] = 2 * values[num_indices[j]]\n j += 2\n pos += 1\n continue\n \n self.state_square[i][pos] = values[num_indices[j]]\n j += 1\n pos += 1 \n \n # randomly choose an empty cell to fill it with 2 or 4 randomly\n def update(self):\n idx = random.choice([i for i in range(self.board_size * self.board_size) if self.state[i] == 0])\n new_num = random.choice(self.new_num)\n # need to update both state arrays\n 
self.state[idx] = self.state_square[int(np.floor(idx / self.board_size))][idx % self.board_size] = new_num\n \n # make a move using a certain action\n def move(self, action):\n # for different action, rotate the matrix correspondingly and slide to the left\n # then restore the matrix afterward\n # 'U' and 'D' corresponds to rotate 90 and 270 degrees\n # 'R' corresponds to flip the matrix\n if not action in self.available_actions:\n print(action, 'is not an available action.')\n return False\n \n print('Making a move:', action)\n if action == 'R':\n self.state_square = np.flip(self.state_square, 1)\n self.slide()\n self.state_square = np.flip(self.state_square, 1)\n else:\n rot = self.actions.index(action)\n self.state_square = np.rot90(self.state_square, rot)\n self.slide()\n self.state_square = np.rot90(self.state_square, 4 - rot)\n \n self.state = self.state_square.flat[:]\n \n return True\n \n # make a random move\n def random_move(self):\n self.move(random.choice(self.available_actions))\n \n def score(self):\n return max(self.state)\n \n def show(self):\n print(self.state_square.astype(int))\n\nif __name__ == '__main__':\n board = Board(4)\n board.show()\n steps = 0\n while not board.game_over:\n steps += 1\n board.random_move()\n board.update()\n board.show()\n board.check_all()\n if board.game_over:\n print('Game over!')\n print('Score:', board.score())\n print('Steps:', steps)\n break","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":6742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"322072821","text":"# Requires Python 3.5+\n# Requires mp3-tagger library (\"pip3 install mp3-tagger\" or download from here: https://pypi.org/project/mp3-tagger/)\n# nem kell -*- coding:Utf-8 -*-\n\nimport os\nimport glob\nimport json\nimport sys\nimport getopt\nfrom mp3_tagger import MP3File\nfrom mp3_tagger.id3 import VERSION_2, VERSION_BOTH, VERSION_1\n\n\n#PATH = \"/home/apulai/mp3\"\n#PATH = \"Z:\\\\\"\n#PATH = \"c:\\\\test\"\n#PATH=\"Z:\\\\juca\"\n#PATH=\"Z:\\\\mp3\\\\shrek\"\n#PATH=\"Z:\\\\mp3\\\\_Magyar\"\n#PATH=\"D:\\\\temp\"\n#PATH=\"Z:\\\\mp3\\\\_Latin\"\n#PATH=\"Z:\\\\mp3\\\\_Country\"\n#PATH=\"Z:\\\\mp3\\\\_Disco\"\n#PATH=\"Z:\\\\mp3\\\\_Folk\"\n#PATH=\"Z:\\\\mp3\\\\_Gyerek\"\n#PATH=\"Z:\\\\mp3\\\\_Hangoskonyv\"\n#PATH=\"Z:\\\\mp3\\\\_Jazz\\\\Take Five\"\nPATH=\"Z:\\\\mp3\\\\_Magyar\\\\Valami Amerika\"\n#PATH=\"Z:\\\\mp3\\\\_Pop\\\\Boney M - The Magic Of Boney M\"\n#PATH=\"Z:\\mp3\\_Magyar\\István a király\"\n#PATH=\"Z:\\\\mp3\\\\_Vegyes\"\n#PATH=\"Z:\\\\mp3\\\\_Jazz\\\\Smooth Africa\"\n#PATH=\"Z:\\\\mp3\\\\_Rock\"\n#PATH=\"Z:\\\\mp3\\\\_Country\"\n#PATH=\"/mnt/backupdsk/mp3/_Magyar\"\n\n# We will look for these extensions\nLIST_OF_EXTENSIONS = \".mp3\", \".MP3\"\n\n# In some of the tags there were very strange chars\n# We want them to be removed\nBAD_CHARS = \" \\n\\x00\\r\\ufeff\"\n\n# Pseudo done: TODO: Skip only those directories which were marked as consistent in the processed.log file (likely load proccessed log before run\n# We log error message and then the directory name\n\n# Pseudo done: TODO: Log somehow if mp3 file had only v1 tags\n# Folder is logged\n\n\n#LOGFILE_NAME = \"uxprocessed.log\"\nLOGFILE_NAME = \"processed.log\"\n#PROCESSED_DIR_FILE = PATH + \"/uxprocessed.log\"\nPROCESSED_DIR_FILE = PATH + \"/\" + LOGFILE_NAME\n\nrootDir = PATH\nreport_inconsistent_directories = 1\nupdate_mp3data = 1\n\ndef collect_mp3info(directory):\n \"\"\"\n function:\tcollect-mp3info\n input:\t foldername\n output:\t list of dictionaries 
containing mp3 tags per song\n operation:\topens each mp3 files, and extracts mp3 info into a list of dictionaries.\n might return an empty list\n\n \"\"\"\n print(\"Function: collect_mp3info Directory {}\".format(directory))\n songs_list = list()\n file_list = list()\n for extension in LIST_OF_EXTENSIONS:\n temp_list = glob.glob(directory + \"/*\" + extension, recursive=False)\n # Do not append a list to a list...\n file_list = file_list + temp_list\n\n #Since on windows .mp3 and .MP3 is not different\n #Make this list uniq again\n\n temp_list = file_list\n\n file_list = list()\n for x in temp_list:\n if x not in file_list:\n file_list.append(x)\n\n #extension = \".mp3\"\n #file_list = glob.glob(directory + \"/*\" + extension, recursive=False)\n\n # print(directory),\n #print(file_list),\n\n for file in file_list:\n # print(\"file:\", file)\n # print(\"file: \" + file)\n # print(\"file: {} directory:{}\".format(file,directory))\n # print(\"file: {0} directory:{1}\".format(file,directory))\n # print(\"file: {0} directory:{1} file: {0}\".format(file,directory))\n\n print(\".\", end=\"\")\n d = dict()\n try:\n d[\"hasbadchars\"] = False\n\n mp3 = MP3File(file)\n mp3.set_version(VERSION_2) # we just want to get the v2 tags\n d[\"tagversion\"]=\"v1\" # We hope tags will be v2, but let's set the worst case for us which is v1, if no v2 tags we will assume all was v1 and will not write\n\n if isinstance(mp3.artist, str): # If it's a string we are good...\n if len(mp3.artist) == 0: # But if v2 tag is empty, let's try v1 tag instead\n mp3.set_version(VERSION_1) # So there was a non-zero v2 tag\n else:\n d[\"tagversion\"] = \"v2\"\n d[\"artist\"] = mp3.artist.rstrip()\n else:\n d[\"artist\"] = \"\"\n\n mp3.set_version(VERSION_2) # we just want to get the v2 tags\n if isinstance(mp3.album, str): # If it's a string we are good...\n if len(mp3.album) == 0: # But if v2 tag is empty, let's try v1 tag instead\n mp3.set_version(VERSION_1)\n else:\n d[\"tagversion\"] = \"v2\" # So 
there was a non-zero v2 tag\n d[\"album\"] = mp3.album.rstrip()\n else:\n d[\"album\"] = \"\"\n\n mp3.set_version(VERSION_2) # we just want to get the v2 tags\n if isinstance(mp3.song, str): # If it's a string we are good...\n if len(mp3.song) == 0: # But if v2 tag is empty, let's try v1 tag instead\n mp3.set_version(VERSION_1)\n else:\n d[\"tagversion\"] = \"v2\" # So there was a non-zero v2 tag\n d[\"song\"] = mp3.song.rstrip()\n else:\n d[\"song\"] = \"\"\n\n mp3.set_version(VERSION_2) # we just want to get the v2 tags\n if isinstance(mp3.band, str): # If it's a string we are good...\n if len(mp3.band) == 0: # But if v2 tag is empty, let's try v1 tag instead\n mp3.set_version(VERSION_1)\n else:\n d[\"tagversion\"] = \"v2\" # So there was a non-zero v2 tag\n d[\"band\"] = mp3.band.rstrip()\n else:\n d[\"band\"] = \"\"\n\n d[\"filename\"] = file\n\n songs_list.append(d)\n except Exception as e:\n print(\"Warning: MP3 tag cannot be read from file: {}. Exception: {}\".format(file, e))\n writelogfile(\"ERR MP3:\" + format(file) + \"\\n\")\n print(\"\")\n print(json.dumps(songs_list, indent=4, ensure_ascii=False))\n\n return songs_list\n\ndef remove_bad_chars(song_list):\n \"\"\"\n function:\tremove_bad_chars\n input:\t song_list\n output:\t corrected song list\n operation:\tWalks so a song_list collected by collect_mp3info\n Tries to remove bad chars we have seen mostly on windows\n \"\"\"\n ret_list = list()\n for song in song_list:\n ret_song = dict()\n\n ret_song[\"hasbadchars\"]=False\n\n ret_song[\"artist\"] = song[\"artist\"].rstrip(BAD_CHARS)\n if (ret_song[\"artist\"] != song[\"artist\"]):\n ret_song[\"hasbadchars\"] = True\n\n ret_song[\"album\"] = song[\"album\"].rstrip(BAD_CHARS)\n if (ret_song[\"album\"] != song[\"album\"]):\n ret_song[\"hasbadchars\"] = True\n\n ret_song[\"song\"] = song[\"song\"].rstrip(BAD_CHARS)\n if (ret_song[\"song\"] != song[\"song\"]):\n ret_song[\"hasbadchars\"] = True\n\n ret_song[\"band\"] = song[\"band\"].rstrip(BAD_CHARS)\n 
if (ret_song[\"band\"] != song[\"band\"]):\n ret_song[\"hasbadchars\"] = True\n\n ret_song[\"tagversion\"]=song[\"tagversion\"]\n ret_song[\"filename\"] = song[\"filename\"]\n ret_list.append(ret_song)\n\n return ret_list\n\ndef is_mp3info_consistent(songs_list):\n \"\"\"\n function:\tis_mp3info_consistent\n input:\t list of dictionaries with mp3 tags\n output:\t True if album, band and artist are the same for all songs\n True if list is empty\n False if all band, artist and album tags are empty\n False in other cases\n operation:\ttakes the list's first element and compares subsequent entries\n if there is a difference returns False\n \"\"\"\n # if we got an empty list as input, we will return\n if len(songs_list) == 0:\n return True\n # artist_consistent = True\n album_consistent = True\n band_consistent = True\n artist_consistent = True\n first_nonempty_album = False\n first_nonempty_band = False\n first_nonempty_artist = False\n # we will compare each song to the first song\n first_song = songs_list[0]\n\n for song in songs_list:\n # We don't need to compare the first song to first_song one as well [1:] We can use 1: like operators on lists\n # But this is wrong what if we have only 1 song?!\n if song[\"album\"] != \"\":\n first_nonempty_album = True\n if song[\"band\"] != \"\":\n first_nonempty_band = True\n if song[\"artist\"] != \"\":\n first_nonempty_artist = True\n\n if first_song[\"artist\"] != song[\"artist\"]:\n artist_consistent = False\n print(\"Suspect: Artist inconsistent\")\n break\n if first_song[\"album\"] != song[\"album\"]:\n album_consistent = False\n print(\"Err: Album inconsistent\")\n break\n if first_song[\"band\"] != song[\"band\"]:\n band_consistent = False\n print(\"Err: Band inconsistent\")\n break\n\n # Not all artist was the same\n # We can correct it if band is consistent and album is consistent\n # And all artist is different then we are still OK\n\n if( artist_consistent == False):\n print(\"Double check artist consistency, if 
album and band is consitent, and no empty artists are then OK\")\n if ( band_consistent == True and album_consistent == True):\n totalnumberofsongs = len(songs_list)\n artistlist = list()\n for song in songs_list: # We need to generate the list of artists\n artistlist.append(song[\"artist\"])\n track = {}\n for value in artistlist:\n if (value == [] or (value is None)):\n value = \"empty\"\n if value not in track:\n track[value] = 1\n else:\n track[value] += 1\n numberofdifferentartists = len(track)\n #if ( float(numberofdifferentartists)/float(totalnumberofsongs)==1.0):\n if( \"empty\" not in artistlist):\n artist_consistent = True\n print(\"Doublecheck: artist is OK, since no empty artist while band and album is consistent\")\n\n else:\n print(\"Double check: artist is really not OK\")\n\n if not first_nonempty_band:\n print(\"Band is empty for all songs!\")\n if not first_nonempty_album:\n print(\"Album is empty for all songs!\")\n if not first_nonempty_artist:\n print(\"Artist is empty for all songs!\")\n return album_consistent and band_consistent and artist_consistent and first_nonempty_album and \\\n first_nonempty_band and first_nonempty_artist\n\n\ndef suggest_mostfrequent_mp3info(songlist):\n \"\"\"\n function:\tsuggest_mostfrequent_mp3info\n input:\t list of mp3 objects - songlist\n output:\t band, album, artist tuple\n operation:\tlooks into the mp3 objects, and calculates the\n most frequent band and album string\n returns band, album\n \"\"\"\n\n # If we get no data let's return\n totalnumberofsongs = len(songlist)\n if totalnumberofsongs == 0:\n return \"empty\", \"empty\", \"empty\"\n\n albumlist = list()\n bandlist = list()\n artistlist = list()\n for song in songlist: # Create 3 separate list of attributes so we can work with them easier\n albumlist.append(song[\"album\"])\n bandlist.append(song[\"band\"])\n artistlist.append(song[\"artist\"])\n\n # Start work on list of albums, calculate most ferquent album name\n track = {}\n for value in 
albumlist:\n if( value == [] or (value is None) ): # sometimes we got NoneVaule, likely this won't happen anymore, but we still check\n value = \"empty\"\n if value not in track:\n track[value] = 1\n else:\n track[value] += 1\n retvalalbum=max(track, key=track.get) # sometimes we got NoneVaule, likely this won't happen anymore, but we still check\n retvalalbumqty=track[retvalalbum]\n calculatedalbum=retvalalbum\n calculatedalbumqty=retvalalbumqty\n\n # Start work on list of artits, calculate most ferquent artist name\n track = {}\n for value in artistlist:\n if (value == [] or(value is None)):\n value = \"empty\"\n if value not in track:\n track[value] = 1\n else:\n track[value] += 1\n\n # We will select here the most frequent artist\n calculatedartist = max(track, key=track.get)\n retvalartist = calculatedartist\n calculatedartistqty = track[calculatedartist]\n retvalartistqty = calculatedartistqty\n totalnumberofdifferentartist=len(track)\n\n\n # But If all song has an artist we will propose keep instead\n if (\"\" not in artistlist):\n retvalartist = \"keep\"\n\n # Start work on list of band, calculate most ferquent band name\n track = {}\n for value in bandlist:\n if (value == \"\" or (value is None)):\n value = \"empty\"\n if value not in track:\n track[value] = 1\n else:\n track[value] += 1\n retvalband = max(track, key=track.get)\n retvalbandqty = track[retvalband]\n calculatedband=retvalband\n calculatedbandqty=retvalbandqty\n\n#If band is empty propose artist as band\n if retvalband == \"empty\" :\n # If the most frequent artist is present in more than 15% of the songs\n # and the band is empty let's propose artist as the band\n if float(calculatedartistqty)/float(totalnumberofsongs) >= 0.15:\n retvalband = calculatedartist\n\n\n\n print(\"Total number of songs in this folder:\\t{}\".format(totalnumberofsongs))\n\n print(\"Most frequent band:\\t{} \\tnumber of occurances: {} .\".format(calculatedband, calculatedbandqty))\n print(\"Most frequent album:\\t{} 
\\tnumber of occurances: {} .\".format(calculatedalbum, calculatedalbumqty))\n print(\"Most frequent artist:\\t{} \\tnumber of occurances: {} . \".format(calculatedartist, calculatedartistqty))\n\n print(\"Returning proposal for band:\\t{} \\tnumber of occurances: {} .\".format(retvalband,retvalbandqty))\n print(\"Returning proposal for album:\\t{} \\tnumber of occurances: {} .\".format(retvalalbum,retvalalbumqty))\n print(\"Returning proposal for artist:\\t{} \\tnumber of occurances: {} . \".format(retvalartist, retvalartistqty))\n\n return retvalband,retvalalbum,retvalartist\n\n\ndef update_mp3info(songlist, requiredtag, write_v1_tags=False):\n \"\"\"\n function:\tupdate_mp3info\n input:\t songlist a directory of mp3 tags, dictionary of required mp3, write_v1_tags by default false\n output:\n operation:\twrites mp3tags into each song, if tag == keep keeps tag (artist only)\n future: updates processed dir logfile\n \"\"\"\n\n # TODO: add album cover!\n\n print(\"Function: update_mp3info\")\n #print(dir),\n #print(fileList),\n for song in songlist:\n needtosave=False\n if( song[\"album\"] != requiredtag[\"album\"]):\n needtosave=True\n if( song[\"band\"] != requiredtag[\"band\"]):\n needtosave=True\n if (song[\"song\"] == \"\"):\n needtosave=True\n if( song[\"artist\"] != requiredtag[\"artist\"] and requiredtag[\"artist\"] != \"keep\" ):\n needtosave=True\n\n if( song[\"tagversion\"]==\"v1\" and write_v1_tags == False):\n # ISSUE: mp3tagger seems not to handle corrctly if there is no tag or only v1 tags\n needtosave = False\n print(\"WARNING: Song with V1 tags only: {}\".format(song[\"filename\"]))\n #writelogfile(\"Log: only V1 tag excpetion: {}\".format(song[\"filename\"]))\n\n if needtosave==True :\n try:\n mp3 = MP3File(song[\"filename\"])\n mp3.set_version(VERSION_BOTH)\n mp3.band = requiredtag[\"band\"].rstrip(BAD_CHARS)\n mp3.album = requiredtag[\"album\"].rstrip(BAD_CHARS)\n if song[\"song\"] == \"\":\n # My TC friend is totally bored sometimes somewhere so 
he learns stuff like [:-4]\n mp3.song = os.path.basename(song[\"filename\"])[:-4]\n mp3.song = mp3.song.rstrip(BAD_CHARS)\n if (requiredtag[\"artist\"] != \"keep\"):\n mp3.artist = requiredtag[\"artist\"].rstrip(BAD_CHARS)\n #print('Writing tags to %s' % song[\"filename\"] )\n mp3.save()\n except Exception as e:\n print(\"Warning: MP3 tag cannot be saved for file: {}. Exception: {}\".format(song[\"filename\"], e))\n writelogfile(\"Log: Warning: MP3 tag cannot be saved for file:\" + format(song[\"filename\"])+ format(e))\n else:\n print(\"Info: MP3 tag updated for file: {}\".format(song[\"filename\"]))\n\ndef rewrite_songs_with_bad_chars(songlist):\n \"\"\"\n function:\trewrite_songs_with_bad_chars\n input:\t songlist a directory of mp3 tags\n output:\n operation:\twrites mp3tags and rstrips again\n \"\"\"\n for song in songlist:\n try:\n if song[\"hasbadchars\"] == True and song[\"tagversion\"] == \"v2\":\n try:\n mp3 = MP3File(song[\"filename\"])\n mp3.set_version(VERSION_BOTH)\n mp3.band = song[\"band\"].rstrip(BAD_CHARS)\n mp3.album = song[\"album\"].rstrip(BAD_CHARS)\n mp3.song = song[\"song\"].rstrip(BAD_CHARS)\n mp3.artist = song[\"artist\"].rstrip(BAD_CHARS)\n mp3.save()\n except Exception as e:\n print(\n \"Warning: MP3 tag cannot be saved for file: {}. 
Exception: {}\".format(song[\"filename\"], e))\n writelogfile(\n \"Log: Warning: MP3 tag cannot be saved for file:\" + format(song[\"filename\"]) + format(e))\n else:\n print(\"Info: MP3 badchars removed for file: {}\".format(song[\"filename\"]))\n elif song[\"tagversion\"] == \"v1\":\n writelogfile(\n \"ERR V1 BADCHAR: MP3 tag cannot be saved for file:\" + format(song[\"filename\"]) + format(e))\n except NameError:\n print(\"Info no bad chars, or not checked\")\n return\n\n\ndef writelogfile(str):\n try:\n with open(PROCESSED_DIR_FILE, \"a\") as f:\n f.write(str)\n except IOError:\n print(\"Processed directories log file: {} cannot be opened.\".format(PROCESSED_DIR_FILE))\n\n\ndef walkdir_OBSOLETE(dir):\n \"\"\"\n function:\twalkdir - OBSOLETE\n input:\t foldername\n output:\t none\n operation:\trecureseivly walks through the directories\n tries to collect mp3 info in each dir\n checks mp3 info per directroy\n future changes: make it non-recursive?\n \"\"\"\n for dirName, subdirList, fileList in os.walk(dir):\n print('\\nArrived in directory: %s' % dirName)\n songlist = collect_mp3info(dirName)\n\n # songlist maybe empty, in this case we skip info check\n if( len(songlist) > 0):\n if( is_mp3info_consistent(songlist)== False):\n print(\"Album is INCONSISTENT\")\n if( update_mp3data == 1):\n suggestedband,suggestedalbum,suggestedartist=suggest_mostfrequent_mp3info(songlist)\n print(\"Suggested band: \" + suggestedband + \"\\tSuggested album: \" + suggestedalbum + \"\\tSuggested artist: \" + suggestedartist)\n accept = input(\"Accept suggested (Y/n/q)?\")\n if accept.lower() == 'n':\n suggestedband = input(\"Enter new band: %s \" % suggestedband) or suggestedband\n suggestedalbum = input(\"Enter new album: %s \" % suggestedalbum) or suggestedalbum\n suggestedartist = input(\"Enter new artist (or keep or blank) %s\" % suggestedartist) or suggestedartist\n print(\"New values: Suggested band: \" + suggestedband + \"\\tSuggested album: \" + suggestedalbum + 
\"\\tSuggested artist: \" + suggestedartist)\n if accept.lower() == 'q':\n exit(2)\n d = dict ()\n d[\"band\"] = suggestedband\n d[\"album\"] = suggestedalbum\n d[\"artist\"] = suggestedartist\n update_mp3info(songlist,d)\n if( report_inconsistent_directories == 1):\n writelogfile(\"Inconsistent:\" + dirName + \"\\n\")\n\n else:\n print(\"Album seems to be OK\")\n writelogfile(\"Consistent:\" + dirName + \"\\n\")\n if( len(subdirList) == 0):\n print(\"No subdirs\")\n else:\n for dname in subdirList:\n print(\"Going to: {}\".format(dname))\n walkdir_OBSOLETE(dname)\n print(\"Directroy processed: {}\".format(dirName))\n\ndef v1_tags_present(song_list):\n # Walk through the tags and check if any of them is v1\n # if v1 we return true\n for song in song_list:\n if song[\"tagversion\"] == \"v1\":\n return True\n return False\n\n\ndef process_dir(current_directory):\n \"\"\"\n function:\tprocess_dir\n input:\t foldername\n output:\t 0 if directory is updated\n 1 if directory is not updated\n 2 if directory has v1 tags and it is not updated\n operation:\tgenerates list of songs in current directory\n collects mp3info\n processes mp3info\n \"\"\"\n\n song_list=collect_mp3info(current_directory)\n if (len(song_list) > 0):\n # If there are v1 tags present we will log only an error for this directory\n if (v1_tags_present(song_list) == True ):\n print(\"Album has songs with v1 tags only, not safe to process\")\n return 2\n if (is_mp3info_consistent(song_list) == False):\n print(json.dumps(song_list, indent=4, ensure_ascii=False))\n print(\"Album is inconsistent\")\n if (update_mp3data == 1):\n # Try to analyze the collected info, and come back with suggestions\n suggestedband, suggestedalbum, suggestedartist = suggest_mostfrequent_mp3info(song_list)\n # Ask for user input\n print(\"Suggested band: \" + suggestedband + \"\\tSuggested album: \" + suggestedalbum + \"\\tSuggested artist: \" + suggestedartist)\n accept = input(\"Accept suggested (Y/n/q/(s)kip)?\")\n if 
accept.lower() == 'n':\n suggestedband = input(\"Enter new band: %s \" % suggestedband) or suggestedband\n suggestedalbum = input(\"Enter new album: %s \" % suggestedalbum) or suggestedalbum\n suggestedartist = input(\n \"Enter new artist (or keep to keep) %s\" % suggestedartist) or suggestedartist\n print(\n \"New values: Suggested band: \" + suggestedband + \"\\tSuggested album: \" + suggestedalbum + \"\\tSuggested artist: \" + suggestedartist)\n if accept.lower() == 'q':\n exit(2)\n if accept.lower() != 's':\n d = dict()\n d[\"band\"] = suggestedband\n d[\"album\"] = suggestedalbum\n d[\"artist\"] = suggestedartist\n update_mp3info(song_list, d)\n else:\n print (\"Skipping this directory\")\n return 1\n else:\n print(\"Album is consistent\")\n\n\n return 0\n\n\ndef walkdir(dir):\n \"\"\"\n function:\twalk\n input:\t root folder name\n output:\t none\n operation:\tgenerates list of directories\n processes each unprocessed directory\n logs processed directories\n \"\"\"\n\n # List all directories\n directories = glob.glob(PATH + '/**/*/', recursive=True)\n\n #Debug if all directories are listed\n #i = 1\n #for p in directories:\n # print(\"{} {}\".format(i,p))\n # i=i+1\n #exit(1)\n\n # Add current directory\n directories.append(dir)\n\n number_of_directories_found = len(directories)\n print(\"Found {} directories to scan\".format(number_of_directories_found))\n\n # We will skip processed directories\n # Therefore we try to load the list of processed directories\n try:\n with open(PROCESSED_DIR_FILE) as f:\n processed_dirs = f.read().splitlines()\n except IOError:\n print(\"Processed directories log file: {} cannot be opened.\".format(PROCESSED_DIR_FILE))\n processed_dirs = []\n # print(processed_dirs)\n # Logfile of processed directories are now loaded\n\n current_directory = ''\n first_file = True\n first_file_in_dir = True\n new = {}\n for current_directory in directories:\n # We will check in this list if our current directory was alread processed:\n if 
current_directory not in processed_dirs:\n # If our current directory was not already processed we will process it.\n # We will collect and update mp3 info in to following call:\n #print(\"Processing dir: {}\".format(current_directory))\n retval = process_dir(current_directory)\n\n # Process_dir will return different error codes for different problems\n # Let's check them 1 by 1\n if retval == 0:\n # If we managed to refresh this directory,\n # we log it as updated\n processed_dirs.append(current_directory)\n writelogfile(current_directory + '\\n')\n elif retval == 1:\n print(\"Directory was skipped / not processed\")\n #We are adding some easy to grep error in the log\n #This will also invalidate the directory when we will load the processed.log file\n #next time we run this tool (there is likely no Skip: driectory when listing the contents)\n writelogfile(\"Skip:\" + current_directory + '\\n')\n else:\n # We are adding some easy to grep error in the log\n # This will also invalidate the directory when we will load the processed.log file\n # next time we run this tool (there is likely no Skip: driectory when listing the contents)\n print(\"Directory had V1 only tags\")\n writelogfile(\"ERR V1:\" + current_directory + '\\n')\n else:\n print(\"Directory: {} was already processed.\".format(current_directory))\n number_of_directories_found = number_of_directories_found - 1;\n print(\"Number of directories to go {}\".format(number_of_directories_found))\n print(\"Walk complete. 
Remeber to check logfile for errors, like folders with v1 tags only.\")\n\n#TODO: if no arguments, then use current folder as path\ndef main(argv):\n global PATH\n global PROCESSED_DIR_FILE\n global LOGFILE_NAME\n\n try:\n PATH\n except NameError:\n print(\"PATH is not defined, we will use current directory\")\n PATH = os.getcwd()\n else:\n print(\"PATH is defined in the script body\")\n\n\n try:\n opts, args = getopt.getopt(argv, \"hp:l:\",[\"path=\",\"log=\"])\n except getopt.GetoptError:\n print('mp3tagger.py -p -l ')\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n print('mp3tagger.py -p -l ')\n sys.exit()\n elif opt in (\"-p\", \"--path\"):\n PATH = arg\n elif opt in (\"-l\", \"--logdir\"):\n LOGFILE_NAME = arg\n\n PROCESSED_DIR_FILE = PATH + \"/\" + LOGFILE_NAME\n\n print(\"Path {} Logdir {} Concat {}\".format(PATH,LOGFILE_NAME,PROCESSED_DIR_FILE))\n walkdir(PATH)\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n\n exit(0)\n #songlist = collect_mp3info(\"Z:\\\\mp3\\\\_Magyar\\\\István a király\\\\Cd1\")\n songlist = collect_mp3info(\"D:\\\\temp\\\\mp3\\\\Valami Amerika\")\n suggestedband, suggestedalbum, suggestedartist = suggest_mostfrequent_mp3info(songlist)\n print(\"Suggested band: \" + suggestedband + \"\\tSuggested album: \" + suggestedalbum + \"\\tSuggested artist: \" + suggestedartist)\n requiredtag = dict()\n requiredtag[\"artist\"]=\"ARTISTA\"\n requiredtag[\"album\"]=\"ALBUMM\"\n requiredtag[\"band\"]=\"Banda\"\n update_mp3info(songlist, requiredtag)\n rewrite_songs_with_bad_chars(songlist)\n","sub_path":"mp3tagger.py","file_name":"mp3tagger.py","file_ext":"py","file_size_in_byte":28076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"521553225","text":"# -*- coding: utf-8 -*-\n\nfrom server.api.v1.urls import router as v1_router\nfrom django.conf.urls import url, include\nfrom rest_framework_jwt.views import (\n obtain_jwt_token,\n refresh_jwt_token,\n verify_jwt_token,\n)\n\nurlpatterns = [\n url('auth/login/', obtain_jwt_token),\n url('auth/token_verify/', verify_jwt_token),\n url('auth/token_refresh/', refresh_jwt_token),\n url('v1/', include(v1_router.urls)),\n]\n","sub_path":"{{cookiecutter.project_name}}/server/api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"9594598","text":"import urllib.request\r\nfrom bs4 import BeautifulSoup\r\nimport re,json,collections\r\nimport csv\r\nimport os\r\nfrom threading import Thread\r\n\r\nsymList = []\r\nans = collections.defaultdict(list)\r\n\r\ndef crawl(sym):\r\n url = \"https://finance.yahoo.com/quote/{}/key-statistics?p={}\".format(sym, sym)\r\n header = {\r\n 'User-Agent': \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36\"}\r\n soup = BeautifulSoup(urllib.request.urlopen(url), 'html.parser',from_encoding=\"iso-8859-1\")\r\n result = re.search('root.App.main = (.*)\\;', soup.text)\r\n result = json.loads(result.group(1))\r\n dic = result\r\n keys = ['priceToBook','trailingPE']\r\n res = collections.defaultdict()\r\n queue = []\r\n for key, val in dic.items():\r\n if key in keys:\r\n res[key] = val\r\n if type(val) == list or type(val) == dict:\r\n queue.append(val)\r\n while queue:\r\n q2 = []\r\n for item in queue:\r\n if type(item) == list:\r\n for jtem in item:\r\n if type(jtem) == list or type(jtem) == dict:\r\n q2.append(jtem)\r\n elif type(item) == dict:\r\n for key, val in item.items():\r\n if key in keys:\r\n res[key] = val\r\n if type(val) == list or type(val) == dict:\r\n q2.append(val)\r\n queue = q2\r\n nums = ['','','']\r\n if 'trailingPE' in res and 'raw' in res['trailingPE']:\r\n pe = res['trailingPE']['raw']\r\n nums[0] = pe\r\n if 'priceToBook' in res and 'raw' in res['priceToBook']:\r\n pb = res['priceToBook']['raw']\r\n nums[1] = pb\r\n if nums[0] != '' and nums[1] != '':\r\n exb = str(round(nums[0]*nums[1],2))\r\n nums[2] = exb\r\n ans[sym] = nums\r\n print (sym,nums)\r\n\r\n\r\ndef toSyms(path):\r\n reader = csv.reader(open(path, \"r\"))\r\n for row in reader:\r\n symList.append(row[0].replace(' ',''))\r\n return symList\r\n\r\n\r\n\r\npath = 'st.csv'\r\ntoSyms(path)\r\n\r\nthreads = []\r\nfor key in symList[0:200]:\r\n t = Thread(target=crawl, args=(key,))\r\n t.start()\r\n 
threads.append(t)\r\nfor b in threads:\r\n b.join()\r\nprint (len(ans))\r\nprint (ans)\r\n\r\n\r\n","sub_path":"mulThread.py","file_name":"mulThread.py","file_ext":"py","file_size_in_byte":2260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"315844876","text":"from odoo import models, fields, api\nfrom datetime import datetime, timedelta\n\nclass crm_lead(models.Model):\n _inherit = 'crm.lead'\n\n @api.depends('annual_revenue','planned_revenue')\n @api.onchange('planned_revenue')\n def _annual_revenue_calculate(self):\n if self.planned_revenue:\n self.annual_revenue = self.planned_revenue * 12\n if self.planned_revenue ==0:\n self.annual_revenue = 0\n return\n\n @api.depends('annual_revenue','planned_revenue')\n @api.onchange('annual_revenue')\n def _mothy_revenue_calculate(self):\n if self.annual_revenue:\n self.planned_revenue = 1.000*self.annual_revenue/12\n if self.annual_revenue ==0:\n self.planned_revenue = 0\n return\n\n @api.one\n @api.depends('x_subscription_period', 'x_month_number')\n def compute_end_date(self):\n for record in self:\n if not record.x_subscription_period:\n return\n month = record.x_month_number or 0\n start = datetime.strptime(record.x_subscription_period, '%Y-%m-%d')\n add_month = month % 12\n add_years = int(month / 12) + (int(start.month) + add_month) / 12\n end_month = (int(start.month) + add_month) % 12 if (int(start.month) + add_month) / 12 > 0 else int(\n start.month) + add_month\n end_year = start.year + add_years\n end_day = start.day -1\n if end_day == 0:\n end_month = end_month -1\n end_day = 31\n if end_month == 0:\n end_month=12\n end_year = end_year - 1\n\n while True:\n end_format = '%s-%s-%s' % (end_year, end_month, end_day)\n try:\n record.x_end_date = datetime.strptime(end_format, '%Y-%m-%d')\n break\n except:\n end_day -= 1\n\n x_subscription_period = fields.Date(string=\"Subscription Period\", default=fields.Datetime.now)\n one_time_revenue = fields.Integer('One Time Revenue')\n annual_revenue = fields.Float('Annual Revenue')\n x_month_number = fields.Integer('Number of Month', default=0)\n x_end_date = fields.Date(\"End Date\", compute=compute_end_date)\n invoice_type = fields.Selection([('fiancial', 'Financial Terminal'),('trading_gts', 
'Trading (GTS)'), ('trading_dzh', 'Trading (DZHI)'), ('event','Conference & Event'),('digital','Digital')])\n\n\n\n\n","sub_path":"beta-dev1/dzh_modifier_fields_1707/models/crm_lead.py","file_name":"crm_lead.py","file_ext":"py","file_size_in_byte":2482,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"469179396","text":"\"\"\"\nFunctions\n---------\n\nFunctions with a common interface for parameters\n\"\"\"\nimport numpy as np\n\ndef sigmoid (x, parameters):\n \"\"\"Sigmoid function\n POI = A2 + (A1 - A2) / (1. + exp((x - x0) / dx))\n \n Parameters\n ----------\n x: float or array of floats\n variable\n parameters: dict\n dictionary containing 'sigmoid_A1','sigmoid_A2','sigmoid_x0',\n and 'sigmoid_dx'\n \n Returns\n -------\n float or array of floats:\n function result\n \"\"\"\n A1 = parameters['sigmoid_A1']\n A2 = parameters['sigmoid_A2']\n x0 = parameters['sigmoid_x0']\n dx = parameters['sigmoid_dx']\n \n return A2 + (A1 - A2)/(1.+ np.exp((x - x0)/dx))\n \ndef sigmoid2 (x, parameters):\n \"\"\"Sigmoid 2 function\n POI = K / (C + (A*x**B))\n \n Parameters\n ----------\n x: float or array of floats or array of floats\n variable\n parameters: dict\n dictionary containing 'sigmoid2_K','sigmoid2_C','sigmoid2_A',\n and 'sigmoid2_B'\n \n Returns\n -------\n float or array of floats:\n function result\n \"\"\"\n K = parameters['sigmoid2_K']\n C = parameters['sigmoid2_C']\n A = parameters['sigmoid2_A']\n B = parameters['sigmoid2_B']\n return K / (C + (A * x**B)) \n\n\ndef linear (x, parameters):\n \"\"\"Sigmoid function\n POI = a + (b * x )\n \n Parameters\n ----------\n x: float or array of floats\n variable\n parameters: dict\n dictionary containing 'linear_a', and 'linear_b'\n \n Returns\n -------\n float or array of floats:\n function result\n \"\"\"\n a = parameters['linear_a']\n b = parameters['linear_b']\n return a + (b * x)\n \ndef hill (x, parameters):\n \"\"\"Hill function\n POI = (B*(x^n))/(1+(x^n))\n \n Parameters\n ----------\n x: float or array of floats\n variable\n parameters: dict\n dictionary containing 'hill_B' and 'hill_N'\n \n Returns\n -------\n float or array of floats:\n function result\n \"\"\"\n B = parameters['hill_B']\n N = parameters['hill_N']\n return (B * (x**N))/(1. 
+ (x**N))\n\n# table of functions\ntable = {\n 'sigmoid': sigmoid,\n 'linear': linear,\n 'sigmoid2': sigmoid2,\n 'hill': hill,\n}\n","sub_path":"atm/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"543860084","text":"# -*- coding: utf-8 -*-\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom qq.items import QqItem\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy import log\n\n\nclass NewsSpider(CrawlSpider):\n name = \"news\"\n allowed_domains = [\"news.qq.com\"]\n start_urls = ['http://news.qq.com/']\n rules = (\n Rule(LinkExtractor(allow=(\".*.qq.com/a/\\d*/\\d*.htm\",), allow_domains=(\"qq.com\",)), callback=\"parse_item\",\n follow=True),\n Rule(LinkExtractor(allow=(\"news.qq.com/\\w*_index.shtml\",), allow_domains=(\"qq.com\",)), follow=True),\n Rule(LinkExtractor(allow=(\"(?y:\r\n print(\"La palabra %s tiene mas vocales.\" % (palabra1))\r\n elif xright):\n BL+=wf\n\n j+=1\n id+=1\nnp.savez(path+'PMT{}/BL'.format(pmt), BL=BL/j)\nplt.figure()\nplt.plot(BL/j, 'k.')\nplt.show()\n","sub_path":"calib/make_bl.py","file_name":"make_bl.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"440326701","text":"# Copyright 2023 Google LLC. All Rights Reserved.\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom connector import channel\nfrom google3.cloud.graphite.mmv2.services.google.compute import (\n network_firewall_policy_rule_pb2,\n)\nfrom google3.cloud.graphite.mmv2.services.google.compute import (\n network_firewall_policy_rule_pb2_grpc,\n)\n\nfrom typing import List\n\n\nclass NetworkFirewallPolicyRule(object):\n def __init__(\n self,\n description: str = None,\n rule_name: str = None,\n priority: int = None,\n location: str = None,\n match: dict = None,\n action: str = None,\n direction: str = None,\n enable_logging: bool = None,\n rule_tuple_count: int = None,\n target_service_accounts: list = None,\n target_secure_tags: list = None,\n disabled: bool = None,\n kind: str = None,\n firewall_policy: str = None,\n project: str = None,\n service_account_file: str = \"\",\n ):\n channel.initialize()\n self.description = description\n self.rule_name = rule_name\n self.priority = priority\n self.location = location\n self.match = match\n self.action = action\n self.direction = direction\n self.enable_logging = enable_logging\n self.target_service_accounts = target_service_accounts\n self.target_secure_tags = target_secure_tags\n self.disabled = disabled\n self.firewall_policy = firewall_policy\n self.project = project\n self.service_account_file = service_account_file\n\n def apply(self):\n stub = 
network_firewall_policy_rule_pb2_grpc.ComputeBetaNetworkFirewallPolicyRuleServiceStub(\n channel.Channel()\n )\n request = (\n network_firewall_policy_rule_pb2.ApplyComputeBetaNetworkFirewallPolicyRuleRequest()\n )\n if Primitive.to_proto(self.description):\n request.resource.description = Primitive.to_proto(self.description)\n\n if Primitive.to_proto(self.rule_name):\n request.resource.rule_name = Primitive.to_proto(self.rule_name)\n\n if Primitive.to_proto(self.priority):\n request.resource.priority = Primitive.to_proto(self.priority)\n\n if Primitive.to_proto(self.location):\n request.resource.location = Primitive.to_proto(self.location)\n\n if NetworkFirewallPolicyRuleMatch.to_proto(self.match):\n request.resource.match.CopyFrom(\n NetworkFirewallPolicyRuleMatch.to_proto(self.match)\n )\n else:\n request.resource.ClearField(\"match\")\n if Primitive.to_proto(self.action):\n request.resource.action = Primitive.to_proto(self.action)\n\n if NetworkFirewallPolicyRuleDirectionEnum.to_proto(self.direction):\n request.resource.direction = (\n NetworkFirewallPolicyRuleDirectionEnum.to_proto(self.direction)\n )\n\n if Primitive.to_proto(self.enable_logging):\n request.resource.enable_logging = Primitive.to_proto(self.enable_logging)\n\n if Primitive.to_proto(self.target_service_accounts):\n request.resource.target_service_accounts.extend(\n Primitive.to_proto(self.target_service_accounts)\n )\n if NetworkFirewallPolicyRuleTargetSecureTagsArray.to_proto(\n self.target_secure_tags\n ):\n request.resource.target_secure_tags.extend(\n NetworkFirewallPolicyRuleTargetSecureTagsArray.to_proto(\n self.target_secure_tags\n )\n )\n if Primitive.to_proto(self.disabled):\n request.resource.disabled = Primitive.to_proto(self.disabled)\n\n if Primitive.to_proto(self.firewall_policy):\n request.resource.firewall_policy = Primitive.to_proto(self.firewall_policy)\n\n if Primitive.to_proto(self.project):\n request.resource.project = Primitive.to_proto(self.project)\n\n 
request.service_account_file = self.service_account_file\n\n response = stub.ApplyComputeBetaNetworkFirewallPolicyRule(request)\n self.description = Primitive.from_proto(response.description)\n self.rule_name = Primitive.from_proto(response.rule_name)\n self.priority = Primitive.from_proto(response.priority)\n self.location = Primitive.from_proto(response.location)\n self.match = NetworkFirewallPolicyRuleMatch.from_proto(response.match)\n self.action = Primitive.from_proto(response.action)\n self.direction = NetworkFirewallPolicyRuleDirectionEnum.from_proto(\n response.direction\n )\n self.enable_logging = Primitive.from_proto(response.enable_logging)\n self.rule_tuple_count = Primitive.from_proto(response.rule_tuple_count)\n self.target_service_accounts = Primitive.from_proto(\n response.target_service_accounts\n )\n self.target_secure_tags = (\n NetworkFirewallPolicyRuleTargetSecureTagsArray.from_proto(\n response.target_secure_tags\n )\n )\n self.disabled = Primitive.from_proto(response.disabled)\n self.kind = Primitive.from_proto(response.kind)\n self.firewall_policy = Primitive.from_proto(response.firewall_policy)\n self.project = Primitive.from_proto(response.project)\n\n def delete(self):\n stub = network_firewall_policy_rule_pb2_grpc.ComputeBetaNetworkFirewallPolicyRuleServiceStub(\n channel.Channel()\n )\n request = (\n network_firewall_policy_rule_pb2.DeleteComputeBetaNetworkFirewallPolicyRuleRequest()\n )\n request.service_account_file = self.service_account_file\n if Primitive.to_proto(self.description):\n request.resource.description = Primitive.to_proto(self.description)\n\n if Primitive.to_proto(self.rule_name):\n request.resource.rule_name = Primitive.to_proto(self.rule_name)\n\n if Primitive.to_proto(self.priority):\n request.resource.priority = Primitive.to_proto(self.priority)\n\n if Primitive.to_proto(self.location):\n request.resource.location = Primitive.to_proto(self.location)\n\n if NetworkFirewallPolicyRuleMatch.to_proto(self.match):\n 
request.resource.match.CopyFrom(\n NetworkFirewallPolicyRuleMatch.to_proto(self.match)\n )\n else:\n request.resource.ClearField(\"match\")\n if Primitive.to_proto(self.action):\n request.resource.action = Primitive.to_proto(self.action)\n\n if NetworkFirewallPolicyRuleDirectionEnum.to_proto(self.direction):\n request.resource.direction = (\n NetworkFirewallPolicyRuleDirectionEnum.to_proto(self.direction)\n )\n\n if Primitive.to_proto(self.enable_logging):\n request.resource.enable_logging = Primitive.to_proto(self.enable_logging)\n\n if Primitive.to_proto(self.target_service_accounts):\n request.resource.target_service_accounts.extend(\n Primitive.to_proto(self.target_service_accounts)\n )\n if NetworkFirewallPolicyRuleTargetSecureTagsArray.to_proto(\n self.target_secure_tags\n ):\n request.resource.target_secure_tags.extend(\n NetworkFirewallPolicyRuleTargetSecureTagsArray.to_proto(\n self.target_secure_tags\n )\n )\n if Primitive.to_proto(self.disabled):\n request.resource.disabled = Primitive.to_proto(self.disabled)\n\n if Primitive.to_proto(self.firewall_policy):\n request.resource.firewall_policy = Primitive.to_proto(self.firewall_policy)\n\n if Primitive.to_proto(self.project):\n request.resource.project = Primitive.to_proto(self.project)\n\n response = stub.DeleteComputeBetaNetworkFirewallPolicyRule(request)\n\n @classmethod\n def list(self, project, location, firewallPolicy, service_account_file=\"\"):\n stub = network_firewall_policy_rule_pb2_grpc.ComputeBetaNetworkFirewallPolicyRuleServiceStub(\n channel.Channel()\n )\n request = (\n network_firewall_policy_rule_pb2.ListComputeBetaNetworkFirewallPolicyRuleRequest()\n )\n request.service_account_file = service_account_file\n request.Project = project\n\n request.Location = location\n\n request.FirewallPolicy = firewallPolicy\n\n return stub.ListComputeBetaNetworkFirewallPolicyRule(request).items\n\n def to_proto(self):\n resource = (\n 
network_firewall_policy_rule_pb2.ComputeBetaNetworkFirewallPolicyRule()\n )\n if Primitive.to_proto(self.description):\n resource.description = Primitive.to_proto(self.description)\n if Primitive.to_proto(self.rule_name):\n resource.rule_name = Primitive.to_proto(self.rule_name)\n if Primitive.to_proto(self.priority):\n resource.priority = Primitive.to_proto(self.priority)\n if Primitive.to_proto(self.location):\n resource.location = Primitive.to_proto(self.location)\n if NetworkFirewallPolicyRuleMatch.to_proto(self.match):\n resource.match.CopyFrom(NetworkFirewallPolicyRuleMatch.to_proto(self.match))\n else:\n resource.ClearField(\"match\")\n if Primitive.to_proto(self.action):\n resource.action = Primitive.to_proto(self.action)\n if NetworkFirewallPolicyRuleDirectionEnum.to_proto(self.direction):\n resource.direction = NetworkFirewallPolicyRuleDirectionEnum.to_proto(\n self.direction\n )\n if Primitive.to_proto(self.enable_logging):\n resource.enable_logging = Primitive.to_proto(self.enable_logging)\n if Primitive.to_proto(self.target_service_accounts):\n resource.target_service_accounts.extend(\n Primitive.to_proto(self.target_service_accounts)\n )\n if NetworkFirewallPolicyRuleTargetSecureTagsArray.to_proto(\n self.target_secure_tags\n ):\n resource.target_secure_tags.extend(\n NetworkFirewallPolicyRuleTargetSecureTagsArray.to_proto(\n self.target_secure_tags\n )\n )\n if Primitive.to_proto(self.disabled):\n resource.disabled = Primitive.to_proto(self.disabled)\n if Primitive.to_proto(self.firewall_policy):\n resource.firewall_policy = Primitive.to_proto(self.firewall_policy)\n if Primitive.to_proto(self.project):\n resource.project = Primitive.to_proto(self.project)\n return resource\n\n\nclass NetworkFirewallPolicyRuleMatch(object):\n def __init__(\n self,\n src_ip_ranges: list = None,\n dest_ip_ranges: list = None,\n layer4_configs: list = None,\n src_secure_tags: list = None,\n src_region_codes: list = None,\n dest_region_codes: list = None,\n 
src_threat_intelligences: list = None,\n dest_threat_intelligences: list = None,\n src_fqdns: list = None,\n dest_fqdns: list = None,\n src_address_groups: list = None,\n dest_address_groups: list = None,\n ):\n self.src_ip_ranges = src_ip_ranges\n self.dest_ip_ranges = dest_ip_ranges\n self.layer4_configs = layer4_configs\n self.src_secure_tags = src_secure_tags\n self.src_region_codes = src_region_codes\n self.dest_region_codes = dest_region_codes\n self.src_threat_intelligences = src_threat_intelligences\n self.dest_threat_intelligences = dest_threat_intelligences\n self.src_fqdns = src_fqdns\n self.dest_fqdns = dest_fqdns\n self.src_address_groups = src_address_groups\n self.dest_address_groups = dest_address_groups\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = (\n network_firewall_policy_rule_pb2.ComputeBetaNetworkFirewallPolicyRuleMatch()\n )\n if Primitive.to_proto(resource.src_ip_ranges):\n res.src_ip_ranges.extend(Primitive.to_proto(resource.src_ip_ranges))\n if Primitive.to_proto(resource.dest_ip_ranges):\n res.dest_ip_ranges.extend(Primitive.to_proto(resource.dest_ip_ranges))\n if NetworkFirewallPolicyRuleMatchLayer4ConfigsArray.to_proto(\n resource.layer4_configs\n ):\n res.layer4_configs.extend(\n NetworkFirewallPolicyRuleMatchLayer4ConfigsArray.to_proto(\n resource.layer4_configs\n )\n )\n if NetworkFirewallPolicyRuleMatchSrcSecureTagsArray.to_proto(\n resource.src_secure_tags\n ):\n res.src_secure_tags.extend(\n NetworkFirewallPolicyRuleMatchSrcSecureTagsArray.to_proto(\n resource.src_secure_tags\n )\n )\n if Primitive.to_proto(resource.src_region_codes):\n res.src_region_codes.extend(Primitive.to_proto(resource.src_region_codes))\n if Primitive.to_proto(resource.dest_region_codes):\n res.dest_region_codes.extend(Primitive.to_proto(resource.dest_region_codes))\n if Primitive.to_proto(resource.src_threat_intelligences):\n res.src_threat_intelligences.extend(\n 
Primitive.to_proto(resource.src_threat_intelligences)\n )\n if Primitive.to_proto(resource.dest_threat_intelligences):\n res.dest_threat_intelligences.extend(\n Primitive.to_proto(resource.dest_threat_intelligences)\n )\n if Primitive.to_proto(resource.src_fqdns):\n res.src_fqdns.extend(Primitive.to_proto(resource.src_fqdns))\n if Primitive.to_proto(resource.dest_fqdns):\n res.dest_fqdns.extend(Primitive.to_proto(resource.dest_fqdns))\n if Primitive.to_proto(resource.src_address_groups):\n res.src_address_groups.extend(\n Primitive.to_proto(resource.src_address_groups)\n )\n if Primitive.to_proto(resource.dest_address_groups):\n res.dest_address_groups.extend(\n Primitive.to_proto(resource.dest_address_groups)\n )\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return NetworkFirewallPolicyRuleMatch(\n src_ip_ranges=Primitive.from_proto(resource.src_ip_ranges),\n dest_ip_ranges=Primitive.from_proto(resource.dest_ip_ranges),\n layer4_configs=NetworkFirewallPolicyRuleMatchLayer4ConfigsArray.from_proto(\n resource.layer4_configs\n ),\n src_secure_tags=NetworkFirewallPolicyRuleMatchSrcSecureTagsArray.from_proto(\n resource.src_secure_tags\n ),\n src_region_codes=Primitive.from_proto(resource.src_region_codes),\n dest_region_codes=Primitive.from_proto(resource.dest_region_codes),\n src_threat_intelligences=Primitive.from_proto(\n resource.src_threat_intelligences\n ),\n dest_threat_intelligences=Primitive.from_proto(\n resource.dest_threat_intelligences\n ),\n src_fqdns=Primitive.from_proto(resource.src_fqdns),\n dest_fqdns=Primitive.from_proto(resource.dest_fqdns),\n src_address_groups=Primitive.from_proto(resource.src_address_groups),\n dest_address_groups=Primitive.from_proto(resource.dest_address_groups),\n )\n\n\nclass NetworkFirewallPolicyRuleMatchArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [NetworkFirewallPolicyRuleMatch.to_proto(i) for i in 
resources]\n\n @classmethod\n def from_proto(self, resources):\n return [NetworkFirewallPolicyRuleMatch.from_proto(i) for i in resources]\n\n\nclass NetworkFirewallPolicyRuleMatchLayer4Configs(object):\n def __init__(self, ip_protocol: str = None, ports: list = None):\n self.ip_protocol = ip_protocol\n self.ports = ports\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = (\n network_firewall_policy_rule_pb2.ComputeBetaNetworkFirewallPolicyRuleMatchLayer4Configs()\n )\n if Primitive.to_proto(resource.ip_protocol):\n res.ip_protocol = Primitive.to_proto(resource.ip_protocol)\n if Primitive.to_proto(resource.ports):\n res.ports.extend(Primitive.to_proto(resource.ports))\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return NetworkFirewallPolicyRuleMatchLayer4Configs(\n ip_protocol=Primitive.from_proto(resource.ip_protocol),\n ports=Primitive.from_proto(resource.ports),\n )\n\n\nclass NetworkFirewallPolicyRuleMatchLayer4ConfigsArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [\n NetworkFirewallPolicyRuleMatchLayer4Configs.to_proto(i) for i in resources\n ]\n\n @classmethod\n def from_proto(self, resources):\n return [\n NetworkFirewallPolicyRuleMatchLayer4Configs.from_proto(i) for i in resources\n ]\n\n\nclass NetworkFirewallPolicyRuleMatchSrcSecureTags(object):\n def __init__(self, name: str = None, state: str = None):\n self.name = name\n self.state = state\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = (\n network_firewall_policy_rule_pb2.ComputeBetaNetworkFirewallPolicyRuleMatchSrcSecureTags()\n )\n if Primitive.to_proto(resource.name):\n res.name = Primitive.to_proto(resource.name)\n if NetworkFirewallPolicyRuleMatchSrcSecureTagsStateEnum.to_proto(\n resource.state\n ):\n res.state = NetworkFirewallPolicyRuleMatchSrcSecureTagsStateEnum.to_proto(\n resource.state\n 
)\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return NetworkFirewallPolicyRuleMatchSrcSecureTags(\n name=Primitive.from_proto(resource.name),\n state=NetworkFirewallPolicyRuleMatchSrcSecureTagsStateEnum.from_proto(\n resource.state\n ),\n )\n\n\nclass NetworkFirewallPolicyRuleMatchSrcSecureTagsArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [\n NetworkFirewallPolicyRuleMatchSrcSecureTags.to_proto(i) for i in resources\n ]\n\n @classmethod\n def from_proto(self, resources):\n return [\n NetworkFirewallPolicyRuleMatchSrcSecureTags.from_proto(i) for i in resources\n ]\n\n\nclass NetworkFirewallPolicyRuleTargetSecureTags(object):\n def __init__(self, name: str = None, state: str = None):\n self.name = name\n self.state = state\n\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return None\n\n res = (\n network_firewall_policy_rule_pb2.ComputeBetaNetworkFirewallPolicyRuleTargetSecureTags()\n )\n if Primitive.to_proto(resource.name):\n res.name = Primitive.to_proto(resource.name)\n if NetworkFirewallPolicyRuleTargetSecureTagsStateEnum.to_proto(resource.state):\n res.state = NetworkFirewallPolicyRuleTargetSecureTagsStateEnum.to_proto(\n resource.state\n )\n return res\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return None\n\n return NetworkFirewallPolicyRuleTargetSecureTags(\n name=Primitive.from_proto(resource.name),\n state=NetworkFirewallPolicyRuleTargetSecureTagsStateEnum.from_proto(\n resource.state\n ),\n )\n\n\nclass NetworkFirewallPolicyRuleTargetSecureTagsArray(object):\n @classmethod\n def to_proto(self, resources):\n if not resources:\n return resources\n return [\n NetworkFirewallPolicyRuleTargetSecureTags.to_proto(i) for i in resources\n ]\n\n @classmethod\n def from_proto(self, resources):\n return [\n NetworkFirewallPolicyRuleTargetSecureTags.from_proto(i) for i in resources\n ]\n\n\nclass 
NetworkFirewallPolicyRuleMatchSrcSecureTagsStateEnum(object):\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return resource\n return network_firewall_policy_rule_pb2.ComputeBetaNetworkFirewallPolicyRuleMatchSrcSecureTagsStateEnum.Value(\n \"ComputeBetaNetworkFirewallPolicyRuleMatchSrcSecureTagsStateEnum%s\"\n % resource\n )\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return resource\n return network_firewall_policy_rule_pb2.ComputeBetaNetworkFirewallPolicyRuleMatchSrcSecureTagsStateEnum.Name(\n resource\n )[\n len(\"ComputeBetaNetworkFirewallPolicyRuleMatchSrcSecureTagsStateEnum\") :\n ]\n\n\nclass NetworkFirewallPolicyRuleDirectionEnum(object):\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return resource\n return network_firewall_policy_rule_pb2.ComputeBetaNetworkFirewallPolicyRuleDirectionEnum.Value(\n \"ComputeBetaNetworkFirewallPolicyRuleDirectionEnum%s\" % resource\n )\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return resource\n return network_firewall_policy_rule_pb2.ComputeBetaNetworkFirewallPolicyRuleDirectionEnum.Name(\n resource\n )[\n len(\"ComputeBetaNetworkFirewallPolicyRuleDirectionEnum\") :\n ]\n\n\nclass NetworkFirewallPolicyRuleTargetSecureTagsStateEnum(object):\n @classmethod\n def to_proto(self, resource):\n if not resource:\n return resource\n return network_firewall_policy_rule_pb2.ComputeBetaNetworkFirewallPolicyRuleTargetSecureTagsStateEnum.Value(\n \"ComputeBetaNetworkFirewallPolicyRuleTargetSecureTagsStateEnum%s\" % resource\n )\n\n @classmethod\n def from_proto(self, resource):\n if not resource:\n return resource\n return network_firewall_policy_rule_pb2.ComputeBetaNetworkFirewallPolicyRuleTargetSecureTagsStateEnum.Name(\n resource\n )[\n len(\"ComputeBetaNetworkFirewallPolicyRuleTargetSecureTagsStateEnum\") :\n ]\n\n\nclass Primitive(object):\n @classmethod\n def to_proto(self, s):\n if not s:\n return \"\"\n return s\n\n @classmethod\n 
def from_proto(self, s):\n return s\n","sub_path":"python/services/compute/beta/network_firewall_policy_rule.py","file_name":"network_firewall_policy_rule.py","file_ext":"py","file_size_in_byte":23351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"524035563","text":"import os\nimport re\nimport requests\nimport time\n\n# 下载路径,默认为该文件所在路径\ndownloadPath = os.path.abspath(os.path.dirname(__file__))\ndownloadPath += os.path.sep + time.strftime(\"%Y%m%d\", time.localtime())\n\ndef downloadPic(url, fileName):\n # 保存图片\n print(\"正在下载 \" + fileName)\n # fileName = downloadCount + 1\n with open(downloadPath+os.path.sep+fileName, 'wb') as file:\n file.write(requests.get(url).content)\n\n\ndef getPicUrl():\n print(\"正在获取 ...\")\n response = requests.get(\"http://yunjie.f06.87yun.club/st/r/\")\n pattern = re.compile(r'
=1):\n # for n in empval[0]:\n # self.df.iloc[ n,cn ] = replacer\n #success\n return True\n\n def apply_mapper(self, cols ,mets , arglist ):\n '''apply mapper to the columns\n example :\n apply_mapper( ['Age Group','Ethnicity'],'labelencoder' )\n applies LabelEncoder to AgeGroup and Ethnicity.\n mets can be just 'LabelEncoder' and it will be assigned to all the columns listed \n by cols. cols can be none and it will apply to all the column\n @cols - specify which column to apply the mapper\n @met - which method (labelencoder, onehotencoder, standardscaler, ordinalencoder)\n '''\n # mets is singular, apply it to all\n if( cols is not None):\n #apply to specific\n if( not isinstance( cols,list )):\n cols = [cols] #convert to list\n return self.lkup[mets](self,cols,arglist)\n else:\n #apply to all\n return self.lkup[mets](self,self.hlist,arglist)\n\n def _apply_labelencoder(self, cols, arglist):\n '''applies the labelencoder over the columns.'''\n ec = LabelEncoder()\n for c in cols:\n ec.fit( self.df[c] )\n self.df[c] = lec.transform( self.df[c] )\n return True\n\n def _apply_standardscaler(self,cols, arglist):\n '''applies the standardscaler such that the transform will have a\n std. dev. of 1 and a mean of 0. cols should ideally be a list'''\n sc = StandardScaler()\n self.df[cols] = sc.fit_transform( self.df[cols] )\n return True\n\n def _apply_minmaxscaler(self,cols, arglist):\n '''applies the minmaxscaler such that the feature is scaled\n to within range specified in args. 
see sklearn.preprocessing.MinMaxScaler'''\n sc = MinMaxScaler((args[0],args[1])) #args0 - min, args1 - max\n # by default minmax scaler uses 0 as min and 1 as max\n self.df[cols] = sc.fit_transform( self.df[cols] )\n return True\n\n def _apply_onehotencoder(self,cols, arglist):\n '''cols should be a list !, applies one hot encoder to the columns.\n update1: now cols can be singular, the function auto converts it to a list'''\n dummies = pandas.get_dummies( self.df.filter( cols, axis= self._constant_axis_COLS))\n self.df.drop( cols, axis= self._constant_axis_COLS, inplace=True)\n self.df = self.df.join( dummies )\n return True\n\n def rearrange_cols(self, target, mets, arglist ):\n '''rearrange the columns. move target end of the dataframe\n the last column -- useful to moving the target to the end for training'''\n # TODO: allow target to be moved to specified index\n excludelist = self.df.columns.values.tolist()\n if( target in excludelist ):\n if( isinstance(target,list) ):\n self.error(target,\"cannot be a list in rearrange operation\")\n return False\n ind_tar = excludelist.index(target)\n del excludelist[ind_tar]\n self.df = self.df[ excludelist + [target] ]\n return True\n else:\n self.error(target,\"not a column in the dataframe!\")\n return False\n\n def rewrite( self, filename = fname ,aindex=False):\n '''rewrites the dataframe to a csv file'''\n self.df.to_csv( filename , index=aindex)\n\n def preproc(self, procedure, col, mets, arglist):\n '''easy caller. 
use this to call the preprocessing methods'''\n '''use None as placeholder for mets if not applicable'''\n res = self.proc[procedure](self,col,mets,arglist)\n self.hlist = self.df.columns.values.tolist()\n return res\n\n proc = {\n \"preproc\": apply_mapper,\n \"ignore\": ignorecolumn,\n \"fill\": fill_empty,\n \"select\": selectcolumn,\n \"moveback\": rearrange_cols\n }\n\n lkup = {\n \"labelencoder\": _apply_labelencoder,\n \"onehotencoder\": _apply_onehotencoder,\n \"standardscaler\":_apply_standardscaler,\n \"minmaxscaler\":_apply_minmaxscaler\n }\n\n","sub_path":"preproc/neoctl.py","file_name":"neoctl.py","file_ext":"py","file_size_in_byte":11240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"406780160","text":"import json\nimport os\nimport pprint\nimport re\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom dateutil import parser\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom mainapp.management.commands.load_edo_centers_accred import get_edo_auth\nfrom reestr.models import (GTU, SO, AccreditedCenter,\n AccreditedCertificationPoint, City, GroupSM, Level,\n SROMember, WeldType)\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nfrom requests.packages.urllib3.util.retry import Retry\n\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n\n\n# __import__('ipdb').set_trace()\n\n\nhead = {\n \"Host\": \"ac.naks.ru\",\n \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36\",\n \"Content-Type\": \"application/x-www-form-urlencoded; charset=UTF-8\",\n \"Accept-Encoding\": \"gzip, deflate\",\n}\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\n \"--clean\",\n action=\"store_true\",\n help=\"clean all sro members\",\n )\n parser.add_argument(\n \"--wipe\",\n action=\"store_true\",\n help=\"wipe all sro members before\",\n )\n parser.add_argument(\n \"--deploy\",\n action=\"store_true\",\n help=\"use on remote server\",\n ),\n\n def handle(self, *args, **options):\n ##################################\n ##### make initiate_centers first!\n ##################################\n\n payload = get_edo_auth()\n\n # requests.adapters.DEFLAULT_RETRIES = 5\n if options[\"deploy\"]:\n certifi_cert = (\n \"/home/popov/django3/lib/python3.10/site-packages/certifi/cacert.pem\"\n )\n else:\n certifi_cert = (\n \"/home/popov/django3/lib/python3.8/site-packages/certifi/cacert.pem\"\n )\n del os.environ[\"CA_BUNDLE\"]\n del os.environ[\"REQUESTS_CA_BUNDLE\"]\n del os.environ[\"SSL_CERT_FILE\"]\n\n os.environ[\"CA_BUNDLE\"] = 
certifi_cert\n os.environ[\"REQUESTS_CA_BUNDLE\"] = certifi_cert\n os.environ[\"SSL_CERT_FILE\"] = certifi_cert\n\n with requests.Session() as sess:\n # sess.keep_alive = False\n # retry = Retry(connect=3, backoff_factor=1)\n # adapter = HTTPAdapter(max_retries=retry)\n # sess.mount('http://', adapter)\n # sess.mount('https://', adapter)\n # login_url = \"https://ac.naks.ru/\"\n # path_to_fullchain_pem = '/etc/letsencrypt/live/jango.naks.ru/fullchain.pem'\n\n path_to_fullchain_pem = \"/home/popov/naks-new/jango-naks-ru-chain.pem\"\n path_to_cert_folder = \"/etc/letsencrypt/live/jango.naks.ru/\"\n\n cert_tuple = (\n \"/etc/letsencrypt/live/jango.naks.ru/private.key\",\n \"/etc/letsencrypt/live/jango.naks.ru/servert.cert\",\n )\n\n login_url = \"https://ac.naks.ru/\"\n\n log_me_in = sess.post(login_url, data=payload, headers=head)\n all_orgs_url = \"https://ac.naks.ru/org/index.php?SHOWALL_1=1#nav_start\"\n\n all_orgs_page = sess.get(all_orgs_url, headers=head)\n org_soup = BeautifulSoup(all_orgs_page.text, \"html.parser\")\n all_orgs_hrefs = org_soup.find_all(\"a\", attrs={\"class\": \"ahidden\"})\n counter = 0\n # __import__('ipdb').set_trace()\n for href in all_orgs_hrefs:\n edo_id = re.findall(r\"\\d+\", href.attrs[\"href\"])\n sro_id_centers = AccreditedCenter.objects.filter(\n json_data__org_external_id=edo_id[0]\n )\n edit_org_url = (\n f\"https://ac.naks.ru/org/detail.php?ID={edo_id[0]}&action=edit\"\n )\n\n edit_page = sess.get(edit_org_url)\n edit_page_soup = BeautifulSoup(edit_page.text, \"html.parser\")\n org_name = edit_page_soup.find(\"h3\").get_text()\n\n sm, new_sro_member_created = SROMember.objects.get_or_create(\n short_name=org_name\n )\n if sro_id_centers.exists():\n for center in sro_id_centers:\n center.sro_member = sm\n center.save()\n\n np_select = edit_page_soup.find(\"select\", attrs={\"name\": \"PROP[np]\"})\n sro_option = np_select.find(\"option\", attrs={\"selected\": True})\n print(\"-->\", counter, org_name)\n # print(\"sro option\", 
sro_option.get_text())\n # sro_members_bd = SROMember.objects.all()\n\n if sro_option:\n if sro_option.get(\"value\") == \"1\":\n sm.status = \"a\"\n else:\n sm.status = \"na\"\n\n sro_actual_addr_input = edit_page_soup.find(\n \"input\", attrs={\"name\": \"PROP[address]\"}\n )\n sro_ur_addr_input = edit_page_soup.find(\n \"input\", attrs={\"name\": \"PROP[contact]\"}\n )\n sro_post_addr_input = edit_page_soup.find(\n \"input\", attrs={\"name\": \"PROP[mail_address]\"}\n )\n sm_short_name_input = edit_page_soup.find(\n \"input\", attrs={\"name\": \"NAME\"}\n )\n sm_full_name_input = edit_page_soup.find(\n \"input\", attrs={\"name\": \"LONGNAME\"}\n )\n city_input = edit_page_soup.find(\n \"input\",\n attrs={\"name\": \"PROP[city]\"},\n )\n sm.short_name = sm_short_name_input.get(\"value\")\n\n if city_input.get(\"value\"):\n city, city_created = City.objects.get_or_create(\n title=city_input.get(\"value\")\n )\n sm.city = city\n\n sm.full_name = sm_full_name_input.get(\"value\")\n sm.actual_address = sro_actual_addr_input.get(\"value\")\n sm.ur_address = sro_ur_addr_input.get(\"value\")\n if sro_post_addr_input:\n sm.post_address = sro_post_addr_input.get(\"value\")\n sm.save()\n if sm.status == \"a\":\n sm.load_point_coordinates()\n print(\n \"sro member updated\",\n sm.short_name,\n sm.full_name,\n sm.coordinates,\n sm.status,\n )\n counter += 1\n\n if options.get(\"clean\"):\n sro_members_all = SROMember.objects.all()\n for sm in sro_members_all:\n if not sm.centers.select_related().count():\n sm.delete()\n","sub_path":"mainapp/management/commands/parse_sro.py","file_name":"parse_sro.py","file_ext":"py","file_size_in_byte":7000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"333437503","text":"import os\nimport sys\nimport subprocess\nimport setuptools\nimport shutil\nimport tempfile\nimport logging\nfrom zc.buildout.download import Download\nimport zc\nimport zc.recipe.egg\n\nDOWNLOAD_URL = \"https://projects.unbit.it/downloads/uwsgi-{0}.tar.gz\"\nMARKER = object()\n\n\ndef str_to_bool(s):\n \"\"\"\n Converts a string to a bool value; looks at the first character,\n if it's y(es), t(rue) or 1 returns True, otherwise, False.\n \"\"\"\n if len(s) > 0 and s[0] in \"yYtT1\":\n return True\n return False\n\n\nclass UWSGI:\n \"\"\"\n Buildout recipe downloading, compiling and configuring python paths for uWSGI.\n \"\"\"\n\n def __init__(self, buildout, name, options):\n self.egg = zc.recipe.egg.Egg(buildout, options[\"recipe\"], options)\n self.name = name\n self.buildout = buildout\n self.log = logging.getLogger(self.name)\n\n global_options = buildout[\"buildout\"]\n # Use the \"download-cache\" directory as cache, if present\n self.cache_dir = global_options.get(\"download-cache\")\n\n if self.cache_dir is not None:\n # If cache_dir isn't an absolute path, make it relative to\n # buildout's directory\n if not os.path.isabs(self.cache_dir):\n self.cache_dir = os.path.join(global_options[\"directory\"], self.cache_dir)\n\n self.use_system_binary = str_to_bool(options.get(\"use-system-binary\", \"false\"))\n self.uwsgi_version = options.get(\"version\", \"latest\")\n self.md5sum = options.get('md5sum') or None # empty string => None\n self.uwsgi_binary_path = os.path.join(global_options[\"bin-directory\"], \"uwsgi\")\n\n # xml, ini\n self.config_file_format = options.get(\"output-format\", \"xml\").lower()\n if self.config_file_format not in [\"xml\", \"ini\"]:\n self.log.warning(\"unknown output configuration format, defaulting to xml\")\n self.config_file_format = \"xml\"\n\n if \"extra-paths\" in options:\n options[\"pythonpath\"] = options[\"extra-paths\"]\n else:\n options.setdefault(\"extra-paths\", 
options.get(\"pythonpath\", \"\"))\n\n self.output = options.setdefault(\"output\",\n os.path.join(global_options[\"parts-directory\"],\n self.name,\n \"uwsgi.{0}\".format(self.config_file_format)))\n self.options = options\n\n def download_release(self):\n \"\"\"\n Download uWSGI release based on \"version\" option and return path to downloaded file.\n \"\"\"\n if self.cache_dir is not None:\n download = Download(cache=self.cache_dir)\n else:\n self.log.warning(\"not using a download cache for uwsgi\")\n download = Download()\n\n download_url = self.options.get(\"download-url\", DOWNLOAD_URL)\n download_path, is_temp = download(\n download_url.format(self.uwsgi_version), md5sum=self.md5sum)\n return download_path\n\n def extract_release(self, download_path):\n \"\"\"\n Extracts uWSGI package and returns path containing uwsgiconfig.py along with path to extraction root.\n \"\"\"\n uwsgi_path = None\n extract_path = tempfile.mkdtemp(\"-uwsgi\")\n setuptools.archive_util.unpack_archive(download_path, extract_path)\n for root, dirs, files in os.walk(extract_path):\n if \"uwsgiconfig.py\" in files:\n uwsgi_path = root\n return uwsgi_path, extract_path\n\n def build_uwsgi(self, uwsgi_path):\n \"\"\"\n Build uWSGI and returns path to executable.\n \"\"\"\n current_path = os.getcwd()\n profile = self.options.get(\"profile\", MARKER)\n\n if profile is MARKER:\n profile = '%s/buildconf/default.ini' % uwsgi_path\n elif not os.path.isabs(profile):\n # if the specified profile is not an absolute path, try\n # looking for it in the buildout folder first; otherwise,\n # look for it in the current directory\n buildout_dir_profile = '%s/buildconf/%s' % (uwsgi_path, profile)\n if os.path.isfile(buildout_dir_profile):\n profile = buildout_dir_profile\n else:\n profile = os.path.abspath(profile)\n\n # Change dir to uwsgi_path for compile.\n os.chdir(uwsgi_path)\n build_stdout = tempfile.TemporaryFile()\n try:\n # Build uWSGI. 
We don't use the Makefile, since it uses an\n # override variable (with :=) we cannot specify the\n # Python interpreter we want to use.\n subprocess.check_call([self.options.get('executable', sys.executable),\n os.path.join(uwsgi_path, 'uwsgiconfig.py'),\n '--build',\n profile],\n stdout=build_stdout)\n finally:\n # Change back to original path.\n os.chdir(current_path)\n\n if os.path.isfile(self.uwsgi_binary_path):\n os.unlink(self.uwsgi_binary_path)\n\n shutil.copy(os.path.join(uwsgi_path, \"uwsgi\"), self.uwsgi_binary_path)\n\n def get_extra_paths(self):\n # Add libraries found by a site .pth files to our extra-paths.\n if 'pth-files' in self.options:\n import site\n for pth_file in self.options['pth-files'].splitlines():\n pth_libs = site.addsitedir(pth_file, set())\n if not pth_libs:\n self.log.warning('No site *.pth libraries found for pth_file=%s' % pth_file)\n else:\n self.log.info('Adding *.pth libraries=%s' % pth_libs)\n self.options['extra-paths'] += '\\n' + '\\n'.join(pth_libs)\n\n # Add local extra-paths.\n return [p.replace('/', os.path.sep) for p in\n self.options['extra-paths'].splitlines() if p.strip()]\n\n def create_configuration_file(self):\n warned = False\n conf = []\n\n for key, value in self.options.items():\n\n if key.startswith(\"xml-\") and len(key) > 4:\n if not warned:\n self.log.warning(\"using 'xml-' options has been deprecated in favor of 'config-'. 
\"\n \"See documentation for details.\")\n warned = True\n\n key = key[4:]\n\n elif key.startswith(\"config-\") and len(key) > 7:\n key = key[7:]\n else:\n continue\n\n if \"\\n\" in value:\n for subvalue in value.splitlines():\n conf.append((key, subvalue))\n else:\n conf.append((key, value))\n\n _, ws = self.egg.working_set()\n\n # get list of paths to put into pythonpath\n pythonpaths = ws.entries + self.get_extra_paths()\n\n # mungle basedir of pythonpath entries\n if 'pythonpath-eggs-directory' in self.options:\n source = self.options['eggs-directory']\n target = self.options['pythonpath-eggs-directory']\n pythonpaths = [path.replace(source, target) for path in pythonpaths]\n\n # generate pythonpath directives\n for path in pythonpaths:\n conf.append((\"pythonpath\", path))\n\n directory = os.path.dirname(self.output)\n if not os.path.isdir(directory):\n os.makedirs(directory)\n\n if self.config_file_format == \"xml\":\n self.write_config_as_xml(conf)\n elif self.config_file_format == \"ini\":\n self.write_config_as_ini(conf)\n\n return self.output\n\n def write_config_as_xml(self, conf_options):\n conf = \"\"\n for key, value in conf_options:\n if value.lower() == \"true\":\n conf += \"<{0}/>\\n\".format(key)\n elif value.lower() != \"false\":\n conf += \"<{0}>{1}{0}>\\n\".format(key, value)\n\n with open(self.output, \"w\") as f:\n f.write(\"\\n{0} \".format(conf))\n\n def write_config_as_ini(self, conf_options):\n conf = \"[uwsgi]\\n\"\n for key, value in conf_options:\n conf += \"{0} = {1}\\n\".format(key, value)\n with open(self.output, \"w\") as f:\n f.write(conf)\n\n def is_uwsgi_installed(self):\n if not os.path.isfile(self.uwsgi_binary_path):\n return False\n\n if self.uwsgi_version == 'latest':\n # If you ask for the latest version, we say we don't, in order to\n # force a download+recompile (since we can't know for sure if the package was\n # updated upstream or not)\n return False\n\n # Check the version\n process = 
subprocess.Popen([self.uwsgi_binary_path, '--version'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, universal_newlines=True)\n stdout, stderr = process.communicate()\n return stdout.strip() == self.uwsgi_version\n\n def install(self):\n paths = []\n if not self.use_system_binary:\n if not self.is_uwsgi_installed():\n # Download uWSGI.\n download_path = self.download_release()\n\n # Extract uWSGI.\n uwsgi_path, extract_path = self.extract_release(download_path)\n\n try:\n # Build uWSGI.\n self.build_uwsgi(uwsgi_path)\n finally:\n # Remove extracted uWSGI package.\n shutil.rmtree(extract_path)\n\n paths.append(self.uwsgi_binary_path)\n\n # Create uWSGI config file.\n paths.append(self.create_configuration_file())\n return paths\n\n update = install\n","sub_path":"buildout/recipe/uwsgi.py","file_name":"uwsgi.py","file_ext":"py","file_size_in_byte":9774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"287773859","text":"#!/usr/bin/env python\n\nimport time\nimport os\nimport sys\nimport xmlrpclib\nimport urlparse\nimport logging\nimport json\nimport shlex\nimport subprocess\nimport threading\nimport copy\n\nfrom crontab import CronItem\nfrom datetime import datetime, timedelta\nfrom socket import gethostname\n\n\nLOG_LEVEL = logging.INFO\nCRON_FILE = 'superbeat.cron'\n\nlog = logging.getLogger('superbeat')\nlogging.basicConfig(\n level=LOG_LEVEL,\n format='%(asctime)s.%(msecs)03d %(name)s %(levelname)s - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n\ncmd_map = {\n 'start': 'supervisor.startProcess',\n 'stop': 'supervisor.stopProcess',\n 'exec': 'run_process',\n}\n\nwait_arg_map = {\n 'wait': 'true',\n 'nowait': 'false'\n}\n\n\ndef cron_file_name():\n cron_file = sys.argv[1] if len(sys.argv) > 1 else CRON_FILE\n exe_dir = os.path.abspath(os.path.dirname(sys.argv[0]))\n return os.path.join(exe_dir, os.path.expanduser(cron_file))\n\n\ndef cron_file_time():\n try:\n return os.stat(cron_file_name()).st_mtime\n except IOError:\n return 0\n\n\ndef read_cron_file():\n cron_tabs = []\n filename = cron_file_name()\n try:\n for n, line in enumerate(open(filename)):\n line = line.strip()\n if line and not line.startswith('#'):\n cron_tabs.append(('TAB_%02d' % n, line))\n log.info('read cron file: %s', filename)\n except IOError:\n log.debug('skip cron file: %s', filename)\n return cron_tabs\n\n\ndef json_conv(s):\n try:\n return json.loads(s)\n except ValueError:\n return s\n\n\ndef prepare_crons():\n cron_args = [('ARG_%02d' % n, arg) for n, arg in enumerate(sys.argv[2:])]\n cron_vars = [(name, val) for name, val in os.environ.items()\n if name.startswith('CRON_')]\n cron_tabs = read_cron_file()\n localhost = gethostname().partition('.')[0]\n\n for name, token in sorted(cron_args + cron_vars + cron_tabs):\n try:\n when, where, action = token.split(':', 2)\n cron = CronItem(when.strip())\n where = where.strip().split()\n args = shlex.split(action.strip())\n cmd 
= args.pop(0)\n cmd = cmd_map.get(cmd, cmd)\n if args:\n args[-1] = wait_arg_map.get(args[-1], args[-1])\n args = map(json_conv, args)\n cmd_str = '%s(%s)' % (cmd, ','.join(map(str, args)))\n if '*' in where or localhost in where:\n log.debug('prepare cron %s: %s - %s' %\n (name, cmd_str, when.strip()))\n yield (cron, cmd, args, cmd_str)\n else:\n log.info('skip non-local cron \"%s\"' % token)\n except (ValueError, IOError) as exc:\n log.error('invalid cron \"%s\": %s' % (token, exc))\n\n\ndef connect_rpc():\n try:\n url = os.environ['SUPERVISOR_SERVER_URL']\n except KeyError:\n log.error('please run under supervisord')\n sys.exit(1)\n\n try:\n user_pass = os.environ['SUPERVISOR_USERPASS'] + '@'\n except KeyError:\n user_pass = ''\n\n if user_pass:\n parts = urlparse.urlsplit(url)\n rpc_url = urlparse.urlunsplit((\n parts.scheme, user_pass + parts.netloc,\n parts.path, parts.query, parts.fragment))\n else:\n rpc_url = url\n\n log.info('connect to supervisor %s', url)\n if url.startswith('unix://'):\n # See: http://stackoverflow.com/a/23837147\n import httplib\n import socket\n\n class UnixStreamHTTPConnection(httplib.HTTPConnection):\n def connect(self):\n self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n self.sock.connect(self.host)\n\n class UnixStreamTransport(xmlrpclib.Transport, object):\n def __init__(self, socket_path):\n self.socket_path = socket_path\n super(UnixStreamTransport, self).__init__()\n\n def make_connection(self, host):\n return UnixStreamHTTPConnection(self.socket_path)\n\n sock_path = rpc_url[7:]\n log.debug('socket path is %s', sock_path)\n conn = xmlrpclib.Server(\n 'http://127.0.0.1', transport=UnixStreamTransport(sock_path))\n else:\n log.debug('rpc url is %s', rpc_url)\n conn = xmlrpclib.ServerProxy(rpc_url)\n log.debug('methods: %s', conn.system.listMethods())\n\n return conn\n\n\ndef resolve_method(cmd, runners):\n for obj in runners:\n try:\n if getattr(obj, cmd, None):\n return obj, cmd\n except Exception:\n try:\n if 
cmd in obj.system.listMethods():\n return obj, cmd\n except Exception:\n pass\n else:\n raise ValueError('method \"%s\" not found' % cmd)\n\n\nclass RunProcess(object):\n def __init__(self):\n self._threads = set()\n\n def __repr__(self):\n return '<%s>' % self.__class__.__name__\n\n def _poll(self):\n for thread in tuple(self._threads):\n if not thread.is_alive():\n thread.join(0)\n self._threads.remove(thread)\n\n def run_process(self, *args):\n args = copy.copy(args)\n\n def target():\n null = open(os.devnull)\n proc = subprocess.Popen(\n args,\n shell=False,\n stdin=null.fileno(),\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n bufsize=0)\n cmd = '%s[%d]' % (os.path.basename(args[0]), proc.pid)\n log.debug('%s started', cmd)\n stdout, stderr = proc.communicate()\n null.close()\n for line in stdout.split('\\n'):\n line = line.strip()\n if line:\n log.info('%s says: %s', cmd, line)\n if proc.returncode:\n log.info('%s finished with status %d', cmd, proc.returncode)\n else:\n log.debug('%s finished', cmd)\n\n thread = threading.Thread(target=target)\n thread.start()\n thread.join(0.1)\n if thread.is_alive():\n self._threads.add(thread)\n\n\ndef main():\n tick = datetime.now().replace(second=0, microsecond=0)\n one_min = timedelta(0, 60)\n one_sec = timedelta(0, 1)\n gap_sec = one_sec * 2\n\n rpc_conn = connect_rpc()\n file_time = cron_file_time()\n run_proc = RunProcess()\n runners = (run_proc, rpc_conn)\n cron_list = list(prepare_crons())\n\n while 1:\n tick += one_min\n tick_eps = tick - gap_sec\n delay = (tick_eps - datetime.now()).total_seconds()\n log.debug('will tick in %d seconds', delay)\n if delay > 0:\n time.sleep(delay)\n run_proc._poll()\n\n new_file_time = cron_file_time()\n if new_file_time != file_time:\n cron_list = list(prepare_crons())\n file_time = new_file_time\n\n next_list = []\n for cron, cmd, args, cmd_str in cron_list:\n cron_next = cron.schedule().get_next()\n next_list.append((cron_next, cmd, args, cmd_str))\n\n delay = (tick - 
datetime.now()).total_seconds()\n if delay > 0:\n time.sleep(delay)\n\n for cron_next, cmd, args, cmd_str in next_list:\n diff = (tick - cron_next).total_seconds()\n log.debug('%s next %s (diff=%s)', cmd_str, cron_next, diff)\n if -30 < diff < 30:\n log.debug('calling %s', cmd_str)\n try:\n obj, met = resolve_method(cmd, runners)\n getattr(obj, met)(*args)\n log.info('called: %s', cmd_str)\n except ValueError:\n log.error('%s: method not found', cmd_str)\n except xmlrpclib.Fault as fault:\n log.error('%s: call failed: %s',\n cmd_str, fault.faultString)\n except xmlrpclib.ProtocolError as perr:\n log.error('%s: protocol failed: %s', cmd_str, perr.errmsg)\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n log.info('stopped')\n","sub_path":"roles.devel/dev-supervisor/files/superbeat.py","file_name":"superbeat.py","file_ext":"py","file_size_in_byte":8164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"425661336","text":"def main():\n\n res=open('attractor_10A.txt').read().split('\\n')\n sen=open('attractor_231.txt').read().split('\\n')\n\n resd = {}\n send={}\n for line in res:\n #print(line)\n (key, val) = line.split()\n resd[key] = float(val)\n\n for line in sen:\n (key, val)=line.split()\n send[key]=float(val)\n\n for key,val1 in resd.items():\n val2=send.get(key)\n if val1<0 and val2>0:\n print (key+'\\t'+str(val1)+'\\t'+str(val2))\n if val1>0 and val2<0:\n print (key+'\\t'+str(val1)+'\\t'+str(val2))\nmain()\n \n","sub_path":"_site/_projects/project2/OLD/NetworkAnalysis 1/SFA_2/find_readoutnodes.py","file_name":"find_readoutnodes.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"579671434","text":"import tkinter\nfrom tkinter import ttk\nimport keyboard\nimport math\nfrom screeninfo import get_monitors\nimport datetime\n\n\ndef handle_focus(event):\n if event.widget == root:\n root.focus_set()\n calcInput.userInputEntry.focus_set()\n\n\nglobal screenWidth, screenHeight\n\nroot = tkinter.Tk()\nscreenInfo = get_monitors()[0]\nscreenWidth = screenInfo.width\nscreenHeight = screenInfo.height\nprint(screenWidth, screenHeight)\n\nroot.title(\"Calculator\")\nroot.geometry(\"%sx67+%s+%s\" % (screenWidth, screenWidth, screenHeight-40))\nroot.resizable(False, False)\nroot.wm_attributes('-type', 'splash')\nroot.attributes(\"-topmost\", True)\nglobal histFilename\nhistFilename = 'calculatorHistory.txt'\n\n\nclass CalculatorMain:\n\n def __init__(self, main):\n\n self.magicFrame = tkinter.Frame(main) # all the other grid elements (in column 0) are relative to this one so by giving it monitor width, the other frames have it too\n self.magicFrame.config(height=0, width=screenWidth)\n self.magicFrame.grid(row=0, column=0, sticky=\"WE\")\n\n # self.mathFunctionsFrame = tkinter.Frame(main)\n # self.mathFunctionsFrame.config(height=80, bg=\"red\")\n # self.mathFunctionsFrame.grid(row=1, column=0)\n # self.mathFunctionsFrame.grid_remove()\n\n self.numberAndFuncLayoutFrame = tkinter.Frame(main)\n self.numberAndFuncLayoutFrame.config(height=20)\n self.numberAndFuncLayoutFrame.grid(row=2, column=0, sticky=\"WE\")\n self.numberAndFuncLayoutFrame.grid_remove()\n\n self.buttonMenuFrame = tkinter.Frame(main)\n self.buttonMenuFrame.config(height=20)\n self.buttonMenuFrame.grid(row=3, column=0, sticky=\"WE\")\n\n self.inputFrame = tkinter.Frame(main)\n self.inputFrame.config(height=40)\n self.inputFrame.grid(row=4, column=0, sticky=\"WE\")\n self.inputFrame.focus_set()\n\n\n def showANDhide(self, main, event=None):\n if 'normal' == main.state():\n main.withdraw()\n else:\n main.deiconify()\n main.after(1, lambda: main.focus_force())\n # 
calcInput.userInputEntry.focus()\n\n\n\nclass CalculatorInput:\n\n def __init__(self, frame):\n self.userInputEntry = tkinter.Entry(frame, font=(\"Helvetica\", 20), takefocus=1)\n self.userInputEntry.pack(fill=tkinter.BOTH)\n self.upAndDownKeyPress = 0\n\n def calculateUserInput(self, event=None):\n\n command = self.userInputEntry.get()\n command = command.rstrip('\\n')\n try:\n result = eval(command)\n except:\n result = \"error\"\n\n self.addToHistory(command, result)\n\n self.userInputEntry.delete(0, tkinter.END)\n self.userInputEntry.insert(0, result)\n self.upAndDownKeyPress = 0\n\n def clearUserInput(self):\n self.userInputEntry.delete(0, tkinter.END)\n\n def clearLastInputedChar(self):\n get = self.userInputEntry.get()[:-1]\n self.userInputEntry.delete(0, tkinter.END)\n self.userInputEntry.insert(0, get)\n\n def addToHistory(self, command, result):\n try:\n file = open(histFilename, 'x')\n except FileExistsError:\n file = open(histFilename, 'a')\n\n writeMsg = \"DATE: \" + str(datetime.datetime.now())+\" \"+command + \" = \" + str(result)\n file.write(writeMsg)\n file.write('\\n')\n file.close()\n\n try:\n if calcHistoryMenu.window.winfo_exists():\n calcHistoryMenu.insertNewLine(writeMsg)\n\n except AttributeError or tkinter.TclError:\n pass\n\n def traverseHistoryUpDown(self, event, key):\n\n if key == 'up':\n self.upAndDownKeyPress += 1\n if key == 'down':\n self.upAndDownKeyPress -= 1\n\n if self.upAndDownKeyPress <= 0:\n self.userInputEntry.delete(0, tkinter.END)\n self.upAndDownKeyPress = 0\n return\n\n file = open(histFilename, 'r')\n lines = file.read().splitlines()\n if self.upAndDownKeyPress >= len(lines):\n self.upAndDownKeyPress = len(lines)\n line = lines[-1*self.upAndDownKeyPress]\n\n line = line[40:] # remove everything except the initial command\n line = line.split('=', 1)[0]\n line = ''.join(line.split())\n\n self.userInputEntry.delete(0, tkinter.END)\n self.userInputEntry.insert(0, line)\n\n\nclass CalculatorButtonMenu:\n\n def 
__init__(self, frame, main):\n\n self.btnEquals = tkinter.Button(frame, text=\"=\", command=lambda: calcInput.calculateUserInput())\n self.btnEquals.pack(side=\"left\")\n\n self.numberLayoutPopUp = tkinter.Button(frame, text=\"🠕\", command=lambda: self.popUpMoreFuncFrame(main))\n self.numberLayoutPopUp.pack(side=\"left\")\n\n self.btnClear = tkinter.Button(frame, text=\"C\", command=lambda: calcInput.clearUserInput())\n self.btnClear.pack(side=\"left\")\n\n self.btnDot = tkinter.Button(frame, text=\".\", command=lambda: calcNumberAndFuncLayout.insertInUserInputEntry(self.btnDot.cget('text')))\n\n self.btnPlus = tkinter.Button(frame, text=\"+\", command=lambda: calcNumberAndFuncLayout.insertInUserInputEntry(self.btnPlus.cget('text')))\n\n self.btnMinus = tkinter.Button(frame, text=\"-\", command=lambda: calcNumberAndFuncLayout.insertInUserInputEntry(self.btnMinus.cget('text')))\n\n self.btnMultiply = tkinter.Button(frame, text=\"*\", command=lambda: calcNumberAndFuncLayout.insertInUserInputEntry(self.btnMultiply.cget('text')))\n\n self.btnDivide = tkinter.Button(frame, text=\"/\", command=lambda: calcNumberAndFuncLayout.insertInUserInputEntry(self.btnDivide.cget('text')))\n\n self.btnComma = tkinter.Button(frame, text=\",\", command=lambda: calcNumberAndFuncLayout.insertInUserInputEntry(self.btnComma.cget('text')))\n\n self.exitButton = tkinter.Button(frame, text=\"x\", command=lambda: self.closeMainWindow(main))\n self.exitButton.pack(side=\"right\")\n\n self.histViewButton = tkinter.Button(frame, text=\"History\", command=lambda: self.viewCalcHistory(main))\n self.histViewButton.pack(side=\"right\")\n\n self.mathLayoutPopUp = tkinter.Button(frame, text=\"🠕🠕\", command=lambda: calcNumberAndFuncLayout.mathFunctionsPopUp(main))\n self.mathLayoutPopUp.pack(side=\"right\")\n\n def closeMainWindow(self, main):\n main.destroy()\n\n def viewCalcHistory(self, main):\n try:\n if calcHistoryMenu.window.winfo_exists():\n calcHistoryMenu.destroyWindow()\n 
self.histViewButton[\"text\"] = \"History\"\n else:\n calcHistoryMenu.createWindow(main)\n self.histViewButton[\"text\"] = \"Close History\"\n\n except AttributeError:\n calcHistoryMenu.createWindow(main)\n self.histViewButton[\"text\"] = \"Close History\"\n\n def popUpMoreFuncFrame(self, main):\n if calc.numberAndFuncLayoutFrame.winfo_ismapped():\n calc.numberAndFuncLayoutFrame.grid_remove()\n main.geometry(\"%sx67+%s+%s\" % (screenWidth, screenWidth, screenHeight-40))\n\n self.btnDot.pack_forget()\n self.btnPlus.pack_forget()\n self.btnMinus.pack_forget()\n self.btnMultiply.pack_forget()\n self.btnDivide.pack_forget()\n self.btnComma.pack_forget()\n\n else:\n main.geometry(\"%sx98+%s+%s\" % (screenWidth, screenWidth, screenHeight-40))\n calc.numberAndFuncLayoutFrame.grid()\n\n self.btnDot.pack(side=\"left\")\n self.btnPlus.pack(side=\"left\")\n self.btnMinus.pack(side=\"left\")\n self.btnMultiply.pack(side=\"left\")\n self.btnDivide.pack(side=\"left\")\n self.btnComma.pack(side=\"left\")\n self.btnClear.pack_forget()\n self.btnClear.pack(side=\"left\")\n\n\nclass CalculatorNumberAndFunctionLayout:\n\n def __init__(self, frame):\n\n self.btn1 = tkinter.Button(frame, text=\"1\", command=lambda: self.insertInUserInputEntry(self.btn1.cget('text')))\n self.btn1.pack(side=\"left\")\n self.btn2 = tkinter.Button(frame, text=\"2\", command=lambda: self.insertInUserInputEntry(self.btn2.cget('text')))\n self.btn2.pack(side=\"left\")\n self.btne = tkinter.Button(frame, text=\"e\", command=lambda: self.insertInUserInputEntry('math.e'))\n self.btne.pack(side=\"left\")\n self.btn3 = tkinter.Button(frame, text=\"3\", command=lambda: self.insertInUserInputEntry(self.btn3.cget('text')))\n self.btn3.pack(side=\"left\")\n self.btnpi = tkinter.Button(frame, text=\"π\", command=lambda: self.insertInUserInputEntry('math.pi'))\n self.btnpi.pack(side=\"left\")\n self.btn4 = tkinter.Button(frame, text=\"4\", command=lambda: self.insertInUserInputEntry(self.btn4.cget('text')))\n 
self.btn4.pack(side=\"left\")\n self.btn5 = tkinter.Button(frame, text=\"5\", command=lambda: self.insertInUserInputEntry(self.btn5.cget('text')))\n self.btn5.pack(side=\"left\")\n self.btn6 = tkinter.Button(frame, text=\"6\", command=lambda: self.insertInUserInputEntry(self.btn6.cget('text')))\n self.btn6.pack(side=\"left\")\n self.btne = tkinter.Button(frame, text=\"𝜏\", command=lambda: self.insertInUserInputEntry('math.tau'))\n self.btne.pack(side=\"left\")\n self.btn7 = tkinter.Button(frame, text=\"7\", command=lambda: self.insertInUserInputEntry(self.btn7.cget('text')))\n self.btn7.pack(side=\"left\")\n self.btn8 = tkinter.Button(frame, text=\"8\", command=lambda: self.insertInUserInputEntry(self.btn8.cget('text')))\n self.btn8.pack(side=\"left\")\n self.btn9 = tkinter.Button(frame, text=\"9\", command=lambda: self.insertInUserInputEntry(self.btn9.cget('text')))\n self.btn9.pack(side=\"left\")\n self.btn0 = tkinter.Button(frame, text=\"0\", command=lambda: self.insertInUserInputEntry(self.btn0.cget('text')))\n self.btn0.pack(side=\"left\")\n self.btnClearOne = tkinter.Button(frame, text=\"<-\", command=lambda: calcInput.clearLastInputedChar())\n self.btnClearOne.pack(side=\"left\")\n\n self.optFrame = tkinter.Frame(frame)\n\n self.optPossibilitiesValues = [\"combination(n,k)\", \"permutation(n,k)\", \"factorial(x)\"]\n self.optPossibilities = ttk.Combobox(self.optFrame, values=self.optPossibilitiesValues)\n self.optPossibilities.state = (['disabled'])\n self.optPossibilities.set('possibilities')\n self.optPossibilities.bind(\"\", lambda e: \"break\")\n self.optPossibilities.bind(\"<>\", lambda event: self.detectMathFunction(event, self.optPossibilities.get()))\n self.optPossibilities.pack(side=\"right\")\n\n self.optTrigonometryValues = [\"sin(x)\", \"cos(x)\", \"tan(x)\", \"asin(x)\", \"acos(x)\", \"atan(x)\"]\n self.optTrigonometry = ttk.Combobox(self.optFrame, values=self.optTrigonometryValues)\n self.optTrigonometry.state = (['disabled'])\n 
self.optTrigonometry.set('trigonometry')\n self.optTrigonometry.bind(\"\", lambda e: \"break\")\n self.optTrigonometry.bind(\"<>\", lambda event: self.detectMathFunction(event, self.optTrigonometry.get()))\n self.optTrigonometry.pack(side=\"right\")\n\n self.optAngularConversionValues = [\"radians\", \"degrees\"]\n self.optAngularConversion = ttk.Combobox(self.optFrame, values=self.optAngularConversionValues)\n self.optAngularConversion.state = (['disabled'])\n self.optAngularConversion.set('angular conversion')\n self.optAngularConversion.bind(\"\", lambda e: \"break\")\n self.optAngularConversion.bind(\"<>\", lambda event: self.detectMathFunction(event, self.optAngularConversion.get()))\n self.optAngularConversion.pack(side=\"right\")\n\n self.optHyperbolicValues = [\"sinh(x)\", \"cosh(x)\", \"tanh(x)\", \"asinh(x)\", \"acosh(x)\", \"atanh(x)\"]\n self.optHyperbolic = ttk.Combobox(self.optFrame, values=self.optHyperbolicValues)\n self.optHyperbolic.state = (['disabled'])\n self.optHyperbolic.set('hyperbolic')\n self.optHyperbolic.bind(\"\", lambda e: \"break\")\n self.optHyperbolic.bind(\"<>\", lambda event: self.detectMathFunction(event, self.optHyperbolic.get()))\n self.optHyperbolic.pack(side=\"right\")\n\n self.optOtherValues = [\"pow(x, y)\", \"sqrt(x)\", \"log(x, base)\"]\n self.optOther = ttk.Combobox(self.optFrame, values=self.optOtherValues)\n self.optOther.state = (['disabled'])\n self.optOther.set('other')\n self.optOther.bind(\"\", lambda e: \"break\")\n self.optOther.bind(\"<>\", lambda event: self.detectMathFunction(event, self.optOther.get()))\n self.optOther.pack(side=\"right\")\n\n self.selectionCommandTable = {\n \"combination(n,k)\": \"math.comb()\",\n \"permutation(n,k)\": \"math.perm()\",\n \"factorial(x)\": \"math.factorial()\",\n \"sin(x)\": \"math.sin()\",\n \"cos(x)\": \"math.cos()\",\n \"tan(x)\": \"math.tan()\",\n \"asin(x)\": \"math.asin()\",\n \"acos(x)\": \"math.acos()\",\n \"atan(x)\": \"math.atan()\",\n \"radians\": 
\"math.radians()\",\n \"degrees\": \"math.degrees()\",\n \"sinh(x)\": \"math.sinh()\",\n \"cosh(x)\": \"math.cosh()\",\n \"tanh(x)\": \"math.tanh()\",\n \"asinh(x)\": \"math.asinh()\",\n \"acosh(x)\": \"math.acosh()\",\n \"atanh(x)\": \"math.atanh()\",\n \"pow(x, y)\": \"math.pow()\",\n \"sqrt(x)\": \"math.sqrt()\",\n \"log(x, base)\": \"math.log()\"\n }\n\n def insertInUserInputEntry(self, char):\n self.optOther.set('other')\n self.optHyperbolic.set('hyperbolic')\n self.optAngularConversion.set('angular conversion')\n self.optTrigonometry.set('trigonometry')\n self.optPossibilities.set('possibilities')\n\n calcInput.userInputEntry.insert(calcInput.userInputEntry.index(\"insert\"), char)\n\n def detectMathFunction(self, event, selection):\n self.insertMathFunctionInEntry(self.selectionCommandTable[selection])\n\n def insertMathFunctionInEntry(self, command):\n entryMsg = calcInput.userInputEntry.get()\n\n isDigitBool = 0\n commaCounter = 0\n for i in entryMsg: # also add , if user didnt\n if i.isnumeric():\n isDigitBool = 1\n\n if isDigitBool:\n if i == ',':\n commaCounter += 1\n\n if commaCounter == 0:\n prev = ''\n for i in enumerate(entryMsg):\n if i[1].isnumeric() or i[1] == '.':\n prev = i[1]\n elif prev.isnumeric():\n entryMsg = entryMsg[:i[0]] + ',' + entryMsg[i[0]+1:]\n else:\n prev = i[1]\n\n entryMsg = command[:-1] + entryMsg + \")\"\n calcInput.userInputEntry.delete(0, tkinter.END)\n calcInput.userInputEntry.insert(0, entryMsg)\n\n def mathFunctionsPopUp(self, main):\n if calc.numberAndFuncLayoutFrame.winfo_ismapped():\n pass\n else:\n calc.numberAndFuncLayoutFrame.grid()\n main.geometry(\"%sx98+%s+%s\" % (screenWidth, screenWidth, screenHeight-40))\n\n if self.optFrame.winfo_ismapped():\n self.optFrame.pack_forget()\n else:\n self.optFrame.pack(side=\"right\")\n\n\nclass CalculatorHistoryMenu:\n\n def createWindow(self, main):\n self.window = tkinter.Toplevel(main)\n self.window.config(bg=\"grey\")\n self.window.geometry(\"%sx%s+%s+0\" % (screenWidth, 
screenHeight, screenWidth))\n self.window.protocol(\"WM_DELETE_WINDOW\", self.onClose)\n\n self.histText = tkinter.Text(self.window)\n self.histText.config(bg=\"grey\", font=(\"Helvetica\", 15), height=screenHeight-(screenHeight-main.winfo_screenheight()))\n self.histText.pack(side=\"top\", anchor=\"center\", fill=tkinter.BOTH)\n self.histText.bind(\"\", lambda e: \"break\") # so that u cant edit\n self.writeHist()\n\n def destroyWindow(self):\n self.window.destroy()\n\n def writeHist(self):\n file = open(histFilename, \"r\")\n lines = file.readlines()\n\n for line in lines:\n self.histText.insert(1.0, line)\n\n file.close()\n\n def insertNewLine(self, message):\n self.histText.insert(1.0, \"%s\\n\" % (message))\n\n def onClose(self):\n self.window.destroy()\n calcButtonMenu.histViewButton[\"text\"] = \"History\"\n\n\nglobal calc, calcButtonMenu, calcInput, calcNumberAndFuncLayout, calcHistoryMenu\ncalc = CalculatorMain(root)\ncalcInput = CalculatorInput(calc.inputFrame)\ncalcHistoryMenu = CalculatorHistoryMenu()\ncalcButtonMenu = CalculatorButtonMenu(calc.buttonMenuFrame, root)\ncalcNumberAndFuncLayout = CalculatorNumberAndFunctionLayout(calc.numberAndFuncLayoutFrame)\n\n\nkeyboard.add_hotkey(\"ctrl+space\", lambda: calc.showANDhide(root))\nroot.bind('', lambda event: calcInput.calculateUserInput())\ncalcInput.userInputEntry.bind(\"\", lambda event, key=\"up\": calcInput.traverseHistoryUpDown(event, key))\ncalcInput.userInputEntry.bind(\"\", lambda event, key=\"down\": calcInput.traverseHistoryUpDown(event, key))\ncalcInput.userInputEntry.bind(\"\", lambda arg: calcInput.clearUserInput())\nroot.bind(\"\", handle_focus)\n\n\nroot.mainloop()\n","sub_path":"Calculator.py","file_name":"Calculator.py","file_ext":"py","file_size_in_byte":17255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"479590331","text":"##############################################################################\n#\n# Copyright (c) 2001, 2002 Zope Corporation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"\n$Id$\n\"\"\"\nfrom zope.interface import Interface\nfrom zope.configuration.fields import GlobalObject, Tokens, \\\n PythonIdentifier, MessageID\nfrom zope.schema import TextLine, Id\nfrom zope.app.security.fields import Permission\n\nclass IBasicComponentInformation(Interface):\n\n component = GlobalObject(\n title=u\"Component to be used\",\n required=False\n )\n\n permission = Permission(\n title=u\"Permission\",\n required=False\n )\n\n factory = GlobalObject(\n title=u\"Factory\",\n required=False\n )\n\nclass IBasicViewInformation(Interface):\n \"\"\"\n This is the basic information for all views.\n \"\"\"\n \n for_ = Tokens(\n title=u\"Specifications of the objects to be viewed\",\n description=u\"\"\"This should be a list of interfaces or classes\n \"\"\",\n required=True,\n value_type=GlobalObject(missing_value=object())\n )\n\n permission = Permission(\n title=u\"Permission\",\n description=u\"The permission needed to use the view.\",\n required=False\n )\n\n class_ = GlobalObject(\n title=u\"Class\",\n description=u\"A class that provides attributes used by the view.\",\n required=False\n )\n\n layer = TextLine(\n title=u\"The layer the view is in.\",\n description=u\"\"\"\n A skin is composed of layers. 
It is common to put skin\n specific views in a layer named after the skin. If the 'layer'\n attribute is not supplied, it defaults to 'default'.\"\"\",\n required=False\n )\n\n allowed_interface = Tokens(\n title=u\"Interface that is also allowed if user has permission.\",\n description=u\"\"\"\n By default, 'permission' only applies to viewing the view and\n any possible sub views. By specifying this attribute, you can\n make the permission also apply to everything described in the\n supplied interface.\n\n Multiple interfaces can be provided, separated by\n whitespace.\"\"\",\n required=False,\n value_type=GlobalObject()\n )\n\n allowed_attributes = Tokens(\n title=u\"View attributes that are also allowed if user has permission.\",\n description=u\"\"\"\n By default, 'permission' only applies to viewing the view and\n any possible sub views. By specifying 'allowed_attributes',\n you can make the permission also apply to the extra attributes\n on the view object.\"\"\",\n required=False,\n value_type=PythonIdentifier()\n )\n\nclass IBasicResourceInformation(Interface):\n \"\"\"\n Basic information for resources\n \"\"\"\n\n name = TextLine(\n title=u\"The name of the resource.\",\n description=u\"The name shows up in URLs/paths. For example 'foo'.\",\n required=True,\n default=u'',\n )\n\n provides = GlobalObject(\n title=u\"The interface this component provides.\",\n description=u\"\"\"\n A view can provide an interface. 
This would be used for\n views that support other views.\"\"\",\n required=False,\n default=Interface,\n )\n\n type = GlobalObject(\n title=u\"Request type\",\n required=True\n )\n\nclass IInterfaceDirective(Interface):\n \"\"\"\n Define an interface\n \"\"\"\n \n interface = GlobalObject(\n title=u\"Interface\",\n required=True\n )\n\n type = GlobalObject(\n title=u\"Interface type\",\n required=False\n )\n\nclass IAdapterDirective(Interface):\n \"\"\"\n Register an adapter\n \"\"\"\n\n factory = Tokens(\n title=u\"Adapter factory/factories\",\n description=u\"\"\"A list of factories (usually just one) that create the\n adapter instance.\"\"\",\n required=True,\n value_type=GlobalObject()\n )\n\n provides = GlobalObject(\n title=u\"Interface the component provides\",\n description=u\"\"\"This attribute specifes the interface the adapter\n instance must provide.\"\"\",\n required=True\n )\n\n for_ = Tokens(\n title=u\"Specifications to be adapted\",\n description=u\"\"\"This should be a list of interfaces or classes\n \"\"\",\n required=True,\n value_type=GlobalObject(missing_value=object())\n )\n\n permission = Permission(\n title=u\"Permission\",\n description=u\"\"\"This adapter is only available, if the principal has\n this permission.\"\"\",\n required=False\n )\n\n name = TextLine(\n title=u\"Name\",\n description=u\"\"\"Adapters can have names. 
This attribute allows you to\n specify the name for this adapter.\"\"\",\n required=False\n )\n\nclass ISubscriberDirective(Interface):\n \"\"\"\n Register a subscriber\n \"\"\"\n\n factory = GlobalObject(\n title=u\"Subscriber factory\",\n description=u\"A factory used to create the subscriber instance.\",\n required=True\n )\n\n provides = GlobalObject(\n title=u\"Interface the component provides\",\n description=u\"\"\"This attribute specifes the interface the adapter\n instance must provide.\"\"\",\n required=False,\n )\n\n for_ = Tokens(\n title=u\"Interfaces or classes that this subscriber depends on\",\n description=u\"This should be a list of interfaces or classes\",\n required=True,\n value_type=GlobalObject(missing_value = object()),\n )\n\n permission = Permission(\n title=u\"Permission\",\n description=u\"\"\"This subscriber is only available, if the principal has\n this permission.\"\"\",\n required=False\n )\n\nclass IUtilityDirective(IBasicComponentInformation):\n \"\"\"\n Register a utility\n \"\"\"\n\n provides = GlobalObject(\n title=u\"Interface the component provides\",\n required=True\n )\n\n name = TextLine(\n title=u\"Name\",\n required=False\n )\n\nclass IFactoryDirective(Interface):\n \"\"\"\n Define a factory\n \"\"\"\n\n component = GlobalObject(\n title=u\"Component to be used\",\n required=True\n )\n \n id = TextLine(\n title=u\"ID\",\n required=False\n )\n\n title = MessageID(\n title=u\"Title\",\n description=u\"\"\"\n text suitable for use in the 'add content' menu of a\n management interface\"\"\",\n required=False\n )\n\n description = MessageID(\n title=u\"Description\",\n description=u\"Longer narrative description of what this factory does\",\n required=False\n )\n\n\nclass IViewDirective(IBasicViewInformation, IBasicResourceInformation):\n \"\"\"\n Register a view for a component\n \"\"\"\n\n factory = Tokens(\n title=u\"Factory\",\n required=False,\n value_type=GlobalObject()\n )\n\nclass 
IDefaultViewDirective(IBasicResourceInformation):\n \"\"\"The name of the view that should be the default.\n\n This name refers to view that should be the\n view used by default (if no view name is supplied\n explicitly).\n \"\"\"\n\n for_ = GlobalObject(\n title=u\"The interface this view is the default for.\",\n description=u\"\"\"\n The view is the default view for the supplied interface. If\n this is not supplied, the view applies to all objects (XXX\n this ought to change).\"\"\",\n required=False\n )\n\n\n\nclass IResourceDirective(IBasicComponentInformation,\n IBasicResourceInformation):\n \"\"\"\n Register a resource\n \"\"\"\n \n layer = TextLine(\n title=u\"The layer the resource is in.\",\n required=False\n )\n\n allowed_interface = Tokens(\n title=u\"Interface that is also allowed if user has permission.\",\n required=False,\n value_type=GlobalObject()\n )\n\n allowed_attributes = Tokens(\n title=u\"View attributes that are also allowed if user has permission.\",\n required=False,\n value_type=PythonIdentifier()\n )\n\nclass ILayerDirective(Interface):\n \"\"\"\n Register a layer\n \"\"\"\n\n name = TextLine(\n title=u\"Layer name\",\n description=u\"Layer name\",\n required=True\n )\n\nclass ISkinDirective(Interface):\n \"\"\"\n Register a skin\n \"\"\"\n\n name = TextLine(\n title=u\"Skin name\",\n description=u\"Skin name\",\n required=True\n )\n\n layers = Tokens(\n title=u\"The layers it consists of.\",\n required=True,\n value_type=TextLine()\n )\n\nclass IDefaultSkinDirective(Interface):\n \"\"\"\n Register a skin\n \"\"\"\n\n name = TextLine(\n title=u\"Default skin name\",\n description=u\"Default skin name\",\n required=True\n )\n\nclass IServiceTypeDirective(Interface):\n\n id = TextLine(\n title=u\"ID of the service type\",\n required=True\n )\n\n interface = GlobalObject(\n title=u\"Interface of the service type\",\n required=True\n )\n\nclass IServiceDirective(IBasicComponentInformation):\n \"\"\"\n Register a service\n \"\"\"\n\n 
serviceType = TextLine(\n title=u\"ID of service type\",\n required=True\n )\n\nclass IClassDirective(Interface):\n \"\"\"\n Make statements about a class\n \"\"\"\n\n class_ = GlobalObject(\n title=u\"Class\",\n required=True\n )\n\nclass IImplementsSubdirective(Interface):\n \"\"\"\n Declare that the class given by the content directive's class\n attribute implements a given interface\n \"\"\"\n\n interface = Tokens(\n title=u\"One or more interfaces\",\n required=True,\n value_type=GlobalObject()\n )\n\nclass IRequireSubdirective(Interface):\n \"\"\"\n Indicate that the a specified list of names or the names in a\n given Interface require a given permission for access.\n \"\"\"\n\n permission = Permission(\n title=u\"Permission\",\n description=u\"\"\"\n Specifies the permission by id that will be required to\n access or mutate the attributes and methods specified.\"\"\",\n required=False\n )\n\n attributes = Tokens(\n title=u\"Attributes and methods\",\n description=u\"\"\"\n This is a list of attributes and methods that can be accessed.\"\"\",\n required=False,\n value_type=PythonIdentifier()\n )\n \n set_attributes = Tokens(\n title=u\"Attributes that can be set\",\n description=u\"\"\"\n This is a list of attributes that can be modified/mutated.\"\"\",\n required=False,\n value_type=PythonIdentifier()\n )\n\n interface = Tokens(\n title=u\"Interfaces\",\n description=u\"\"\"\n The listed interfaces' methods and attributes can be accessed.\"\"\",\n required=False,\n value_type=GlobalObject()\n )\n\n set_schema = Tokens(\n title=u\"The attributes specified by the schema can be set\",\n description=u\"\"\"\n The listed schemas' properties can be modified/mutated.\"\"\",\n required=False,\n value_type=GlobalObject()\n )\n\n like_class = GlobalObject(\n title=u\"Configure like this class\",\n description=u\"\"\"\n This argument says that this content class should be configured in the\n same way the specified class' security is. 
If this argument is\n specifed, no other argument can be used.\"\"\",\n required=False\n )\n \nclass IAllowSubdirective(Interface):\n \"\"\"\n Declare a part of the class to be publicly viewable (that is,\n requires the zope.Public permission). Only one of the following\n two attributes may be used.\n \"\"\"\n\n attributes = Tokens(\n title=u\"Attributes\",\n required=False,\n value_type=PythonIdentifier()\n )\n\n interface = Tokens(\n title=u\"Interface\",\n required=False,\n value_type=GlobalObject()\n )\n\nclass IFactorySubdirective(Interface):\n \"\"\"\n Specify the factory used to create this content object\n \"\"\"\n\n id = TextLine(\n title=u\"ID\",\n description=u\"\"\"\n the identifier for this factory in the ZMI factory\n identification scheme. If not given, defaults to the literal\n string given as the content directive's 'class' attribute.\"\"\",\n required=False\n )\n\n title = MessageID(\n title=u\"Title\",\n description=u\"\"\"\n text suitable for use in the 'add content' menu of a\n management interface\"\"\",\n required=False\n )\n\n description = MessageID(\n title=u\"Description\",\n description=u\"Longer narrative description of what this factory does\",\n required=False\n )\n","sub_path":"Zope3/tags/ZopeInterface-3.0.0b1/src/zope/app/component/metadirectives.py","file_name":"metadirectives.py","file_ext":"py","file_size_in_byte":13163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"595326339","text":"#!/home/brandonmfong/SOURCES/Repo/DualPowerGeneration/MaxPowerTracker/py/bin/python3\n\n\"\"\"Simple FTDI EEPROM configurator.\n\"\"\"\n\n# Copyright (c) 2019, Emmanuel Blot \n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the Neotion nor the names of its contributors may\n# be used to endorse or promote products derived from this software\n# without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL NEOTION BE LIABLE FOR ANY DIRECT, INDIRECT,\n# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,\n# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,\n# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom argparse import ArgumentParser, FileType\nfrom logging import Formatter, StreamHandler, DEBUG, ERROR\nfrom sys import modules, stderr\nfrom traceback import format_exc\nfrom pyftdi import FtdiLogger\nfrom pyftdi.eeprom import FtdiEeprom\nfrom pyftdi.misc import hexdump\n\ndef main():\n \"\"\"Main routine\"\"\"\n debug = False\n try:\n argparser = ArgumentParser(description=modules[__name__].__doc__)\n argparser.add_argument('device', nargs='?', default='ftdi:///?',\n help='serial port device name')\n argparser.add_argument('-x', '--hexdump', action='store_true',\n help='dump EEPROM content as ASCII')\n argparser.add_argument('-o', '--output', type=FileType('wt'),\n help='output ini file to save EEPROM content')\n argparser.add_argument('-s', '--serial-number',\n help='set serial number')\n argparser.add_argument('-m', '--manufacturer',\n help='set manufacturer name')\n argparser.add_argument('-p', '--product',\n help='set product name')\n argparser.add_argument('-e', '--erase', action='store_true',\n help='erase the whole EEPROM content')\n argparser.add_argument('-u', '--update', action='store_true',\n help='perform actual update, use w/ care')\n argparser.add_argument('-v', '--verbose', action='count', default=0,\n help='increase verbosity')\n argparser.add_argument('-d', '--debug', action='store_true',\n help='enable debug mode')\n args = argparser.parse_args()\n debug = args.debug\n\n if not args.device:\n argparser.error('Serial device not specified')\n\n 
loglevel = max(DEBUG, ERROR - (10 * args.verbose))\n loglevel = min(ERROR, loglevel)\n if debug:\n formatter = Formatter('%(asctime)s.%(msecs)03d %(name)-20s '\n '%(message)s', '%H:%M:%S')\n else:\n formatter = Formatter('%(message)s')\n FtdiLogger.set_formatter(formatter)\n FtdiLogger.set_level(loglevel)\n FtdiLogger.log.addHandler(StreamHandler(stderr))\n\n eeprom = FtdiEeprom()\n eeprom.open(args.device)\n if args.erase:\n eeprom.erase()\n if args.serial_number:\n eeprom.set_serial_number(args.serial_number)\n if args.manufacturer:\n eeprom.set_manufacturer_name(args.manufacturer)\n if args.product:\n eeprom.set_product_name(args.product)\n if args.hexdump:\n print(hexdump(eeprom.data))\n if args.update:\n eeprom.commit(False)\n if args.verbose > 0:\n eeprom.dump_config()\n if args.output:\n eeprom.save_config(args.output)\n\n except (IOError, ValueError) as exc:\n print('\\nError: %s' % exc, file=stderr)\n if debug:\n print(format_exc(chain=False), file=stderr)\n exit(1)\n except KeyboardInterrupt:\n exit(2)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"MaxPowerTracker/py/bin/ftconf.py","file_name":"ftconf.py","file_ext":"py","file_size_in_byte":4881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"614355138","text":"import numpy as np\nfrom numpy.linalg import solve\nimport pandas as pd\nimport time\n\nclass BCMP_lib:\n \n def __init__(self, N, R, K, mu, type_list):\n self.N = N #網内の拠点数(プログラム内部で指定する場合は1少なくしている)\n self.R = R #クラス数\n self.K = K #網内の客数 K = [K1, K2]のようなリスト。トータルはsum(K)\n alp, self.p = self.getProbArrival() #推移確率の自動生成と到着率を求める\n self.saveCSVi(self.p, './tpr/tprNR_'+str(N)+'_'+str(R)+'.csv')#推移確率をcsvで保存しておく\n self.alpha = alp.T #転置して計算形式に合わせる\n self.mu = mu #サービス率 (N×R)\n self.type_list = type_list #Type1(FCFS),Type2プロセッサシェアリング(Processor Sharing: PS),Type3無限サーバ(Infinite Server: IS),Type4後着順割込継続型(LCFS-PR)\n self.combi_list = []\n self.combi_list = self.getCombiList2([], self.K, self.R, 0, self.combi_list) #K, Rのときの組み合わせ\n NK = [k+1 for k in self.K]\n NK.insert(0, self.N)\n self.mp_set= np.zeros(NK) #周辺分布を格納\n self.exp = np.zeros((self.N, self.R)) #平均計内人数を格納\n self.tp = np.zeros((self.N, self.R)) #スループットを格納\n self.rho = [] #利用率を格納(各拠点ごと)\n #self.m = m #各拠点の窓口数\n #self.alpha = alpha\n #課題\n #1. 推移確率行列から到着率の計算過程を入れる(推移確率は取り込みとランダム生成(エルゴード性を担保)の両方)\n #2. 
窓口数を加味した計算(利用率やFSの部分)\n \n def getBCMP(self):\n for n in range(self.N): #P324 Table 8.3\n for i, k in enumerate(self.combi_list):\n g = self.getConvolution(n, k)\n #print('G{0}({1}) = {2}'.format(n,k,g))\n if n == self.N-1 and i == len(self.combi_list)-1: #combi_listの最後の要素のとき(注意:常に最後のリストで大丈夫か?)\n self.GK = g\n \n self.getMarginalProbabilitySet() #[(0,0),(0,1),(0,2)],[(1,0),(1,1),(1,2)]\n \n self.getEXP()\n \n self.getThroughput()\n \n self.getUsageRate()\n \n return self.mp_set, self.exp, self.tp, self.rho\n \n \n def getMarginalProbabilitySet(self): #全ての周辺分布を求める\n for n in range(self.N):\n for k in self.combi_list:\n nk = []\n nk.append([n])\n for i in k: #ndarrayでリストで要素を指定するときには[[n],[k1],[k2]]のように[]をつけて、タプルにする\n nk.append([i])\n mp = self.getMarginalProbability(n, k)\n self.mp_set[tuple(nk)] = mp\n \n \n #周辺分布を求める(拠点nに対して)\n #G(K)をgetConvolutionでN=n-1、Si=[K1,K2]となった時がGKなので最初にそれを取得してから(8.23)を計算する\n def getMarginalProbability(self, n, Si):\n mp = self.FS(n, Si) * self.getGi(n, np.array(K)-np.array(Si)) / self.GK\n return mp\n \n def getGi(self, n, Si): #P322 式(8.25,26,27) 引数nはG_N^(i)のiに対応する\n if sum(Si) == 0: #式(8.27)\n gi = 1\n else:\n gi = self.getConvolution(self.N-1, Si)\n combi_list = []\n combi_list = self.getCombiList2([], Si, R, 0, combi_list)\n for k in combi_list:\n if(sum(k) == 0):#j=0の場合は除く(8.26) \n continue\n else:\n gi -= self.FS(n, k) * self.getGi(n, np.array(Si)-np.array(k))\n return gi\n \n def getConvolution(self, n, Si):#P321式(8.21)\n g = 0\n combi_list = []\n combi_list = self.getCombiList2([], Si, R, 0, combi_list)\n if n == 0:\n g = self.FS(n, Si)\n elif n >= 1:\n for k in combi_list:\n g += self.getConvolution(n-1, k) * self.FS(n, np.array(Si)-np.array(k)) \n return g \n \n # P323 Table 8.2 Fi(Si)の計算, P303式(7.82)\n #Siはノードiにおけるクラス別人数分布:(ノードiのクラス0の人数, ノードiのクラス1の人数)\n #Siは(0,0),(1,0),(0,1),(1,1),(0,2),(1,2) K1=1,K2=2なので\n def FS(self, n, Si):#sは状態分布, type_number =1(FCFS),2(PS),3(IS),4(LCFS-PR)\n f = 1\n if self.type_list[n] == 1:\n 
print('FCFS') #ここはまだ未実装\n else:\n for r in range(R):#type-3はこのループで終わり\n f *= 1 / self.fact(Si[r]) * (self.alpha[n,r] / self.mu[n,r])**Si[r]\n if self.type_list[n] == 2 or self.type_list[n] == 4:#type-2,4は累乗をかける \n f *= self.fact(sum(Si))\n return f\n \n def fact(self, n):\n if n <= 1:\n return 1\n return n * self.fact(n-1)\n \n def getCombiList2(self, combi, K, R, idx, Klist):\n if len(combi) == R:\n Klist.append(combi.copy())\n return Klist\n for v in range(K[idx]+1):\n combi.append(v)\n Klist = self.getCombiList2(combi, K, R, idx+1, Klist)\n combi.pop()\n return Klist\n \n #平均系内人数を求める\n def getEXP(self):\n for n in range(self.N):\n for k in self.combi_list:\n nk = []\n nk.append([n])\n for i in k: #ndarrayでリストで要素を指定するときには[[n],[k1],[k2]]のように[]をつけて、タプルにする\n nk.append([i])\n for r in range(self.R):\n self.exp[n,r] += k[r] * self.mp_set[tuple(nk)]\n #return self.exp\n \n #スループット算出\n def getThroughput(self):\n for n in range(self.N):\n for r in range(self.R):\n r1 = np.zeros(self.R, dtype = int)\n r1[r] = 1\n self.tp[n,r] = self.alpha[n,r] * self.getConvolution(self.N-1, np.array(self.K) - r1) / self.GK\n #return self.tp\n \n def getUsageRate(self): #利用率算出 P322 式(8.29)lambda / (m * mu) 今回はm = 1 \n self.rho = self.tp / self.mu\n #return self.rho\n \n #クラス数分推移確率行列を生成して、それぞれの到着率を返す関数\n def getProbArrival(self):\n pr = np.zeros((self.R*self.N, self.R*self.N))\n alpha = np.zeros((self.R, self.N))\n for r in range(self.R):\n class_number = 0\n while class_number != 1: #エルゴード性を持つか確認\n p = np.random.rand(self.N, self.N)\n for i, val in enumerate(np.sum(p, axis=1)): #正規化 axis=1で行和\n p[i] /= val\n for i in range(self.N):#推移確率のマージ\n for j in range(self.N):\n pr[r*self.N+i,r*self.N+j] = p[i,j]\n equivalence, class_number = self.getEquivalence(0, 5, p)#0は閾値、5はステップ数\n if class_number == 1: #クラス数が1(エルゴード性を持つ)\n break\n alpha_r = self.getCloseTraffic(p)\n for i, val in enumerate(alpha_r): #到着率を配列alphaに格納\n alpha[r,i] = val\n #print('r = {0}, i = {1}, val = {2}'.format(r,i,val))\n 
return alpha, pr\n \n def getCloseTraffic(self, p):\n e = np.eye(len(p)-1) #次元を1つ小さくする\n pe = p[1:len(p), 1:len(p)].T - e #行と列を指定して次元を小さくする\n lmd = p[0, 1:len(p)] #0行1列からの値を右辺に用いる\n slv = solve(pe, lmd * (-1))\n alpha = np.insert(slv, 0, 1.0) #α1=1を追加\n return alpha\n \n #同値類を求める関数\n def getEquivalence(self, th, roop, p):\n list_number = 0 #空のリストを最初から使用する\n\n #1. 空のリストを作成して、ノードを追加しておく(作成するのはノード数分)\n equivalence = [[] for i in range(len(p))] \n \n #2. Comunicationか判定して、Commnicateの場合リストに登録\n for ix in range(roop):\n p = np.linalg.matrix_power(p.copy(), ix+1) #累乗\n for i in range(len(p)):\n for j in range(i+1, len(p)):\n if(p[i][j] > th and p[j][i] > th): #Communicateの場合\n #3. Communicateの場合登録するリストを選択\n find = 0 #既存リストにあるか\n for k in range(len(p)):\n if i in equivalence[k]: #既存のk番目リストに発見(iで検索)\n find = 1 #既存リストにあった\n if j not in equivalence[k]: #jがリストにない場合登録\n equivalence[k].append(j) \n break\n if j in equivalence[k]: #既存のk番目リストに発見(jで検索)\n find = 1 #既存リストにあった\n if i not in equivalence[k]:\n equivalence[k].append(i) \n break\n if(find == 0):#既存リストにない\n equivalence[list_number].append(i)\n if(i != j):\n equivalence[list_number].append(j)\n list_number += 1\n\n #4. Communicateにならないノードを登録\n for i in range(len(p)):\n find = 0\n for j in range(len(p)):\n if i in equivalence[j]:\n find = 1\n break\n if find == 0:\n equivalence[list_number].append(i)\n list_number += 1\n\n #5. 
エルゴード性の確認(class数が1のとき)\n class_number = 0\n for i in range(len(p)):\n if len(equivalence[i]) > 0:\n class_number += 1\n\n return equivalence, class_number\n \n #データの保存\n def saveCSVi(self, df, fname):\n pdf = pd.DataFrame(df) #データフレームをpandasに変換\n pdf.to_csv(fname, index=True) #index=Falseでインデックスを出力しない\n \nif __name__ == '__main__':\n \n N = 4 #与える\n R = 2 #与える\n K_total = 5 #与える\n K = [(K_total + i) // R for i in range(R)] #クラス人数を自動的に配分する\n mu = np.full((N, R), 1) #サービス率を同じ値で生成(サービス率は調整が必要)\n type_list = np.full(N, 2) #サービスタイプはPSとする\n #K1 = 1\n #K2 = 2\n #K = [K1, K2]\n #mu = np.array([[1/1, 1/2],[1/4, 1/5],[1/8, 1/10],[1/12, 1/16]])\n #type_list = [2, 4, 4, 3] #Node1:Type2プロセッサシェアリング(Processor Sharing: PS), Node2:Type4後着順割込継続型(LCFS-PR), Node3:Type4後着順割込継続型(LCFS-PR), Node4:Type3無限サーバ(Infinite Server: IS), その他Type1(FCFS) \n #alpha = np.array([[1, 1],[0.4, 0.4],[0.4, 0.3],[0.2, 0.3]])\n \n #bcmp = BCMP_lib(N, R, K, mu, type_list, alpha)\n start = time.time()\n bcmp = BCMP_lib(N, R, K, mu, type_list) #この条件で推移確率を自動生成して、到着率をコンストラクタで求める\n mp_set, exp, tp, rho = bcmp.getBCMP()\n elapsed_time = time.time() - start\n print (\"calclation_time:{0}\".format(elapsed_time) + \"[sec]\")\n \n print('周辺分布')\n print(mp_set)\n print('平均系内人数')\n print(exp)\n print('スループット')\n print(tp)\n print('利用率')\n print(rho)","sub_path":"BCMP_lib.py","file_name":"BCMP_lib.py","file_ext":"py","file_size_in_byte":11922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"597713359","text":"\"\"\"Representations Extractor for ``transformers`` toolkit models.\n\nScript that given a file with input sentences and a ``transformers``\nmodel, extracts representations from all layers of the model. The script\nsupports aggregation over sub-words created due to the tokenization of\nthe provided model.\n\nAuthor: Fahim Dalvi\nLast Modified: 2 March, 2020\nLast Modified: 9 September, 2020\nLast Modified: 15 September, 2020\nLast Modified: 1 February, 2020\n\"\"\"\n\nimport argparse\nimport collections\nimport json\nimport sys\n\nimport numpy as np\nimport torch\nimport h5py\n\nfrom tqdm import tqdm\nfrom transformers import AutoTokenizer, AutoModel\n\n\ndef get_model_and_tokenizer(model_desc, device=\"cpu\", random_weights=False):\n \"\"\"\n Automatically get the appropriate ``transformers`` model and tokenizer based\n on the model description\n\n Parameters\n ----------\n model_desc : str\n Model description; can either be a model name like ``bert-base-uncased``\n or a path to a trained model\n\n device : str, optional\n Device to load the model on, cpu or gpu. Default is cpu.\n\n random_weights : bool, optional\n Whether the weights of the model should be randomized. Useful for analyses\n where one needs an untrained model.\n\n Returns\n -------\n model : transformers model\n An instance of one of the transformers.modeling classes\n tokenizer : transformers tokenizer\n An instance of one of the transformers.tokenization classes\n \"\"\"\n model = AutoModel.from_pretrained(model_desc, output_hidden_states=True).to(device)\n tokenizer = AutoTokenizer.from_pretrained(model_desc)\n\n if random_weights:\n print(\"Randomizing weights\")\n model.init_weights()\n\n return model, tokenizer\n\n\ndef aggregate_repr(state, start, end, aggregation):\n \"\"\"\n Function that aggregates activations/embeddings over a span of subword tokens.\n This function will usually be called once per word. 
For example, if we had the sentence::\n\n This is an example\n\n which is tokenized by BPE into::\n\n this is an ex @@am @@ple\n\n The function should be called 4 times::\n\n aggregate_repr(state, 0, 0, aggregation)\n aggregate_repr(state, 1, 1, aggregation)\n aggregate_repr(state, 2, 2, aggregation)\n aggregate_repr(state, 3, 5, aggregation)\n\n Returns a zero vector if end is less than start, i.e. the request is to\n aggregate over an empty slice.\n\n Parameters\n ----------\n state : numpy.ndarray\n Matrix of size [ NUM_LAYERS x NUM_SUBWORD_TOKENS_IN_SENT x LAYER_DIM]\n start : int\n Index of the first subword of the word being processed\n end : int\n Index of the last subword of the word being processed\n aggregation : {'first', 'last', 'average'}\n Aggregation method for combining subword activations\n\n Returns\n -------\n word_vector : numpy.ndarray\n Matrix of size [NUM_LAYERS x LAYER_DIM]\n \"\"\"\n if end < start:\n sys.stderr.write(\"WARNING: An empty slice of tokens was encountered. 
\" +\n \"This probably implies a special unicode character or text \" +\n \"encoding issue in your original data that was dropped by the \" +\n \"transformer model's tokenizer.\\n\")\n return np.zeros((state.shape[0], state.shape[2]))\n if aggregation == \"first\":\n return state[:, start, :]\n elif aggregation == \"last\":\n return state[:, end, :]\n elif aggregation == \"average\":\n return np.average(state[:, start : end + 1, :], axis=1)\n\n\ndef extract_sentence_representations(\n sentence,\n model,\n tokenizer,\n device=\"cpu\",\n include_embeddings=True,\n aggregation=\"last\",\n tokenization_counts={}\n):\n \"\"\"\n Get representations for one sentence\n \"\"\"\n # this follows the HuggingFace API for transformers\n\n special_tokens = [\n x for x in tokenizer.all_special_tokens if x != tokenizer.unk_token\n ]\n special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)\n\n original_tokens = sentence.split(\" \")\n # Add a letter and space before each word since some tokenizers are space sensitive\n tmp_tokens = [\n \"a\" + \" \" + x if x_idx != 0 else x for x_idx, x in enumerate(original_tokens)\n ]\n assert len(original_tokens) == len(tmp_tokens)\n\n with torch.no_grad():\n # Get tokenization counts if not already available\n for token_idx, token in enumerate(tmp_tokens):\n tok_ids = [\n x for x in tokenizer.encode(token) if x not in special_tokens_ids\n ]\n if token_idx != 0:\n # Ignore the first token (added letter)\n tok_ids = tok_ids[1:]\n\n if token in tokenization_counts:\n assert tokenization_counts[token] == len(\n tok_ids\n ), \"Got different tokenization for already processed word\"\n else:\n tokenization_counts[token] = len(tok_ids)\n ids = tokenizer.encode(sentence, truncation=True)\n input_ids = torch.tensor([ids]).to(device)\n # Hugging Face format: tuple of torch.FloatTensor of shape (batch_size, sequence_length, hidden_size)\n # Tuple has 13 elements for base model: embedding outputs + hidden states at each layer\n 
all_hidden_states = model(input_ids)[-1]\n\n if include_embeddings:\n all_hidden_states = [\n hidden_states[0].cpu().numpy() for hidden_states in all_hidden_states\n ]\n else:\n all_hidden_states = [\n hidden_states[0].cpu().numpy()\n for hidden_states in all_hidden_states[1:]\n ]\n all_hidden_states = np.array(all_hidden_states)\n\n print('Sentence : \"%s\"' % (sentence))\n print(\"Original (%03d): %s\" % (len(original_tokens), original_tokens))\n print(\n \"Tokenized (%03d): %s\"\n % (\n len(tokenizer.convert_ids_to_tokens(ids)),\n tokenizer.convert_ids_to_tokens(ids),\n )\n )\n\n # Remove special tokens\n ids_without_special_tokens = [x for x in ids if x not in special_tokens_ids]\n idx_without_special_tokens = [\n t_i for t_i, x in enumerate(ids) if x not in special_tokens_ids\n ]\n filtered_ids = [ids[t_i] for t_i in idx_without_special_tokens]\n assert all_hidden_states.shape[1] == len(ids)\n all_hidden_states = all_hidden_states[:, idx_without_special_tokens, :]\n assert all_hidden_states.shape[1] == len(filtered_ids)\n print(\n \"Filtered (%03d): %s\"\n % (\n len(tokenizer.convert_ids_to_tokens(filtered_ids)),\n tokenizer.convert_ids_to_tokens(filtered_ids),\n )\n )\n segmented_tokens = tokenizer.convert_ids_to_tokens(filtered_ids)\n\n # Perform actual subword aggregation/detokenization\n counter = 0\n detokenized = []\n final_hidden_states = np.zeros(\n (all_hidden_states.shape[0], len(original_tokens), all_hidden_states.shape[2])\n )\n inputs_truncated = False\n\n for token_idx, token in enumerate(tmp_tokens):\n current_word_start_idx = counter\n current_word_end_idx = counter + tokenization_counts[token]\n\n # Check for truncated hidden states in the case where the\n # original word was actually tokenized\n if (tokenization_counts[token] != 0 and current_word_start_idx >= all_hidden_states.shape[1]) \\\n or current_word_end_idx > all_hidden_states.shape[1]:\n final_hidden_states = final_hidden_states[:, :len(detokenized), :]\n inputs_truncated = True\n 
break\n\n final_hidden_states[:, len(detokenized), :] = aggregate_repr(\n all_hidden_states,\n current_word_start_idx,\n current_word_end_idx - 1,\n aggregation,\n )\n detokenized.append(\n \"\".join(segmented_tokens[current_word_start_idx:current_word_end_idx])\n )\n counter += tokenization_counts[token]\n\n print(\"Detokenized (%03d): %s\" % (len(detokenized), detokenized))\n print(\"Counter: %d\" % (counter))\n\n if inputs_truncated:\n print(\"WARNING: Input truncated because of length, skipping check\")\n else:\n assert counter == len(ids_without_special_tokens)\n assert len(detokenized) == len(original_tokens)\n print(\"===================================================================\")\n\n return final_hidden_states, detokenized\n\n\ndef extract_representations(\n model_desc,\n input_corpus,\n output_file,\n device=\"cpu\",\n aggregation=\"last\",\n output_type=\"json\",\n random_weights=False,\n ignore_embeddings=False,\n):\n print(f\"Loading model: {model_desc}\")\n model, tokenizer = get_model_and_tokenizer(\n model_desc, device=device, random_weights=random_weights\n )\n\n print(\"Reading input corpus\")\n\n def corpus_generator(input_corpus_path):\n with open(input_corpus_path, \"r\") as fp:\n for line in fp:\n yield line.strip()\n return\n\n print(\"Preparing output file\")\n if output_type == \"hdf5\":\n if not output_file.endswith(\".hdf5\"):\n print(\n \"[WARNING] Output filename (%s) does not end with .hdf5, but output file type is hdf5.\"\n % (output_file)\n )\n output_file = h5py.File(output_file, \"w\")\n sentence_to_index = {}\n elif output_type == \"json\":\n if not output_file.endswith(\".json\"):\n print(\n \"[WARNING] Output filename (%s) does not end with .json, but output file type is json.\"\n % (output_file)\n )\n output_file = open(output_file, \"w\", encoding=\"utf-8\")\n\n print(\"Extracting representations from model\")\n tokenization_counts = {} # Cache for tokenizer rules\n for sentence_idx, sentence in 
enumerate(corpus_generator(input_corpus)):\n hidden_states, extracted_words = extract_sentence_representations(\n sentence,\n model,\n tokenizer,\n device=device,\n include_embeddings=(not ignore_embeddings),\n aggregation=aggregation,\n tokenization_counts=tokenization_counts\n )\n\n print(\"Hidden states: \", hidden_states.shape)\n print(\"# Extracted words: \", len(extracted_words))\n\n if output_type == \"hdf5\":\n output_file.create_dataset(\n str(sentence_idx),\n hidden_states.shape,\n dtype=\"float32\",\n data=hidden_states,\n )\n # TODO: Replace with better implementation with list of indices\n final_sentence = sentence\n counter = 1\n while final_sentence in sentence_to_index:\n counter += 1\n final_sentence = f\"{sentence} (Occurrence {counter})\"\n sentence = final_sentence\n sentence_to_index[sentence] = str(sentence_idx)\n elif output_type == \"json\":\n output_json = collections.OrderedDict()\n output_json[\"linex_index\"] = sentence_idx\n all_out_features = []\n\n for word_idx, extracted_word in enumerate(extracted_words):\n all_layers = []\n for layer_idx in range(hidden_states.shape[0]):\n layers = collections.OrderedDict()\n layers[\"index\"] = layer_idx\n layers[\"values\"] = [\n round(x.item(), 8)\n for x in hidden_states[layer_idx, word_idx, :]\n ]\n all_layers.append(layers)\n out_features = collections.OrderedDict()\n out_features[\"token\"] = extracted_word\n out_features[\"layers\"] = all_layers\n all_out_features.append(out_features)\n output_json[\"features\"] = all_out_features\n output_file.write(json.dumps(output_json) + \"\\n\")\n\n if output_type == \"hdf5\":\n sentence_index_dataset = output_file.create_dataset(\n \"sentence_to_index\", (1,), dtype=h5py.special_dtype(vlen=str)\n )\n sentence_index_dataset[0] = json.dumps(sentence_to_index)\n\n output_file.close()\n\n\nHDF5_SPECIAL_TOKENS = {\".\": \"__DOT__\", \"/\": \"__SLASH__\"}\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"model_desc\", 
help=\"Name of model\")\n parser.add_argument(\n \"input_corpus\", help=\"Text file path with one sentence per line\"\n )\n parser.add_argument(\n \"output_file\",\n help=\"Output file path where extracted representations will be stored\",\n )\n parser.add_argument(\n \"--aggregation\",\n help=\"first, last or average aggregation for word representation in the case of subword segmentation\",\n default=\"last\",\n )\n parser.add_argument(\n \"--output-type\",\n choices=[\"hdf5\", \"json\"],\n default=\"json\",\n help=\"Output format of the extracted representations\",\n )\n parser.add_argument(\"--disable_cuda\", action=\"store_true\")\n parser.add_argument(\"--ignore_embeddings\", action=\"store_true\")\n parser.add_argument(\n \"--random_weights\",\n action=\"store_true\",\n help=\"generate representations from randomly initialized model\",\n )\n args = parser.parse_args()\n\n assert args.aggregation in [\n \"average\",\n \"first\",\n \"last\",\n ], \"Invalid aggregation option, please specify first, average or last.\"\n\n if not args.disable_cuda and torch.cuda.is_available():\n device = torch.device(\"cuda\")\n else:\n device = torch.device(\"cpu\")\n\n extract_representations(\n args.model_desc,\n args.input_corpus,\n args.output_file,\n device=device,\n aggregation=args.aggregation,\n output_type=args.output_type,\n random_weights=args.random_weights,\n ignore_embeddings=args.ignore_embeddings,\n )\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"neurox/data/extraction/transformers_extractor.py","file_name":"transformers_extractor.py","file_ext":"py","file_size_in_byte":13974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"151305237","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport pdb\nfrom openerp import api, fields, models\n\n_logger = logging.getLogger(__name__)\n\n\nclass ContratosVentas(models.Model):\n _inherit = 'account.analytic.account'\n\n tipo = fields.Selection((('client', 'Cliente'), ('provider', 'Proveedor')), 'Tipo', default='client')\n\n @api.model\n def _prepare_invoice_data(self, contract):\n invoice_data = super(ContratosVentas, self)._prepare_invoice_data(contract)\n _logger.info('Invoice Data: {}'.format(invoice_data))\n if contract.tipo == 'provider':\n account_id = self.env['res.partner'].browse(invoice_data['partner_id']).property_account_payable.id\n invoice_data.update({'type': 'in_invoice', 'account_id': account_id})\n _logger.info('Invoice Data despues de actulizar el tipo: {}'.format(invoice_data))\n\n return invoice_data\n\n\n @api.v7\n def _prepare_invoice_lines(self, cr, uid, contract, fiscal_position_id, context=None):\n fpos_obj = self.pool.get('account.fiscal.position')\n fiscal_position = None\n if fiscal_position_id:\n fiscal_position = fpos_obj.browse(cr, uid, fiscal_position_id, context=context)\n invoice_lines = []\n for line in contract.recurring_invoice_line_ids:\n\n if contract.tipo == 'provider':\n res = line.product_id\n account_id = res.property_account_expense.id\n if not account_id:\n account_id = res.categ_id.property_account_expense_categ.id\n else:\n res = line.product_id\n account_id = res.property_account_income.id\n if not account_id:\n account_id = res.categ_id.property_account_income_categ.id\n\n account_id = fpos_obj.map_account(cr, uid, fiscal_position, account_id)\n\n taxes = res.taxes_id or False\n tax_id = fpos_obj.map_tax(cr, uid, fiscal_position, taxes)\n\n invoice_lines.append((0, 0, {\n 'name': line.name,\n 'account_id': account_id,\n 'account_analytic_id': contract.id,\n 'price_unit': line.price_unit or 0.0,\n 'quantity': line.quantity,\n 'uos_id': line.uom_id.id or False,\n 'product_id': 
line.product_id.id or False,\n 'invoice_line_tax_id': [(6, 0, tax_id)],\n }))\n return invoice_lines","sub_path":"addons-obs/contrato_ventas_ext/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"93278251","text":"from htk.utils import htk_setting\n\ndef help():\n event_handlers = htk_setting('HTK_SLACK_EVENT_HANDLERS')\n commands = ['`%s`' % command for command in sorted(event_handlers.keys())]\n usage_dict = {\n 'description': 'Displays available commands. Available commands are: %s' % ', '.join(commands),\n 'basic' : 'htk: command args',\n 'examples' : [\n 'htk: help help',\n ]\n }\n return usage_dict\n\ndef default():\n usage_dict = {\n 'description' : 'This is not a very useful command; it simply parrots back what you said (to test whether the Slack bot is functioning)',\n 'basic' : 'htk: default',\n 'examples' : [],\n }\n return usage_dict\n\ndef bible():\n usage_dict = {\n 'description' : 'Look up a Bible passage',\n 'basic' : 'htk: bible [esv|nasb] passage',\n 'examples' : [\n 'htk: bible esv John 3:16',\n 'htk: bible nasb 1 Cor 13:4-7',\n 'htk: bible Lamentations 3:22-23',\n 'htk: bible Psalm 119:11',\n ],\n }\n return usage_dict\n\ndef stock():\n usage_dict = {\n 'description' : 'Look up most recent stock quotes',\n 'basic': 'htk: stock SYMBOL[( |;|,)SYMBOLS]',\n 'examples' : [\n 'htk: stock AAPL AMZN GOOG LNKD YHOO',\n ],\n }\n return usage_dict\n\ndef weather():\n usage_dict = {\n 'description' : 'Look up weather',\n 'basic' : 'htk: weather LOCATION',\n 'examples' : [\n 'htk: weather 90210',\n 'htk: weather San Francisco, CA',\n 'htk: weather 1600 Pennsylvania Ave NW, Washington, DC 20500',\n ],\n }\n return usage_dict\n","sub_path":"lib/slack/event_handler_usages.py","file_name":"event_handler_usages.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"30323547","text":"#!/usr/bin/env python3\n\nname = 'AJ32'\ndf = None\ncoding = {'all_the_time': [1,2], 'not_at_all': [4,5], 'mid': [3]}\n\npartition = {'high': [['all_the_time', 'not_at_all', 'mid'], lambda x: high(x)],\n 'low': [['all_the_time', 'not_at_all', 'mid'], lambda x: low(x)]}\n\n\ndef high(*x):\n a, b, c = _organize(x)\n return a > b and a > c\n\ndef low(*x):\n a, b, c = _organize(x)\n return b > a and b > c\n\ndef _rename(col):\n return name + '_' + col\n\ndef _organize(row):\n x = row[0]\n a = x[_rename('all_the_time')]\n b = x[_rename('not_at_all')]\n c = x[_rename('mid')]\n return a, b, c\n\n# high: all_the_time > not_at_all AND all_the_time > mid\n# low: not_at_all > all_the_time AND not_at_all > mid\npartition_tract_nos = None\n","sub_path":"diseases/AJ32.py","file_name":"AJ32.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"214678423","text":"\"\"\"\nGiven two strings, determine if one is an anagram of the other.\nTwo words are anagrams of each other if they are made of the \nsame letters in a different order.\n\"\"\"\nimport re\nfrom collections import Counter\n\ndef is_word(str):\n\t\"\"\"\n\tDetermines whether a string can be considered a word.\n\tA word contains only uppercase and lowercase letters/\n\n \tArgs:\n \tstr: a string.\n\n \tReturns:\n \tTrue if str is a word, False otherwise.\n \t\"\"\"\n\tif not str:\n\t\t# string is empty/None\n\t\treturn False\n\tstr = str.strip()\n\tword_pattern = re.compile(\"^[a-zA-Z]+$\")\n\tif word_pattern.match(str) is None:\n\t\t# string contains characters other than letters\n\t\treturn False\n\treturn True\n\t\ndef are_anagrams(str1, str2):\n\t\"\"\"\n\tDetermines whether one word is an anagram of the other.\n\n \tArgs:\n \tstr1: a string, the first word.\n \tstr2: a string, the second word.\n\n \tReturns:\n \tTrue if str1 and str2 are anagrams, False otherwise.\n \t\"\"\"\n\n\tif (not is_word(str1)) or (not is_word(str2)):\n\t\t# one or both arguments are not words\n\t\treturn False\n\n\t# remove leading and trailing whitespace characters\n\tstr1 = str1.strip()\n\tstr2 = str2.strip()\n\n\n\tif len(str1) != len(str2):\n\t\t# if the strings are of different lengths they cannot be anagrams\n\t\treturn False\n\n\t\"\"\"\n\t# slower\n\tif sorted(str1) == sorted(str2):\n\t\t# if the strings are equal once they have been sorted then they are anagrams\n\t\treturn True\n\telse:\n\t\treturn False\n\t\"\"\"\n\treturn Counter(str1) == Counter(str2)","sub_path":"MilenaFilipovic/assignment-1/anagrams.py","file_name":"anagrams.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"127723135","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Sep 5 07:32:50 2019\r\n\r\n@author: Mahnoor\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\nimg1 = cv2.imread('final.jpeg')\r\nimg = cv2.resize(img1, (960, 540)) \r\n\r\nimgray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\nret, thresh = cv2.threshold(imgray, 127, 255, 0)\r\ncontours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)\r\nprint(\"Number of Contours = \" + str(len(contours)) )\r\n\r\nareas = []\r\nfor c in contours:\r\n areas.append(cv2.contourArea(c))\r\nprint(\"Area of contour(s): \",areas)\r\n \r\nmax = max(areas[0],areas[1]) \r\nsecondmax = min(areas[0],areas[1]) \r\n \r\nfor i in range(2,len(areas)): \r\n if areas[i]>max: \r\n secondmax=max\r\n max=areas[i] \r\n else: \r\n if areas[i]>secondmax: \r\n secondmax=areas[i]\r\n int(secondmax)\r\n \r\nprint(\"Second highest number is : \",str(secondmax)) \r\nindex = areas.index(secondmax)\r\nprint(index)\r\nprint(type(secondmax))\r\n\r\n\r\ncv2.drawContours(img, contours, index, (0, 0, 255), 1)\r\n\r\ncv2.imshow('Image', img)\r\ncv2.imshow('Image Gray', imgray)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()","sub_path":"new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"118896745","text":"import json\n# import time\nimport pandas as pd\nimport geopandas as gpd\nfrom django.contrib.gis.db import models\nfrom django.db.models import JSONField\nfrom django.core import serializers\nfrom django.utils import timezone\nfrom rest_framework_gis.serializers import GeoFeatureModelSerializer \n\nclass PandasModelMixin(models.Model):\n class Meta:\n abstract = True\n\n @classmethod\n def as_dataframe(cls, queryset=None, field_list=None):\n # t1 = time.time()\n\n if queryset is None:\n queryset = cls.objects.all()\n if field_list is None:\n field_list = [_field.name for _field in cls._meta._get_fields(reverse=False)]\n\n data = []\n [data.append([obj.serializable_value(column) for column in field_list]) for obj in queryset]\n\n columns = field_list\n\n df = pd.DataFrame(data, columns=columns)\n # print(\"Execution time without serialization: %s\" % time.time()-t1)\n return df\n\n @classmethod\n def as_dataframe_using_django_serializer(cls, queryset=None):\n # t1 = time.time()\n\n if queryset is None:\n queryset = cls.objects.all()\n\n if queryset.exists():\n serialized_models = serializers.serialize(format='python', queryset=queryset)\n serialized_objects = [s['fields'] for s in serialized_models]\n data = [x.values() for x in serialized_objects]\n\n columns = serialized_objects[0].keys()\n\n df = pd.DataFrame(data, columns=columns)\n df = pd.DataFrame()\n # print(\"Execution time using Django serializer: %s\" % time.time()-t1)\n return df\n\n @classmethod\n def as_dataframe_using_drf_serializer(cls, queryset=None, drf_serializer=None, field_list=None, as_gdf=False):\n from rest_framework import serializers\n\n\n if queryset is None:\n queryset = cls.objects.all()\n\n if drf_serializer is None:\n class CustomModelSerializer(serializers.ModelSerializer):\n class Meta:\n model = cls\n fields = field_list or '__all__'\n\n drf_serializer = CustomModelSerializer\n\n serialized_objects = drf_serializer(queryset, many=True).data\n data = 
[x.values() for x in serialized_objects]\n\n columns = drf_serializer().get_fields().keys()\n\n df = pd.DataFrame(data, columns=columns)\n if as_gdf:\n df = gpd.GeoDataFrame(df)\n\n return df\n\n @classmethod\n def as_geojson_using_drfg_serializer(cls, queryset=None, drfg_serializer=None, field_list=None, geo_field=\"geom\"): #, id_field=None):\n from rest_framework import serializers\n\n if queryset is None:\n queryset = cls.objects.all()\n\n if drfg_serializer is None:\n class CustomModelSerializer(GeoFeatureModelSerializer):\n class Meta:\n model = cls\n fields = field_list or '__all__'\n geo_field = geo_field\n\n drfg_serializer = CustomModelSerializer\n\n table_as_geojson_odicts = drfg_serializer(queryset, many=True).data\n \n # if id_field:\n # features_with_id = []\n # for f in table_as_geojson_odicts['features']:\n # f['id'] = f['properties'][id_field]\n # features_with_id.append(f)\n # table_as_geojson_odicts['features'] = features_with_id\n\n return json.loads(json.dumps(table_as_geojson_odicts))\n\n @classmethod\n def as_dataframe_from_raw_query(cls, sql):\n r = cls.objects.raw(sql)\n cols = r.columns\n # convert the response to a list of dictionaries\n data = [{k: t.__dict__[k] for k in t.__dict__.keys() & set(cols)} for t in r]\n # return as a dataframe\n return pd.DataFrame(data, columns=cols)\n\n # @classmethod\n # def as geodataframe_using_drf_serializer\n\nclass TimestampedMixin(PandasModelMixin):\n \"\"\"Abstract class, provides auto-populating \"created\" and \"modified\" \n fields to any table model that inherits it.\n \"\"\"\n created = models.DateTimeField(editable=False, default=timezone.now)\n modified = models.DateTimeField(default=timezone.now)\n version = models.CharField(null=True, blank=True, max_length=255)\n\n def save(self, *args, **kwargs):\n ''' On save, update timestamps '''\n if not self.id:\n self.created = timezone.now()\n self.modified = timezone.now()\n return super().save(*args, **kwargs)\n\n class Meta:\n abstract = 
True\n\n\nclass MetricMixin(PandasModelMixin):\n\n #code = models.SlugField(max_length=255)\n code = models.CharField(max_length=255, null=True, blank=True)\n label = models.CharField(max_length=1000, null=True, blank=True)\n icon = models.CharField(max_length=1000, null=True, blank=True)\n color = models.CharField(max_length=255, null=True, blank=True)\n definition = models.TextField(null=True, blank=True)\n source = models.TextField(null=True, blank=True)\n\n def __str__(self):\n return self.label\n \n class Meta:\n abstract = True\n ordering = ['code']\n\n\nclass DataPointMixin(MetricMixin):\n\n def data_default():\n return {}\n \n data = JSONField(default=data_default)\n # expected top-level keys:\n # [\n # metadata {dict},\n # properties {dict},\n # metrics {dict},\n # sum {float},\n # mean {float},\n # pct {float},\n # value {float}\n # ]\n\n class Meta:\n abstract = True\n ","sub_path":"server/api/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":5531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"238213126","text":"##encoding=UTF8\n\n\"\"\"\n[EN]create a invert index dictionary from a regular index dictionary\n\n[CN]将一个正向索引的字典转化成反向索引的字典\n 正向索引字典的例子:\n 物品1 : {索引1, 索引2, ...}\n 物品2 : {索引1, 索引2, ...}\n \n 反向索引字典的例子:\n 索引1 : {物品1, 物品2, ...}\n 索引2 : {物品1, 物品2, ...}\n \nimport:\n from angora.DATA.invertindex import invertindex\n\"\"\"\n\nfrom __future__ import print_function\nfrom six import iteritems\n\ndef invertindex(pos_index):\n \"\"\"\n [Args]\n ------\n pos_index: normal index dictionary\n key: value = item_id: set{[index1, index2, ..., ]}\n \n [Returns]\n ---------\n inv_index:\n key: value = index: set{[item_id1, item_id2, ...,]}\n \"\"\"\n invert_index = dict()\n for item_id, indices in iteritems(pos_index):\n for index in indices:\n if index not in invert_index:\n invert_index[index] = set({item_id})\n else:\n invert_index[index].add(item_id)\n return invert_index\n\nif __name__ == \"__main__\":\n def test_inv_index():\n print(\"{:=^40}\".format(\"test_inv_index\"))\n pos_index = {\"let it go\": {\"mp3\", \"pop\", \"dance\"},\n \"can you feel the love tonight\": {\"acc\", \"pop\", \"movie\"},\n \"Just dance\": {\"pop\", \"dance\", \"club\"}}\n print(invertindex(pos_index))\n \n test_inv_index()","sub_path":"angora/DATA/invertindex.py","file_name":"invertindex.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"399287761","text":"import os\nimport sys\nimport random\nimport math\nimport re\nimport time\nimport numpy as np\nimport cv2\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras import layers\nfrom keras import models\nfrom keras import optimizers\n\nimport matplotlib.pyplot as plt\n\nIMAGE_DIR = \"GemAngle\"\nSHAPE = \"Round\"\nTRAIN_DIR = \"Train\"\nVALIDATE_DIR = \"Validate\"\nTEST_DIR = \"Test\"\nPOSSIBLE_ANGLES = [\"0\", \"5\", \"10\", \"15\", \"20\", \"25\", \"30\", \"35\", \"40\"] # Octagon so 0 -> 45\n\nIMAGE_DIM_X = 254\nIMAGE_DIM_Y = 254\n\n\nif __name__ == '__main__':\n\n base_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), IMAGE_DIR + \"\\\\\" + SHAPE)\n \n train_dir = os.path.join(base_dir, TRAIN_DIR)\n print(\"Training images files in {0}\".format(dir))\n\n\n val_dir = os.path.join(base_dir, VALIDATE_DIR)\n print(\"Validation images files in {0}\".format(dir))\n\n test_dir = os.path.join(base_dir, TEST_DIR)\n print(\"Test images files in {0}\".format(dir))\n\n train_datagen = ImageDataGenerator(rescale=1./255)\n test_datagen = ImageDataGenerator(rescale=1./255)\n\n train_generator = train_datagen.flow_from_directory(\n train_dir,\n target_size=(IMAGE_DIM_X, IMAGE_DIM_Y),\n batch_size=20,\n class_mode='categorical')\n \n validation_generator = test_datagen.flow_from_directory(\n val_dir,\n target_size=(IMAGE_DIM_X, IMAGE_DIM_Y),\n batch_size=20,\n class_mode='categorical')\n \n for data_batch, labels_batch in train_generator:\n print(\"Data batch shape:\", data_batch.shape)\n print(\"Labels batch shape:\", labels_batch.shape)\n break\n\n #Build convnet\n model = models.Sequential()\n model.add(layers.Conv2D(32, (3,3), activation='relu', input_shape = (IMAGE_DIM_X, IMAGE_DIM_Y, 3)))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(64, (3,3), activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(128, (3,3), 
activation='relu'))\n model.add(layers.MaxPooling2D((2, 2)))\n model.add(layers.Conv2D(128, (3,3), activation='relu'))\n model.add(layers.Flatten())\n model.add(layers.Dense(512, activation='relu'))\n model.add(layers.Dense(len(POSSIBLE_ANGLES), activation='softmax'))\n\n print(model.summary())\n\n model.compile(loss='mean_squared_error', #mean_squared_error #categorical_crossentropy\n optimizer=optimizers.RMSprop(lr=2e-5),\n metrics=['acc']) \n\n print(\"Classes \" + str(train_generator.class_indices))\n time.sleep(10)\n history = model.fit_generator(\n train_generator,\n steps_per_epoch = 100,\n epochs = 150,\n validation_data = validation_generator,\n validation_steps = 50) \n\n \n model_json = model.to_json()\n with open(\"round_angle_model.json\", \"w\") as json_file:\n json_file.write(model_json) \n model.save_weights(\"cnn_round_angles.h5\")\n print(\"Saved model to disk\")\n \n acc = history.history['acc']\n val_acc = history.history['val_acc']\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n\n epochs = range(1, len(acc) + 1)\n plt.plot(epochs, acc, 'bo', label = \"Train acc\")\n plt.plot(epochs, val_acc, 'b', label = \"Validation acc\")\n plt.title(\"Training and validation acc\")\n plt.legend()\n\n plt.figure()\n\n plt.plot(epochs, loss, 'bo', label = \"Train loss\")\n plt.plot(epochs, val_loss, 'b', label = \"Validation loss\")\n plt.title(\"Training and validation loss\")\n plt.legend()\n\n plt.show()\n\n","sub_path":"gem_angle_train_round.py","file_name":"gem_angle_train_round.py","file_ext":"py","file_size_in_byte":3571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"614723758","text":"import json\n\nfrom flask_cors import cross_origin\nfrom flask_socketio import emit, SocketIO\n\nfrom Utilities.Tocken import verify\n\nmySocket = SocketIO(cors_allowed_origins=\"*\")\n\n@mySocket.on('connect')\ndef on_connect():\n print('socket connected')\n emit('test', 'AAAAA')\n\n@mySocket.on('message')\ndef handle_message(data):\n authDetails = json.loads(data)\n\n if authDetails[\"type\"] != \"authorization\":\n print('Nu e bine!!')\n else:\n verify(authDetails['payload']['token'])","sub_path":"api/v1/sockets.py","file_name":"sockets.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"621427951","text":"from flask import session\nfrom apps.article.models import Article_type\nfrom apps.user.models import User\n\ndef user_type():\n # 获取文章分类\n types = Article_type.query.all()\n # 登录用户\n user = None\n user_id = session.get('uid', None)\n if user_id:\n user = User.query.get(user_id)\n return user, types\n\n","sub_path":"apps/user/util/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"33135813","text":"import smtplib, json, os, sys\n\n#credentials\nimport sib\n\ndefault = {\n\t'sender': 'My SMTP mailer ',\n\t'recipient': '' # email address to receive message when no recipient is specified\n}\n\ndef sendMessage(param):\n\ttry:\n\t\tsubject = param['s']\n\texcept:\n\t\treturn {'status': False, 'description': 'missing subject [s]'}\n\ttry:\n\t\tmessage = param['m']\n\texcept:\n\t\treturn {'status': False, 'description': 'missing body [m]'}\n\n\t# create and send email\n\tmailserver = smtplib.SMTP('smtp-relay.sendinblue.com', 587)\n\tmailserver.login(sib.user, sib.pw)\n\n\t# defaults\n\tsender = default['sender']\n\trecipient = default['recipient']\n\tif 'f' in param:\n\t\tsender = param['f']\n\tif 't' in param:\n\t\trecipient = param['t']\n\tif recipient == '':\n\t\treturn {'status': False, 'description': 'no recipient named, set parameter [t]'}\n\n\temail = 'From: %s\\nTo: %s' % (sender, recipient)\n\temail += '\\nSubject: '+ param['s']\n\temail += \"\\n\\n\" + param['m']\n\tmailserver.sendmail(sender, recipient, email)\n\treturn {'status': True, 'description': 'sent message'}\n\n# file colled directly with JSONic string as argument\nif __name__ == '__main__':\n\ttry:\n\t\targ = sys.argv[1]\n\texcept:\n\t\tprint( \"No parameter provided!\\nquitting\" )\n\t\tsys.exit()\n\ttry:\n\t\tparam = json.loads( arg )\n\texcept:\n\t\tprint( \"Valid JSON not found\\nquitting\" )\n\t\tsys.exit()\n\n\tsendMessage( param )\n","sub_path":"mailer.py","file_name":"mailer.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"9371974","text":"'''https://www.acmicpc.net/problem/1504\n문제:\n - 방향성 없는 그래프 \n - 노드 2 이상 8백 이하\n - 링크 0 이상 20만 이하\n - 임의로 주어진 두 정점은 반드시 통과\n - 한번 이동했던 정점/간선 다시 이동 가능\n - 반드시 최단 경로로 이동\n'''\nimport sys\nimport heapq\nINF = int(1e+9)\nn, e = map(int, input().split())\ngraph = [[]*(n+1) for _ in range(n+1)]\n\nfor _ in range(e):\n n1, n2, w = map(int, input().split())\n graph[n1].append((w, n2))\n graph[n2].append((w, n1))\n\nm1, m2 = map(int, input().split())\n\ncase1 = 0 # 1 -> m1 -> m2 -> N\ncase2 = 0 # 1 -> m2 -> m1 -> N\n\nfor idx, start in enumerate([(0, 1), (0, m1), (0, m2)]):\n distances = [INF] * (n+1)\n prior_q = []\n heapq.heappush(prior_q, start)\n distances[start[1]] = 0\n\n while prior_q:\n cur_dist, cur_loc = heapq.heappop(prior_q)\n if distances[cur_loc] < cur_dist:\n continue\n \n for dist, loc in graph[cur_loc]:\n cumul_dist = cur_dist + dist\n if cumul_dist < distances[loc]:\n distances[loc] = cumul_dist\n heapq.heappush(prior_q, (cumul_dist, loc))\n if idx == 0:\n if case1 < INF:\n case1 += distances[m1]\n if case2 < INF:\n case2 += distances[m2]\n\n elif idx == 1:\n if case1 < INF:\n case1 += distances[m2]\n if case2 < INF:\n case2 += distances[n]\n\n elif idx == 2:\n if case1 < INF:\n case1 += distances[n]\n if case2 < INF:\n case2 += distances[m1]\n\n\nshortest = min(case1, case2)\nif shortest < INF:\n print(shortest)\nelse:\n print(-1)\n\n\n\n\n","sub_path":"shortest-path/1504_특정한최단경로.py","file_name":"1504_특정한최단경로.py","file_ext":"py","file_size_in_byte":1697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"585992547","text":"import scrapy\n\n\nclass IndiangoSpider(scrapy.Spider):\n name = 'IndiaNGO'\n allowed_domains = ['https://www.indiangoslist.com/ngo-address/achukuru-welfare-society-in-itanagar-arunachal-pradesh_AR-2009-0015817']\n start_urls = ['https://www.indiangoslist.com/ngo-address/achukuru-welfare-society-in-itanagar-arunachal-pradesh_AR-2009-0015817']\n\n def parse(self, response):\n ngo_left = response.css(\".ngo_left_head::text\").extract()\n ngo_right = response.css(\".ngo_right_head::text\").extract()\n span = response.xpath(\"//*[@class='ngo_right_head']//text()\").extract()\n print(ngo_right)\n print(span)\n count_1 = 0\n count_2 = 0\n for i in range(len(span)):\n if span[i] == ' ':\n count_1 += 1\n elif span[i] == '\\n':\n count_2 += 1\n for _ in range(count_1):\n span.remove(' ')\n for _ in range(count_2):\n span.remove('\\n')\n print(span)\n span = span[0:len(ngo_left)]\n span[len(span)-4] = span[len(span)-4] + span[len(span)-3]\n span = span[0:len(span)-3] + span[len(span)-2:]\n print(span)\n for item in zip(ngo_left,span):\n\n scraped = {\n 'name' : item[0],\n 'description' : item[1]\n }\n yield scraped\n pass","sub_path":"HarryScrapy/ourfirstscraper/ourfirstscraper/spiders/IndiaNGO.py","file_name":"IndiaNGO.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"572224101","text":"from urllib.error import HTTPError\nimport bs4\nimport urllib.request\nimport datetime\n\ndef podajczas(tabelaminut):\n koniec = False\n for minutaRozkladowa in tabelaminut[hourtable]:\n\n if minutaRozkladowa == -1:\n for godzinyRozkladowe in range(24):\n if koniec:\n break\n for minutyRozkladowe in tabelaminut[godzinyRozkladowe]:\n if minutyRozkladowe != -1:\n czasGodzina = abs((godzinyRozkladowe + 4) % 24)\n print('Najszybszy autobus masz o: ' + str(czasGodzina) + ':' + str(minutyRozkladowe))\n if tabelaminut[godzinyRozkladowe][-1] == tabelaminut[godzinyRozkladowe][0]:\n print('Nastepny o: ' + str(czasGodzina + 1) + ':' + str(\n tabelaminut[godzinyRozkladowe + 1][0]))\n koniec = True\n break\n else:\n indeks = tabelaminut[godzinyRozkladowe].index(minutyRozkladowe)\n print('Nastepny o: ' + str(czasGodzina) + ':' + str(\n tabelaminut[godzinyRozkladowe][indeks + 1]))\n koniec = True\n break\n\n\n\n elif minutaRozkladowa > minuta:\n indeks = (tabelaminut[hourtable].index(minutaRozkladowa))\n print('Najszybszy autobus masz o: ' + str(godzina) + ':' + str(tabelaminut[hourtable][indeks]))\n if tabelaminut[hourtable][-1] == tabelaminut[hourtable][indeks]:\n print('Nastepny o: ' + str(godzina + 1) + ':' + str(tabelaminut[hourtable + 1][0]))\n else:\n print('Nastepny o: ' + str(godzina) + ':' + str(tabelaminut[hourtable][indeks + 1]))\n break\n\n\n\n elif minutaRozkladowa < minuta and minutaRozkladowa == tabelaminut[hourtable][-1]:\n if tabelaminut[hourtable + 1][0] == -1:\n for godzinyRozkladowe in range(24):\n if koniec:\n break\n for minutyRozkladowe in tabelaminut[godzinyRozkladowe]:\n if minutyRozkladowe != -1:\n czasGodzina = abs((godzinyRozkladowe % 24) + 4)\n print('Najszybszy autobus masz o: ' + str(czasGodzina) + ':' + str(minutyRozkladowe))\n if tabelaminut[godzinyRozkladowe][-1] == tabelaminut[godzinyRozkladowe][0]:\n print('Nastepny o: ' + str(czasGodzina + 1) + ':' + str(\n tabelaminut[godzinyRozkladowe + 1][0]))\n koniec = 
True\n break\n else:\n indeks = tabelaminut[godzinyRozkladowe].index(minutyRozkladowe + 1)\n print('Nastepny o: ' + str(czasGodzina) + ':' + str(\n tabelaminut[godzinyRozkladowe][indeks]))\n koniec = True\n break\n print('Najszybszy autobus masz o: ' + str(godzina + 1) + ':' + str(tabelaminut[hourtable + 1][0]))\n if tabelaminut[hourtable + 1][-1] == tabelaminut[hourtable + 1][0]:\n print('Nastepny o: ' + str(godzina + 2) + ':' + str(tabelaminut[hourtable + 2][0]))\n else:\n print('Nastepny o: ' + str(godzina + 1) + ':' + str(tabelaminut[hourtable + 1][1]))\n break\n\n\nprint('''\n _____ _____ _______ __ __ _______ _____ __ __ ______ \n| __ \\ /\\ | __ \\|__ __|\\ \\ / / |__ __||_ _|| \\/ || ____|\n| |__) |/ \\ | |__) | | | \\ \\_/ / | | | | | \\ / || |__ \n| ___// /\\ \\ | _ / | | \\ / | | | | | |\\/| || __| \n| | / ____ \\ | | \\ \\ | | | | | | _| |_ | | | || |____ \n|_| /_/ \\_\\|_| \\_\\ |_| |_| |_| |_____||_| |_||______|\n ''')\n# łączenie się z mpk\nprint('[*] Lacze z \\'http://www.mpk.poznan.pl/component/transport\\'')\nprint()\nprint('[*] Pobieram liste autobusow i tramwajow')\nprint()\nurl = 'http://www.mpk.poznan.pl/component/transport'\ntry:\n html = urllib.request.urlopen(url)\n page = html.read()\n html.close()\n htmlpage = bs4.BeautifulSoup(page, 'html.parser')\n tabela = htmlpage.find('div', {'id': 'MIMMPK'})\n tabelatramwaj = tabela.find_all('div', {'class': 'box_trams'})\n tramwajedzienne = tabelatramwaj[0].text.splitlines()\n tramwajenocne = tabelatramwaj[1].text.splitlines()\n tramwajedzienne = list(filter(None, tramwajedzienne))\n tramwajenocne = list(filter(None, tramwajenocne))\n\n tabelabusy = tabela.find_all('div', {'class': 'box_buses'})\n busydzienne = tabelabusy[0].text.splitlines()\n busynocne = tabelabusy[1].text.splitlines()\n busydzienne = list(filter(None, busydzienne))\n busynocne = list(filter(None, busynocne))\n\n choice = input('''[?] 
Pokaz mi wszystkie autobusy/tramwaje (1)\n lub \n Wpisz numer autobusu/tramwaju (2): ''')\n print()\n if choice == '1':\n x = 0\n print('Autobusy Dzienne')\n for autobus in busydzienne:\n print(str(x) + '. ' + autobus)\n x += 1\n x = 0\n print('Autobusy Nocne')\n for autobus in busynocne:\n print(str(x) + '. ' + autobus)\n x += 1\n x = 0\n print('Tramwaje Dzienne')\n for tramwaj in tramwajedzienne:\n print(str(x) + '. ' + tramwaj)\n x += 1\n x = 0\n print('Tramwaje Nocne')\n for tramwaj in tramwajenocne:\n print(str(x) + '. ' + tramwaj)\n x += 1\n input('Wcisnij enter by zakonczyc proram')\n exit(0)\n else:\n Wybranalinia = input('[?] Wpisz numer tramwaju/autobusu: ')\n print()\n\n url = url + '/' + Wybranalinia\n\n print('[*] Lacze z \\'http://www.mpk.poznan.pl/component/transport/' + Wybranalinia + '\\'')\n print()\n\n html = urllib.request.urlopen(url)\n page = html.read()\n html.close()\n htmlpage = bs4.BeautifulSoup(page, 'html.parser')\n tabelalewo = htmlpage.find('div', {'id': 'box_timetable_left'})\n tabelaprawo = htmlpage.find('div', {'id': 'box_timetable_right'})\n\n print('[*] Parsuje przystanki')\n print()\n if tabelaprawo is None:\n print('[!] Jedna linia ' + tabelalewo.h4.text)\n wybor = 1\n else:\n wybor = input('[?] ' + tabelalewo.h4.text + ' (1) lub ' + tabelaprawo.h4.text + ' (2) : ')\n print()\n if wybor == '1' or tabelaprawo is None:\n print('[*] Wybierz przystanek')\n print()\n tabela = tabelalewo.find_all('ul')\n tabela = tabela[1]\n tabela = tabela.find_all('li')\n numer = 0\n for data in tabela:\n print(str(numer) + '. ' + data.text.replace('\\n', ''))\n numer += 1\n print()\n wybor2 = int(input('[?] Numer przystanku: '))\n print()\n link = (tabela[wybor2].a['href'])\n else:\n print('[*] Wybierz przystanek')\n print()\n tabela = tabelaprawo.find_all('ul')\n tabela = tabela[1]\n tabela = tabela.find_all('li')\n numer = 0\n for data in tabela:\n print(str(numer) + '. 
' + data.text.replace('\\n', ''))\n numer += 1\n print()\n wybor2 = int(input('[?] Numer przystanku: '))\n print()\n link = (tabela[wybor2].a['href'])\n url = 'http://www.mpk.poznan.pl' + link\n\n print('[*] Lacze z \\'http://www.mpk.poznan.pl' + link + '\\'')\n print()\n html = urllib.request.urlopen(url)\n page = html.read()\n html.close()\n print('[*] Parsuje html-a')\n print()\n page_soup = bs4.BeautifulSoup(page, \"html.parser\")\n\n # znajduje dane\n print('[*] Szukam danych')\n print()\n\n # rozklad jazdy\n tabelka = page_soup.findAll(\"tr\", {\"class\": \"MpkTimetableRow\"})\n\n godziny = []\n minutyRobocze = []\n minutySobotnie = []\n minutyOdswietne = []\n\n for rzad in tabelka:\n\n # biore minuty i godziny do tabelki\n x = rzad.findAll(\"td\", {\"class\": \"MpkMinutes\"})\n y = rzad.findAll(\"td\", {\"class\": \"MpkHours\"})\n\n # laduje godzine, przetwarzam i wrzucam do zmiennej\n godzina = y[0].text\n godzina = godzina.strip()\n godzina = (int(godzina))\n godziny.append(godzina)\n\n # laduje minuty, przetwarzam i wrzucam do zmiennej\n minR = x[0].text # robocze\n minS = x[1].text # sobotnie\n minSw = x[2].text # swiateczne\n\n minR = minR.strip()\n minS = minS.strip()\n minSw = minSw.strip()\n\n minR = minR.replace('N', '')\n minR = minR.replace('G', '')\n minR = minR.replace('p', '')\n minR = minR.replace('P', '')\n minR = minR.replace('F', '')\n\n minS = minS.replace('N', '')\n minS = minS.replace('G', '')\n minS = minS.replace('p', '')\n minS = minS.replace('P', '')\n minS = minS.replace('F', '')\n\n minSw = minSw.replace('N', '')\n minSw = minSw.replace('G', '')\n minSw = minSw.replace('p', '')\n minSw = minSw.replace('P', '')\n minSw = minSw.replace('F', '')\n\n # Jesli tabelka nie jest pusta to ma parsowac\n if not minR == '':\n minR = [int(s) for s in minR.split(' ')]\n\n # Jesli pusta to ma wypelnic -1\n else:\n minR = [-1]\n\n if not minS == '':\n minS = [int(s) for s in minS.split(' ')]\n else:\n minS = [-1]\n\n if not minSw == '':\n minSw = 
[int(s) for s in minSw.split(' ')]\n else:\n minSw = [-1]\n\n minutyRobocze.append(minR)\n minutySobotnie.append(minS)\n minutyOdswietne.append(minSw)\n print(' [+] Udalo sie pozyskac dane')\n print()\n\n print('====================================================================')\n dzienTygodnia = datetime.date.isoweekday(datetime.date.today())\n dzien = datetime.datetime.today()\n godzina = dzien.hour\n minuta = dzien.minute\n if minuta < 10:\n minutaString = str(0)+str(minuta)\n else:\n minutaString = str(minuta)\n print('Jest ' + str(dzien.hour) + ':' + minutaString + ' ' + str(dzien.date()))\n hourtable = abs((dzien.hour - 4) % 24)\n\n if dzienTygodnia == 6:\n print(\"Dzisiaj jest sobota\")\n podajczas(minutySobotnie)\n\n elif dzienTygodnia == 7:\n print(\"Dzisiaj jest niedziela\")\n podajczas(minutyOdswietne)\n\n else:\n print(\"Dzisiaj jest dzien roboczy\")\n podajczas(minutyRobocze)\n\n print()\n input(\"Wcisnij enter by zakonczyc program\")\n\nexcept HTTPError as e:\n print(' [-]Nie udalo sie polaczyc: ' + str(e))\n exit(1)\nexcept Exception as e:\n print(' [-]Cos sie nie powiodlo: ' + str(e))\n input('wcisnij enter')\n exit(1)\n","sub_path":"PartyTime.py","file_name":"PartyTime.py","file_ext":"py","file_size_in_byte":10993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"252397430","text":"\n\nfrom xai.brain.wordbase.nouns._polyhedron import _POLYHEDRON\n\n#calss header\nclass _POLYHEDRA(_POLYHEDRON, ):\n\tdef __init__(self,): \n\t\t_POLYHEDRON.__init__(self)\n\t\tself.name = \"POLYHEDRA\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"polyhedron\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_polyhedra.py","file_name":"_polyhedra.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"230255500","text":"import heapq\nn = int(input())\nmheap = list()\nfor i in range(n):\n cmnd = str(input())\n if cmnd[0] == 'I' :\n cmnd = cmnd[7:]\n heapq.heappush(mheap,(10**10 - int(cmnd)) )\n else:\n print(10**10 - heapq.heappop(mheap))\n","sub_path":"data/heap-max.py","file_name":"heap-max.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"546183392","text":"#\n# Copyright (c) 2023 Airbyte, Inc., all rights reserved.\n#\n\nimport logging\nfrom unittest.mock import Mock\n\nimport pytest\nfrom airbyte_cdk.models import SyncMode\nfrom google.ads.googleads.errors import GoogleAdsException\nfrom google.ads.googleads.v11.errors.types.errors import ErrorCode, GoogleAdsError, GoogleAdsFailure\nfrom google.ads.googleads.v11.errors.types.request_error import RequestErrorEnum\nfrom google.api_core.exceptions import DataLoss, InternalServerError, ResourceExhausted, TooManyRequests\nfrom grpc import RpcError\nfrom source_google_ads.google_ads import GoogleAds\nfrom source_google_ads.streams import ClickView, cyclic_sieve\n\nfrom .common import MockGoogleAdsClient as MockGoogleAdsClient\n\n\n@pytest.fixture\ndef mock_ads_client(mocker, config):\n \"\"\"Mock google ads library method, so it returns mocked Client\"\"\"\n mocker.patch(\"source_google_ads.google_ads.GoogleAdsClient.load_from_dict\", return_value=MockGoogleAdsClient(config))\n\n\n# EXPIRED_PAGE_TOKEN exception will be raised when page token has expired.\nexception = GoogleAdsException(\n error=RpcError(),\n failure=GoogleAdsFailure(errors=[GoogleAdsError(error_code=ErrorCode(request_error=RequestErrorEnum.RequestError.EXPIRED_PAGE_TOKEN))]),\n call=RpcError(),\n request_id=\"test\",\n)\n\n\ndef mock_response_1():\n yield [\n {\"segments.date\": \"2021-01-01\", \"click_view.gclid\": \"1\"},\n {\"segments.date\": \"2021-01-02\", \"click_view.gclid\": \"2\"},\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"3\"},\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"4\"},\n ]\n raise exception\n\n\ndef mock_response_2():\n yield [\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"3\"},\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"4\"},\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"5\"},\n {\"segments.date\": \"2021-01-04\", \"click_view.gclid\": \"6\"},\n {\"segments.date\": 
\"2021-01-05\", \"click_view.gclid\": \"7\"},\n ]\n\n\nclass MockGoogleAds(GoogleAds):\n count = 0\n\n def parse_single_result(self, schema, result):\n return result\n\n def send_request(self, query: str, customer_id: str):\n self.count += 1\n if self.count == 1:\n return mock_response_1()\n else:\n return mock_response_2()\n\n\ndef test_page_token_expired_retry_succeeds(mock_ads_client, config, customers):\n \"\"\"\n Page token expired while reading records on date 2021-01-03\n The latest read record is {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"4\"}\n It should retry reading starting from 2021-01-03, already read records will be reread again from that date.\n It shouldn't read records on 2021-01-01, 2021-01-02\n \"\"\"\n customer_id = next(iter(customers)).id\n stream_slice = {\"customer_id\": customer_id, \"start_date\": \"2021-01-01\", \"end_date\": \"2021-01-15\"}\n\n google_api = MockGoogleAds(credentials=config[\"credentials\"])\n incremental_stream_config = dict(\n api=google_api,\n conversion_window_days=config[\"conversion_window_days\"],\n start_date=config[\"start_date\"],\n customers=customers,\n end_date=\"2021-04-04\",\n )\n stream = ClickView(**incremental_stream_config)\n stream.get_query = Mock()\n stream.get_query.return_value = \"query\"\n\n result = list(stream.read_records(sync_mode=SyncMode.incremental, cursor_field=[\"segments.date\"], stream_slice=stream_slice))\n assert len(result) == 9\n assert stream.get_query.call_count == 2\n stream.get_query.assert_called_with({\"customer_id\": customer_id, \"start_date\": \"2021-01-03\", \"end_date\": \"2021-01-15\"})\n\n\ndef mock_response_fails_1():\n yield [\n {\"segments.date\": \"2021-01-01\", \"click_view.gclid\": \"1\"},\n {\"segments.date\": \"2021-01-02\", \"click_view.gclid\": \"2\"},\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"3\"},\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"4\"},\n ]\n\n raise exception\n\n\ndef 
mock_response_fails_2():\n yield [\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"3\"},\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"4\"},\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"5\"},\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"6\"},\n ]\n\n raise exception\n\n\nclass MockGoogleAdsFails(MockGoogleAds):\n def send_request(self, query: str, customer_id: str):\n self.count += 1\n if self.count == 1:\n return mock_response_fails_1()\n else:\n return mock_response_fails_2()\n\n\ndef test_page_token_expired_retry_fails(mock_ads_client, config, customers):\n \"\"\"\n Page token has expired while reading records within date \"2021-01-03\", it should raise error,\n because Google Ads API doesn't allow filter by datetime.\n \"\"\"\n customer_id = next(iter(customers)).id\n stream_slice = {\"customer_id\": customer_id, \"start_date\": \"2021-01-01\", \"end_date\": \"2021-01-15\"}\n\n google_api = MockGoogleAdsFails(credentials=config[\"credentials\"])\n incremental_stream_config = dict(\n api=google_api,\n conversion_window_days=config[\"conversion_window_days\"],\n start_date=config[\"start_date\"],\n end_date=\"2021-04-04\",\n customers=customers,\n )\n stream = ClickView(**incremental_stream_config)\n stream.get_query = Mock()\n stream.get_query.return_value = \"query\"\n\n with pytest.raises(GoogleAdsException):\n list(stream.read_records(sync_mode=SyncMode.incremental, cursor_field=[\"segments.date\"], stream_slice=stream_slice))\n\n stream.get_query.assert_called_with({\"customer_id\": customer_id, \"start_date\": \"2021-01-03\", \"end_date\": \"2021-01-15\"})\n assert stream.get_query.call_count == 2\n\n\ndef mock_response_fails_one_date():\n yield [\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"3\"},\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"4\"},\n {\"segments.date\": \"2021-01-03\", \"click_view.gclid\": \"5\"},\n {\"segments.date\": \"2021-01-03\", 
\"click_view.gclid\": \"6\"},\n ]\n\n raise exception\n\n\nclass MockGoogleAdsFailsOneDate(MockGoogleAds):\n def send_request(self, query: str, customer_id: str):\n return mock_response_fails_one_date()\n\n\ndef test_page_token_expired_it_should_fail_date_range_1_day(mock_ads_client, config, customers):\n \"\"\"\n Page token has expired while reading records within date \"2021-01-03\",\n it should raise error, because Google Ads API doesn't allow filter by datetime.\n Minimum date range is 1 day.\n \"\"\"\n customer_id = next(iter(customers)).id\n stream_slice = {\"customer_id\": customer_id, \"start_date\": \"2021-01-03\", \"end_date\": \"2021-01-04\"}\n\n google_api = MockGoogleAdsFailsOneDate(credentials=config[\"credentials\"])\n incremental_stream_config = dict(\n api=google_api,\n conversion_window_days=config[\"conversion_window_days\"],\n start_date=config[\"start_date\"],\n end_date=\"2021-04-04\",\n customers=customers,\n )\n stream = ClickView(**incremental_stream_config)\n stream.get_query = Mock()\n stream.get_query.return_value = \"query\"\n\n with pytest.raises(GoogleAdsException):\n list(stream.read_records(sync_mode=SyncMode.incremental, cursor_field=[\"segments.date\"], stream_slice=stream_slice))\n\n stream.get_query.assert_called_with({\"customer_id\": customer_id, \"start_date\": \"2021-01-03\", \"end_date\": \"2021-01-04\"})\n assert stream.get_query.call_count == 1\n\n\n@pytest.mark.parametrize(\"error_cls\", (ResourceExhausted, TooManyRequests, InternalServerError, DataLoss))\ndef test_retry_transient_errors(mocker, config, customers, error_cls):\n mocker.patch(\"time.sleep\")\n credentials = config[\"credentials\"]\n credentials.update(use_proto_plus=True)\n api = GoogleAds(credentials=credentials)\n mocked_search = mocker.patch.object(api.ga_service, \"search\", side_effect=error_cls(\"Error message\"))\n incremental_stream_config = dict(\n api=api,\n conversion_window_days=config[\"conversion_window_days\"],\n 
start_date=config[\"start_date\"],\n end_date=\"2021-04-04\",\n customers=customers,\n )\n stream = ClickView(**incremental_stream_config)\n customer_id = next(iter(customers)).id\n stream_slice = {\"customer_id\": customer_id, \"start_date\": \"2021-01-03\", \"end_date\": \"2021-01-04\"}\n records = []\n with pytest.raises(error_cls):\n records = list(stream.read_records(sync_mode=SyncMode.incremental, cursor_field=[\"segments.date\"], stream_slice=stream_slice))\n assert mocked_search.call_count == 5\n assert records == []\n\n\ndef test_cyclic_sieve(caplog):\n original_logger = logging.getLogger(\"test\")\n sieve = cyclic_sieve(original_logger, fraction=10)\n for _ in range(20):\n sieve.info(\"Ground Control to Major Tom\")\n sieve.info(\"Your circuit's dead, there's something wrong\")\n sieve.info(\"Can you hear me, Major Tom?\")\n sieve.bump()\n assert len(caplog.records) == 6 # 20 * 3 / 10\n","sub_path":"dts/airbyte/airbyte-integrations/connectors/source-google-ads/unit_tests/test_streams.py","file_name":"test_streams.py","file_ext":"py","file_size_in_byte":8952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"81800671","text":"import pandas as pd \r\nimport numpy as np \r\nfrom collections import OrderedDict\r\n\r\ncorr = pd.read_csv(\"C:/Users/pratyush/Downloads/corr1.csv\",index_col = 0)\r\nstart = 0\r\nleve = 0\r\ncurr = -1\r\ncurr_lev_sorted = []\r\ncompleted_pars = {}\r\n\r\ndef saturate(thres):\r\n\tif thres<=2.5:\r\n\t\treturn False\r\n\telse:\r\n\t\treturn True\r\n\r\n# run each time the student has taken a assignment..\r\ndef get_path(theta1,levels,parents,threshold=2.5):\r\n\tglobal start,leve,curr,curr_lev_sorted,completed_pars\r\n\ttheta = {}\r\n\tinv_th = {}\r\n\tfor i in theta1:\r\n\t\ttheta[i[\"t_id\"]] = i[\"theta\"]\r\n\t\tinv_th[i[\"theta\"][-1]] = i[\"t_id\"]\r\n\tsort_t = sorted(list(inv_th.keys()))\r\n\r\n\t# if starts.. at level 0..\r\n\tif leve==0 and start==0:\r\n\t\tstart = 1\r\n\t\tthet0 = dict([(theta[i][-1],i) for i in levels[0]])\r\n\t\tcurr_lev_sorted = sorted(list(thet0.keys()))\r\n\t\tcurr_lev_sorted = [thet0[i] for i in curr_lev_sorted]\r\n\t\tcurr = curr_lev_sorted[-1]\r\n\t\tcurr_lev_sorted = curr_lev_sorted[:-1]\r\n\r\n\telif leve>=0 and start==1:\r\n\t\tif saturate(theta[curr][-1]) and len(curr_lev_sorted)==0:\r\n\t\t\tcompleted_pars[curr] = (theta[curr][-1]-theta[curr][0])/theta[curr][0]\r\n\t\t\tleve += 1\r\n\t\t\tthet1 = dict([(theta[i][-1],i) for i in levels[leve]])\r\n\t\t\tthet2 = dict([(i,theta[i][-1]) for i in levels[leve]])\r\n\t\t\tthet2 = {k: v for k, v in sorted(thet2.items(), key=lambda item: item[1],reverse = True)}\r\n\t\t\tpars = {}\r\n\t\t\tfor i in levels[leve]:\r\n\t\t\t\tfor j in parents[i]:\r\n\t\t\t\t\tif i in pars:\r\n\t\t\t\t\t\tpars[i] += completed_pars[j]\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tpars[i] = completed_pars[j]\r\n\r\n\t\t\tpars = {k: v for k, v in sorted(pars.items(), key=lambda item: item[1],reverse = True)}\r\n\t\t\t\r\n\t\t\tprev = list(pars.values())[0]\r\n\t\t\tprev_k =list(pars.keys())[0]\r\n\t\t\tsort = [list(pars.keys())[0]]\r\n\r\n\t\t\tfor i,j in 
zip(list(pars.keys())[1:],list(pars.values())[1:]):\r\n\t\t\t\tif j==prev:\r\n\t\t\t\t\ttemp = [prev_k]\r\n\t\t\t\t\twhile j==prev:\r\n\t\t\t\t\t\ttemp.append(i)\r\n\t\t\t\t\tfor k,o in zip(list(temp.keys()),list(temp.values())):\r\n\t\t\t\t\t\tprint(k,o)\r\n\t\t\t\t\t\tif k in temp:\r\n\t\t\t\t\t\t\tsort.append(k)\r\n\t\t\t\telse:\r\n\t\t\t\t\tsort.append(i)\r\n\r\n\t\t\tcurr_lev_sorted = sort\r\n\t\t\tcurr = curr_lev_sorted[-1]\r\n\t\t\tcurr_lev_sorted = curr_lev_sorted[:-1]\r\n\t\t\t\r\n\r\n\t\telif saturate(theta[curr][-1]) and len(curr_lev_sorted)!=0:\r\n\t\t\tcompleted_pars[curr] = (theta[curr][-1]-theta[curr][0])/theta[curr][0]\r\n\t\t\tcurr = curr_lev_sorted[-1]\r\n\t\t\tcurr_lev_sorted = curr_lev_sorted[:-1]\r\n\t\telse:\r\n\t\t\tprint(\"continue...\")\r\n\r\n\r\n# run only once\r\ndef pathfinder():\r\n\t# make the graph..\r\n\tcorr.columns = [int(i) for i in corr.columns]\r\n\tcorr.index = [int(i) for i in corr.index]\r\n\ttags = list(corr.columns)\r\n\tqueue = []\r\n\tlevels = {}\r\n\tparents = {}\r\n\r\n\t# parents immediate..\r\n\tfor i in corr.columns:\r\n\t\tfor j,k in zip(corr.index,corr[i]):\r\n\t\t\tif k==1:\r\n\t\t\t\tif i in parents:\r\n\t\t\t\t\tparents[i].append(j)\r\n\t\t\t\telse:\r\n\t\t\t\t\tparents[i]=[j]\r\n\r\n\t# init the levels..\r\n\tfor i in tags:\r\n\t\tlevels[i] = 0\r\n\t\r\n\t# find the level zero nodes..\r\n\tqueue.extend(list(corr.columns[(corr == 0).all()]))\r\n\twhile len(queue)>0:\r\n\t\tnode = queue[0]\r\n\t\tqueue = queue[1:]\r\n\t\tlev = levels[node]\r\n\r\n\t\tdf = corr==1\r\n\t\tdf = df.ix[node]\r\n\t\tdf = list(df[df].index)\r\n\t\tfor i in df:\r\n\t\t\tlevels[i] = lev + 1\r\n\t\t\tqueue.append(i)\r\n\r\n\treturn levels,parents\r\n\r\ninp0 = 
[{\"t_id\":1,\"theta\":[1.2,1.3,1.2]},{\"t_id\":2,\"theta\":[1.3,1.2,1.5]}\r\n\t\t\t,{\"t_id\":3,\"theta\":[0.6,1.1,1.2]},{\"t_id\":4,\"theta\":[1.4,1.8,2.1]}\r\n\t\t\t,{\"t_id\":5,\"theta\":[1.7,1.8,2.0]},{\"t_id\":6,\"theta\":[2.1,2.3,2.5]}\r\n\t\t\t,{\"t_id\":7,\"theta\":[1.5,1.7,2.3]}]\r\n\r\ninp1 = [{\"t_id\":1,\"theta\":[1.2,1.3,1.2]},{\"t_id\":2,\"theta\":[1.3,1.2,1.5]}\r\n\t\t\t,{\"t_id\":3,\"theta\":[0.6,1.1,1.2]},{\"t_id\":4,\"theta\":[1.4,1.8,2.1]}\r\n\t\t\t,{\"t_id\":5,\"theta\":[1.7,1.8,2.0]},{\"t_id\":6,\"theta\":[2.1,2.3,2.5]}\r\n\t\t\t,{\"t_id\":7,\"theta\":[1.5,1.7,2.3,2.4,2.4,2.4]}]\r\n\r\ninp2 = [{\"t_id\":1,\"theta\":[1.2,1.3,1.2]},{\"t_id\":2,\"theta\":[1.3,1.2,1.5]}\r\n\t\t\t,{\"t_id\":3,\"theta\":[0.6,1.1,1.2]},{\"t_id\":4,\"theta\":[1.4,1.8,2.1]}\r\n\t\t\t,{\"t_id\":5,\"theta\":[1.7,1.8,2.0]},{\"t_id\":6,\"theta\":[2.1,2.3,2.5]}\r\n\t\t\t,{\"t_id\":7,\"theta\":[1.5,1.7,2.3,2.4,2.4,2.4,2.6,2.8,2.7]}]\r\n\r\ninp3 = [{\"t_id\":1,\"theta\":[1.2,1.3,1.2,2.0,2.4,2.6]},{\"t_id\":2,\"theta\":[1.3,1.2,1.5]}\r\n\t\t\t,{\"t_id\":3,\"theta\":[0.6,1.1,1.2]},{\"t_id\":4,\"theta\":[1.4,1.8,2.1]}\r\n\t\t\t,{\"t_id\":5,\"theta\":[1.7,1.8,2.0]},{\"t_id\":6,\"theta\":[2.1,2.3,2.5]}\r\n\t\t\t,{\"t_id\":7,\"theta\":[1.5,1.7,2.3,2.4,2.4,2.4,2.6,2.8,2.7]}]\r\n\r\n# inp1 = [{\"t_id\":1,\"theta\":[1.2,1.3,1.2,2.0,2.4,2.6]},{\"t_id\":2,\"theta\":[1.3,1.2,1.5]}\r\n# \t\t\t,{\"t_id\":3,\"theta\":[0.6,1.1,1.2]},{\"t_id\":4,\"theta\":[1.4,1.8,2.1]}\r\n# \t\t\t,{\"t_id\":5,\"theta\":[1.7,1.8,2.0]},{\"t_id\":6,\"theta\":[2.1,2.3,2.5]}\r\n# \t\t\t,{\"t_id\":7,\"theta\":[1.5,1.7,2.3,2.6,2.8,2.7]}]\r\n\r\nlevels1,parents = pathfinder()\r\nlevels = {}\r\nfor i in levels1:\r\n\tif levels1[i] in levels:\r\n\t\tlevels[levels1[i]].append(i)\r\n\telse:\r\n\t\tlevels[levels1[i]] = [i]\r\n\r\nprint(levels)\r\n#first 
run...\r\nget_path(inp0,levels,parents)\r\nprint(curr)\r\nprint(leve)\r\nprint(curr_lev_sorted)\r\n\r\nget_path(inp1,levels,parents)\r\nprint(curr)\r\nprint(leve)\r\nprint(curr_lev_sorted)\r\n\r\nget_path(inp2,levels,parents)\r\nprint(curr)\r\nprint(leve)\r\nprint(curr_lev_sorted)\r\n\r\nget_path(inp3,levels,parents)\r\nprint(curr)\r\nprint(leve)\r\nprint(curr_lev_sorted)\r\n\t\t","sub_path":"prep/paths.py","file_name":"paths.py","file_ext":"py","file_size_in_byte":5068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"38954718","text":"from pulp import *\nfrom modules.lb_extract import LBExtract\nfrom modules.lb_transform import LBTransform\n\nimport numpy as np\nimport pandas as pd\nimport pickle\nimport my_config as mc\n\nstart_date = '2019/01/01'\nend_date = '2019/12/31'\n\next = LBExtract(start_date, end_date, False)\ntr = LBTransform(start_date, end_date)\n#ext.mock_flag = True\n#ext.set_mock_path()\n\npd.set_option('display.max_columns', 200)\npd.set_option('display.max_rows', 200)\n\ntemp_df = ext.get_raceuma_table_base()\n#temp_df = tr.normalize_raceuma_df(raceuma_base_df)\n\ndf = temp_df[[\"競走コード\", \"馬番\", \"デフォルト得点\", \"確定着順\", \"単勝配当\", \"複勝配当\", \"得点V3\"]]\n\ndict_path = mc.return_base_path(False)\nintermediate_folder = dict_path + 'intermediate/'\nwith open(intermediate_folder + 'lb_v1_lb_v1/raceuma_ens/export_data.pkl', 'rb') as f:\n lb_v1_df = pickle.load(f)\nwith open(intermediate_folder + 'lb_v2_lb_v2/raceuma_ens/export_data.pkl', 'rb') as f:\n lb_v2_df = pickle.load(f)\nwith open(intermediate_folder + 'lb_v3_lb_v3/raceuma_ens/export_data.pkl', 'rb') as f:\n lb_v3_df = pickle.load(f)\n\nmy_df = pd.merge(lb_v1_df, lb_v2_df , on=[\"RACE_KEY\", \"UMABAN\", \"target\"]).rename(columns={\"predict_std_x\": \"偏差v1\", \"predict_std_y\":\"偏差v2\"})\nmy_df = pd.merge(my_df, lb_v3_df , on=[\"RACE_KEY\", \"UMABAN\", \"target\"]).rename(columns={\"predict_std\": \"偏差v3\", \"RACE_KEY\": \"競走コード\", \"UMABAN\": \"馬番\"})\nwin_df = my_df[my_df[\"target\"] == \"WIN_FLAG\"]\njiku_df = my_df[my_df[\"target\"] == \"JIKU_FLAG\"]\nana_df = my_df[my_df[\"target\"] == \"ANA_FLAG\"]\nwin_df.loc[:, \"勝ち偏差\"] = win_df[\"偏差v1\"] * 0.50 + win_df[\"偏差v2\"] * 0.30 + win_df[\"偏差v3\"] * 0.20\njiku_df.loc[:, \"軸偏差\"] = jiku_df[\"偏差v1\"] * 0.50 + jiku_df[\"偏差v2\"] * 0.25 + jiku_df[\"偏差v3\"] * 0.25\nana_df.loc[:, \"穴偏差\"] = ana_df[\"偏差v1\"] * 0.45 + ana_df[\"偏差v2\"] * 0.10 + ana_df[\"偏差v3\"] * 0.45\n\nmy_score_df = pd.merge(win_df[[\"競走コード\", \"馬番\", \"勝ち偏差\"]], jiku_df[[\"競走コード\", 
\"馬番\", \"軸偏差\"]], on=[\"競走コード\", \"馬番\"])\nmy_score_df = pd.merge(my_score_df, ana_df[[\"競走コード\", \"馬番\", \"穴偏差\"]], on=[\"競走コード\", \"馬番\"])\n\ndf = pd.merge(df, my_score_df, on=[\"競走コード\", \"馬番\"])\ndf.loc[:, \"勝\"] = df[\"確定着順\"].apply(lambda x: 1 if x == 1 else 0)\ndf.loc[:, \"連\"] = df[\"確定着順\"].apply(lambda x: 1 if x in (1, 2) else 0)\ndf.loc[:, \"複\"] = df[\"確定着順\"].apply(lambda x: 1 if x in (1, 2, 3) else 0)\nprint(df.head())\nprint(\"------ check ------\")\nprint(\"df\", df.shape)\n\niter_range = 5\nscore_rate = range(0, 1, iter_range)\nv3_rate = range(0, 1, iter_range)\nwin_rate = range(20, 101, iter_range)\njiku_rate = range(20, 101, iter_range)\nana_rate = range(20, 101, iter_range)\n\ns1_list = []\nv3_list = []\nwin_list = []\njiku_list = []\nana_list = []\n\ncnt_list = []\nav_win_list = []\nav_ren_list = []\nav_fuku_list = []\ntan_ret_list = []\nfuku_ret_list = []\n\n#df = df.head(200)\ntotal_count = len(df)\n\nfor s1 in score_rate:\n print(s1)\n for v3 in v3_rate:\n for win in win_rate:\n for jiku in jiku_rate:\n for ana in ana_rate:\n if s1 + v3 + win + jiku + ana == 100:\n print(\"s1:\" + str(s1) + \" win:\" + str(win) + \" jiku:\" + str(jiku) + \" ana:\" + str(ana))\n temp_df = df\n temp_df.loc[:, \"最適得点\"] = df[\"デフォルト得点\"] * s1/100 + df[\"得点V3\"] * v3/100 + df[\"勝ち偏差\"] * win/100 + df[\"軸偏差\"] * jiku/100 + df[\"穴偏差\"] * ana/100\n target_df = temp_df[temp_df[\"最適得点\"] >= 55]\n cnt_list.append(len(target_df))\n s1_list.append(s1)\n v3_list.append(v3)\n win_list.append(win)\n jiku_list.append(jiku)\n ana_list.append(ana)\n av_win_list.append(round(target_df[\"勝\"].mean() * 100, 2))\n av_ren_list.append(round(target_df[\"連\"].mean() * 100, 2))\n av_fuku_list.append(round(target_df[\"複\"].mean() * 100, 2))\n tan_ret_list.append(round(target_df[\"単勝配当\"].mean(), 2))\n fuku_ret_list.append(round(target_df[\"複勝配当\"].mean(), 2))\n\n\nscore_df = pd.DataFrame(\n data={'score_rate': s1_list, 'win_rate': win_list, 'jiku_rate': jiku_list, 'ana_rate': 
ana_list,\n 'count': cnt_list, 'v3_rate': v3_list,\n 'av_win': av_win_list, 'av_ren': av_ren_list, 'av_fuku': av_fuku_list, 'tan_return': tan_ret_list , 'fuku_return': fuku_ret_list},\n columns=['score_rate', 'v3_rate', 'win_rate', 'jiku_rate', 'ana_rate', 'count', 'av_win', 'av_ren', 'av_fuku', 'tan_return', 'fuku_return']\n)\nscore_df.loc[:,'tan_return_rank'] = score_df['tan_return'].rank(ascending=False)\nscore_df.loc[:,'fuku_return_rank'] = score_df['tan_return'].rank(ascending=False)\nscore_df.loc[:,'av_win_rank'] = score_df['av_win'].rank(ascending=False)\nscore_df.loc[:,'av_ren_rank'] = score_df['av_ren'].rank(ascending=False)\nscore_df.loc[:,'av_fuku_rank'] = score_df['av_fuku'].rank(ascending=False)\nscore_df.loc[:,'total_rank'] = score_df['tan_return_rank'] + score_df['fuku_return_rank'] \\\n + score_df['av_win_rank'] + score_df['av_ren_rank'] + score_df['av_fuku_rank']\n\nprint(\"----------- tan_return -----------------\")\nprint(score_df.sort_values('tan_return', ascending=False).head())\nprint(\"----------- fuku_return -----------------\")\nprint(score_df.sort_values('fuku_return', ascending=False).head())\nprint(\"----------- av_win -----------------\")\nprint(score_df.sort_values('av_win', ascending=False).head())\nprint(\"----------- av_ren -----------------\")\nprint(score_df.sort_values('av_ren', ascending=False).head())\nprint(\"----------- av_fuku -----------------\")\nprint(score_df.sort_values('av_fuku', ascending=False).head())\n\nprint(\"----------- total_rank -----------------\")\ndump_df = score_df.sort_values('total_rank').head(10)\nprint(dump_df)\nprint(score_df.describe())\n\nwith open(dict_path + 'temp_analysis/output/find_parameter.pkl', 'wb') as f:\n pickle.dump(dump_df, f)\n","sub_path":"temp_analysis/find_parameter.py","file_name":"find_parameter.py","file_ext":"py","file_size_in_byte":6201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"36699713","text":"#!/usr/bin/env python\n\nimport uuid\nimport qrcode\nimport json\nimport locale\nimport time\nimport os\nimport re\nimport pandas as pd\nimport matplotlib as mpl\nmpl.use('Agg')\nfrom matplotlib.ticker import MaxNLocator\nfrom matplotlib import rcParams\nfrom matplotlib import colors\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom xml.etree.ElementTree import Element, SubElement, dump, ElementTree\n\nfrom workflow_sw import workflow_software\nfrom workflow_logger import log_progress, log_info, log_version, log_error\nfrom workflow_exec import run_command, run_command_file_handle\n\n__modname__ = \"qc_report_generation_v2.py\"\n\nclass germline_qc_report(object):\n\n def __init__(self, sample_name, output_dir, pipeline, pipeline_name,\n instrument, specimen, reagent_kit, cycle, run_name,\n log_file, workflow_dir, script_dir, final_bam):\n self._sample_name = sample_name\n self._output_dir = output_dir\n self._pipeline = pipeline\n self._pipeline_name = pipeline_name\n\n self._instrument = str(instrument).replace(\"_\", \" \")\n self._specimen = str(specimen).replace(\"_\", \" \")\n self._reagent_kit = str(reagent_kit).replace(\"_\", \" \")\n self._cycle = str(cycle).replace(\"_\", \" \")\n self._run_name = str(run_name).replace(\"_\", \" \")\n\n self._xml_file = \"%s/data/stat/%s.xml\"%(self._output_dir, self._sample_name)\n self._pdf_file = \"%s/data/stat/%s.pdf\"%(self._output_dir, self._sample_name)\n self._log_file = log_file\n self._final_bam = final_bam\n\n self._default_coverage_heatmap = \"%s/modules/images/logo-yellow.png\"%(workflow_dir)\n self._default_control_file = \"%s/assay_reference/control_ampstat.txt\"%(script_dir)\n self._stat_json = \"%s/data/stat/%s.stat.json\"%(self._output_dir, self._sample_name)\n\n self._sw = workflow_software(\"germline\")\n\n pattern=r'^[Cc][Oo][Nn][Tt][Rr][Oo][Ll]'\n if re.match(pattern,sample_name):\n self._is_control=\"yes\"\n else:\n self._is_control=\"no\"\n\n def 
indent(self, elem, level=0):\n i = \"\\n\" + level*\" \"\n if len(elem):\n if not elem.text or not elem.text.strip():\n elem.text = i + \" \"\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n for elem in elem:\n self.indent(elem, level+1)\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n else:\n if level and (not elem.tail or not elem.tail.strip()):\n elem.tail = i\n\n def main_page(self):\n main_page = Element(\"main_page\")\n\n ### sample name\n SubElement(main_page, \"sample_name\").text = self._sample_name\n\n ### report ID\n report_id = str(uuid.uuid4()) # Convert UUID format to a Python string.\n report_id = report_id.upper() # Make all characters uppercase.\n report_id = report_id.replace(\"-\",\"\") # Remove the UUID '-'.\n self._report_id = report_id[0:6]\n SubElement(main_page, \"report_id\").text = self._report_id\n\n ### qrcode\n qr = qrcode.QRCode(version=1, error_correction=qrcode.constants.ERROR_CORRECT_L, box_size=10, border=4)\n validation_url=\"http://www.ngenebio.com\"\n qr.add_data(validation_url)\n qr.make(fit=True)\n qr_img = qr.make_image()\n output = \"%s/data/stat/%s_validation_qr.png\"%(self._output_dir, self._sample_name)\n qr_img.save(output, kind='png')\n SubElement(main_page, \"qrcode_path\").text = output\n\n return main_page\n\n def header_contents(self):\n header_contents = Element(\"header_contents\")\n SubElement(header_contents, \"content\").text = \"%s (%s) Quality Report %s (%s)\"%(self._pipeline_name, self._pipeline, self._sample_name, self._report_id)\n return header_contents\n\n def analysis_information(self):\n qc_contents = Element(\"qc_contents\")\n SubElement(qc_contents, \"qc_type\").text = \"Analysis Information\"\n\n sn = Element(\"analysis_information\")\n SubElement(sn, \"name\").text = \"Sample Name\"\n SubElement(sn, \"value\").text = self._sample_name\n qc_contents.append(sn)\n\n at = Element(\"analysis_information\")\n SubElement(at, \"name\").text = \"Analysis Type\"\n if 
self._specimen == 'FFPE':\n SubElement(at, \"value\").text = \"Somatic Variation\"\n else:\n SubElement(at, \"value\").text = \"Germline Variation\"\n qc_contents.append(at)\n\n assay = Element(\"analysis_information\")\n SubElement(assay, \"name\").text = \"Assay Type\"\n SubElement(assay, \"value\").text = \"%s (%s)\" % (self._pipeline_name, self._pipeline)\n qc_contents.append(assay)\n\n pv = Element(\"analysis_information\")\n SubElement(pv, \"name\").text = \"Pipeline Version\"\n SubElement(pv, \"value\").text = \"v1.0\"\n qc_contents.append(pv)\n\n rd = Element(\"analysis_information\")\n SubElement(rd, \"name\").text = \"Report Date\"\n locale.setlocale(locale.LC_ALL, \"\")\n now = time.localtime()\n format_date = \"%04d-%02d-%02d %02d:%02d:%02d\"%(now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec)\n SubElement(rd, \"value\").text = format_date\n qc_contents.append(rd)\n\n return qc_contents\n\n def sequencing_information(self):\n qc_contents = Element(\"qc_contents\")\n SubElement(qc_contents, \"qc_type\").text = \"Sequencing Information\"\n \n it = Element(\"sequencing_information\")\n SubElement(it, \"name\").text = \"Instrument\"\n SubElement(it, \"value\").text = self._instrument\n qc_contents.append(it)\n \n sm = Element(\"sequencing_information\")\n SubElement(sm, \"name\").text = \"Specimen\"\n SubElement(sm, \"value\").text = self._specimen\n qc_contents.append(sm)\n \n #rk = Element(\"sequencing_information\")\n #SubElement(rk, \"name\").text = \"Reagent Kit\"\n #SubElement(rk, \"value\").text = self._reagent_kit\n #qc_contents.append(rk)\n \n #cc = Element(\"sequencing_information\")\n #SubElement(cc, \"name\").text = \"Cycle\"\n #SubElement(cc, \"value\").text = self._cycle\n #qc_contents.append(cc)\n \n rn = Element(\"sequencing_information\")\n SubElement(rn, \"name\").text = \"Run Name\"\n SubElement(rn, \"value\").text = self._run_name\n qc_contents.append(rn)\n \n return qc_contents\n \n def raw_fastq_format(self, 
js):\n qc_contents = Element(\"qc_contents\")\n SubElement(qc_contents, \"qc_type\").text = \"Raw FASTQ Format\"\n \n ft = Element(\"raw_fastq_format\")\n SubElement(ft, \"name\").text = \"FASTQ File Type\"\n SubElement(ft, \"value\").text = js['fastqc']['raw_fastqc']['summary']['File type']\n qc_contents.append(ft)\n \n ec = Element(\"raw_fastq_format\")\n SubElement(ec, \"name\").text = \"Quality Encoding\"\n SubElement(ec, \"value\").text = js['fastqc']['raw_fastqc']['summary']['Encoding']\n qc_contents.append(ec)\n \n return qc_contents\n\n def data_summary(self):\n data_summary_file = \"%s/data/stat/%s.panel.txt\"%(self._output_dir, self._sample_name)\n f = open(data_summary_file, \"r\")\n lines = f.readlines()\n \n qc_contents = Element(\"qc_contents\")\n SubElement(qc_contents, \"qc_type\").text = \"Data Summary\"\n ds = Element(\"data_summary\")\n \n for line in lines:\n if line.startswith(\"No\"):\n continue\n line = line.replace(\"\\n\", \"\")\n sp = line.split(\"\\t\")\n\n row = Element(\"row\")\n SubElement(row, \"col\").text = sp[0]\n SubElement(row, \"col\").text = sp[1].replace(\">\", \">\")\n SubElement(row, \"col\").text = sp[2]\n ds.append(row)\n\n qc_contents.append(ds)\n f.close()\n return qc_contents\n\n def raw_read_quality(self):\n qc_contents = Element(\"qc_contents\")\n SubElement(qc_contents, \"qc_type\").text = \"Raw Read Quality\"\n \n rrq = Element(\"raw_read_quality\")\n bsq = Element(\"base_sequence_quality\")\n SubElement(bsq, \"r1\").text = \"%s/data/basecall/fastqc/%s_normalize.1_fastqc/Images/per_base_quality.png\"%(self._output_dir, self._sample_name)\n SubElement(bsq, \"r2\").text = \"%s/data/basecall/fastqc/%s_normalize.2_fastqc/Images/per_base_quality.png\"%(self._output_dir, self._sample_name)\n rrq.append(bsq)\n \n sqs = Element(\"base_sequence_quality_score\")\n SubElement(sqs, \"r1\").text = \"%s/data/basecall/fastqc/%s_normalize.1_fastqc/Images/per_sequence_quality.png\"%(self._output_dir, self._sample_name)\n 
SubElement(sqs, \"r2\").text = \"%s/data/basecall/fastqc/%s_normalize.2_fastqc/Images/per_sequence_quality.png\"%(self._output_dir, self._sample_name)\n rrq.append(sqs)\n \n sld = Element(\"sequence_length_distribution\")\n SubElement(sld, \"r1\").text = \"%s/data/basecall/fastqc/%s_normalize.1_fastqc/Images/sequence_length_distribution.png\"%(self._output_dir, self._sample_name)\n SubElement(sld, \"r2\").text = \"%s/data/basecall/fastqc/%s_normalize.2_fastqc/Images/sequence_length_distribution.png\"%(self._output_dir, self._sample_name)\n rrq.append(sld)\n \n qc_contents.append(rrq)\n return qc_contents\n\n def alignment(self):\n qc_contents = Element(\"qc_contents\")\n SubElement(qc_contents, \"qc_type\").text = \"Alignment\"\n \n align = Element(\"alignment\")\n alignment_stat_plot = Element(\"alignment_stat_plot\")\n SubElement(alignment_stat_plot, \"img\").text = \"%s/data/stat/%s.alignment.jpg\"%(self._output_dir, self._sample_name)\n align.append(alignment_stat_plot)\n \n mapping_quality_stat_plot = Element(\"mapping_quality_stat_plot\")\n SubElement(mapping_quality_stat_plot, \"img\").text = \"%s/data/stat/%s.mapqual.jpg\"%(self._output_dir, self._sample_name)\n align.append(mapping_quality_stat_plot)\n \n qc_contents.append(align)\n return qc_contents\n\n def mapping_statistics(self):\n qc_contents = Element(\"qc_contents\")\n SubElement(qc_contents, \"qc_type\").text = \"Mapping Statistics\"\n \n map_stat = Element(\"mapping_statistics\")\n SubElement(map_stat, \"sample_name\").text = self._sample_name\n read_stat_plot = Element(\"read_stat_plot\")\n SubElement(read_stat_plot, \"img\").text = \"%s/data/stat/%s.mapstat.png\"%(self._output_dir, self._sample_name)\n map_stat.append(read_stat_plot)\n\n qc_contents.append(map_stat)\n return qc_contents\n\n def softclip(self):\n softclip_file = \"%s/data/stat/%s_softclip.txt\"%(self._output_dir, self._sample_name)\n qc_contents = Element(\"qc_contents\")\n SubElement(qc_contents, \"qc_type\").text = \"Soft 
Clipping Statistics\"\n\n softclip = Element(\"softclip\")\n softclip_png = Element(\"softclip_png\")\n softclip_plot = \"%s/data/stat/%s_softclip.png\"%(self._output_dir, self._sample_name)\n SubElement(softclip_png, \"img\").text = softclip_plot\n softclip.append(softclip_png)\n\n softclip_data = Element(\"softclip_data\")\n\n try:\n f = open(softclip_file, \"r\")\n lines = f.readlines()\n total_count = 0\n front_clip = 0\n end_clip = 0\n both_clip = 0\n\n for line in lines:\n if line.startswith(\"Amplicon\"):\n continue\n line = line.replace(\"\\n\", \"\")\n sp = line.split(\"\\t\")\n \n total_count += int(sp[1])\n front_clip += int(sp[3])\n end_clip += int(sp[4])\n both_clip += int(sp[5])\n\n row = Element(\"row\")\n for _str in sp:\n SubElement(row, \"col\").text = _str\n softclip_data.append(row)\n\n SubElement(softclip, \"total_count\").text = str(total_count)\n SubElement(softclip, \"front_clip\").text = str(front_clip)\n SubElement(softclip, \"end_clip\").text = str(end_clip)\n SubElement(softclip, \"both_clip\").text = str(both_clip)\n f.close()\n except Exception as ex_str:\n print(ex_str)\n\n softclip.append(softclip_data)\n\n cmd = [\"Rscript\",\n self._sw.workflow_software[\"r_softclip\"],\n softclip_file,\n softclip_plot]\n run_command(__modname__, \" \".join(cmd), self._log_file)\n \n warn_file = \"%s/data/stat/%s_warn.bed\"%(self._output_dir, self._sample_name)\n \n try:\n warn_file_size = os.path.getsize(warn_file)\n \n if (warn_file_size != 0):\n SubElement(softclip, \"is_softclipped\").text = \"true\"\n warn_data = Element(\"warn_data\")\n\n f = open(warn_file, \"r\")\n lines = f.readlines()\n \n for line in lines:\n line = line.replace(\"\\n\", \"\")\n sp = line.split(\"\\t\")\n row = Element(\"row\")\n SubElement(row, \"col\").text = sp[3]\n SubElement(row, \"col\").text = str(round(float(sp[5]), 2))\n SubElement(row, \"col\").text = sp[0]\n SubElement(row, \"col\").text = str(int(sp[1]) + 100)\n SubElement(row, \"col\").text = str(int(sp[2]) 
- 100)\n warn_data.append(row)\n softclip.append(warn_data)\n f.close()\n else:\n SubElement(softclip, \"is_softclipped\").text = \"false\"\n except Exception as ex_str:\n print(ex_str)\n\n qc_contents.append(softclip)\n return qc_contents\n\n def coverage_and_depth(self):\n qc_contents = Element(\"qc_contents\")\n SubElement(qc_contents, \"qc_type\").text = \"Coverage and Depth\"\n\n cov_plot1 = \"%s/data/stat/%s_BRCA1_coverage.png\"%(self._output_dir, self._sample_name)\n cov_plot2 = \"%s/data/stat/%s_BRCA2_coverage.png\"%(self._output_dir, self._sample_name)\n\n cmd1 = [\"Rscript\",\n self._sw.workflow_software[\"r_coverage_and_depth\"],\n self._sw.workflow_software[\"ngb_txdb\"],\n self._final_bam,\n \"41196000\",\n \"41277450\",\n \"chr17\",\n cov_plot1]\n run_command(__modname__, \" \".join(cmd1), self._log_file)\n\n cmd2 = [\"Rscript\",\n self._sw.workflow_software[\"r_coverage_and_depth\"],\n self._sw.workflow_software[\"ngb_txdb\"],\n self._final_bam,\n \"32889500\",\n \"32974000\",\n \"chr13\",\n cov_plot2]\n run_command(__modname__, \" \".join(cmd2), self._log_file)\n \n cov_and_depth = Element(\"coverage_and_depth\")\n brca1_coverage_plot = Element(\"brca1_coverage_plot\")\n SubElement(brca1_coverage_plot, \"img\").text = cov_plot1\n cov_and_depth.append(brca1_coverage_plot)\n brca2_coverage_plot = Element(\"brca2_coverage_plot\")\n SubElement(brca2_coverage_plot, \"img\").text = cov_plot2\n cov_and_depth.append(brca2_coverage_plot)\n\n qc_contents.append(cov_and_depth)\n return qc_contents\n\n def amplicon_coverage(self):\n qc_contents = Element(\"qc_contents\")\n SubElement(qc_contents, \"qc_type\").text = \"Amplicon Coverage\"\n\n if self._pipeline == '447':\n amp_cov = Element(\"amplicon_coverage_brca_plus\")\n else:\n amp_cov = Element(\"amplicon_coverage\")\n amp_stat_file = \"%s/data/stat/%s_picard_ampstat.txt\"%(self._output_dir, self._sample_name)\n brca1 = Element(\"brca1\")\n brca2 = Element(\"brca2\")\n\n try:\n f = open(amp_stat_file, 
\"r\")\n lines = f.readlines()\n\n for line in lines:\n line = line.replace(\"\\n\", \"\")\n sp = line.split(\"\\t\")\n \n if (sp[0].startswith(\"name\")):\n continue\n else:\n row = Element(\"row\")\n SubElement(row, \"col\").text = sp[0]\n SubElement(row, \"col\").text = str(round(float(sp[1]), 2))\n SubElement(row, \"col\").text = sp[2]\n SubElement(row, \"col\").text = sp[3]\n SubElement(row, \"col\").text = sp[4]\n if (sp[0].startswith(\"BRCA1\")):\n brca1.append(row)\n elif (sp[0].startswith(\"BRCA2\")):\n brca2.append(row)\n f.close()\n except Exception as ex_str:\n print(ex_str)\n\n amp_cov.append(brca1)\n amp_cov.append(brca2)\n\n # has control\n control_file = \"%s/data/stat/control_ampstat.txt\"%(self._output_dir)\n if os.path.exists(control_file):\n has_control = \"yes\"\n else:\n has_control = \"no\"\n SubElement(amp_cov, \"has_control\").text = has_control\n\n # mapped amplicon coverage (1) plot\n amp_cov_plot = \"%s/data/stat/%s_mapped_amp_cov.png\"%(self._output_dir, self._sample_name)\n\n #if self._specimen == 'FFPE':\n if self._pipeline == '447':\n is_control = 'yes'\n else:\n is_control = self._is_control\n\n cmd = [\"Rscript\",\n self._sw.workflow_software[\"r_mapped_amplicon_coverage\"],\n is_control,\n amp_stat_file,\n self._default_control_file,\n amp_cov_plot]\n run_command(__modname__, \" \".join(cmd), self._log_file)\n\n mapped_amplicon_coverage = Element(\"mapped_amplicon_coverage\")\n SubElement(mapped_amplicon_coverage, \"img\").text = amp_cov_plot\n amp_cov.append(mapped_amplicon_coverage)\n\n # mapped amplicon coverage (2) heatmap\n if self._pipeline == '445':\n mapped_amplicon_coverage_heatmap = Element(\"mapped_amplicon_coverage_heatmap\")\n SubElement(mapped_amplicon_coverage_heatmap, \"img\").text = self.generate_coverage_heatmap()\n amp_cov.append(mapped_amplicon_coverage_heatmap)\n\n qc_contents.append(amp_cov)\n return qc_contents\n\n def generate_coverage_heatmap(self):\n amp_picard_file = 
\"%s/data/stat/%s_picard_ampstat.txt\"%(self._output_dir, self._sample_name)\n\n df_control = pd.read_table(self._default_control_file)\n df_control = df_control.set_index('Amplicon')\n control = df_control[['Count']].apply(lambda x: (x - x.min()) / (x.max() - x.min()))\n control.columns = ['control_coverage']\n\n df_sample = pd.read_table(amp_picard_file)\n df_sample.columns = ['Amplicon', 'coverage', 'chrom', 'start', 'end']\n df_sample = df_sample.set_index('Amplicon')\n sample = df_sample[['coverage']].apply(lambda x: (x - x.min()) / (x.max() - x.min()))\n\n merged_cov = pd.merge(control, sample, left_index=True, right_index=True, how='inner')\n merged_cov['diff'] = merged_cov['control_coverage'] - merged_cov['coverage']\n\n plt.figure(figsize=(26, 38))\n plt.rcParams['font.size'] = 20\n plt.rcParams['axes.labelsize'] = 30\n plt.rcParams['axes.labelweight'] = 'bold'\n plt.rcParams['xtick.labelsize'] = 20\n plt.rcParams['ytick.labelsize'] = 20\n plt.rcParams['legend.fontsize'] = 20\n plt.rcParams['figure.titlesize'] = 24\n\n if self._is_control == 'yes':\n sns.heatmap(sample, square=False, annot=True, annot_kws={\"size\": 14}, cmap='Reds')\n else:\n sns.heatmap(merged_cov, square=False, annot=True, annot_kws={\"size\": 14}, cmap='Reds')\n \n hfont = {'fontname':'Droid Sans'}\n plt.xticks(fontsize=20,**hfont)\n plt.yticks(fontsize=20,**hfont)\n plt.ylabel(\"Amplicon Regions\",**hfont)\n plt.xlabel(\"Coverages\",**hfont)\n\n heat_map = '%s/data/stat/%s_heatmap.png'%(self._output_dir, self._sample_name)\n plt.savefig(heat_map)\n return heat_map\n\n def warning(self):\n qc_contents = Element(\"qc_contents\")\n SubElement(qc_contents, \"qc_type\").text = \"Warning\"\n warning = Element(\"warning\")\n warn_file = \"%s/data/stat/%s_warn.bed\"%(self._output_dir, self._sample_name)\n \n try:\n warn_file_size = os.path.getsize(warn_file)\n \n if (warn_file_size != 0):\n SubElement(warning, \"is_softclipped\").text = \"true\"\n warn_data = Element(\"warn_data\")\n f = 
open(warn_file, \"r\")\n lines = f.readlines()\n for line in lines:\n line = line.replace(\"\\n\", \"\")\n sp = line.split(\"\\t\")\n row = Element(\"row\")\n SubElement(row, \"col\").text = sp[3]\n SubElement(row, \"col\").text = str(round(float(sp[5]), 2))\n SubElement(row, \"col\").text = sp[0]\n SubElement(row, \"col\").text = str(int(sp[1]) + 100)\n SubElement(row, \"col\").text = str(int(sp[2]) - 100)\n warn_data.append(row)\n warning.append(warn_data)\n f.close()\n else:\n SubElement(warning, \"is_softclipped\").text = \"false\"\n except Exception as ex_str:\n print(ex_str)\n\n primer_del_file = \"%s/data/stat/%s.primer.del.bed\"%(self._output_dir, self._sample_name)\n\n try:\n primer_del_file_size = os.path.getsize(primer_del_file)\n \n if (primer_del_file_size != 0):\n SubElement(warning, \"is_primer_del\").text = \"true\"\n primer_del_data = Element(\"primer_del_data\")\n f = open(primer_del_file, \"r\")\n lines = f.readlines()\n for line in lines:\n line = line.replace(\"\\n\", \"\")\n sp = line.split(\"\\t\")\n row = Element(\"row\")\n SubElement(row, \"col\").text = sp[3]\n SubElement(row, \"col\").text = sp[0]\n SubElement(row, \"col\").text = sp[1]\n SubElement(row, \"col\").text = sp[2]\n primer_del_data.append(row)\n warning.append(primer_del_data)\n f.close()\n else:\n SubElement(warning, \"is_primer_del\").text = \"false\"\n except Exception as ex_str:\n print(ex_str) \n \n qc_contents.append(warning)\n return qc_contents\n\n def variants(self):\n qc_contents = Element(\"qc_contents\")\n SubElement(qc_contents, \"qc_type\").text = \"Variants\"\n\n if self._specimen == 'FFPE':\n variants = Element(\"variants_FFPE\")\n else:\n variants = Element(\"variants\")\n json_file = \"%s/data/variant/%s.json\"%(self._output_dir, self._sample_name)\n variant_count = 0\n variants_data = Element(\"variants_data\")\n\n with open(json_file) as data_file:\n _file = data_file.readlines()\n\n for line in _file:\n _data = json.loads(line)\n data = 
_data.get(_data.keys()[0])\n exac_format = str(data.get(\"variant_information\").get(\"exac_format\"))\n sp1 = exac_format.split(\"-\")\n chrom = sp1[0]\n pos = sp1[1]\n ref_allele = sp1[2]\n alt_allele = sp1[3]\n allele_fraction = str(round(float(data.get(\"allele\").get(\"allele_fraction\")), 2))\n zygosity = str(data.get(\"allele\").get(\"zygosity\"))\n type_of_allele = str(data.get(\"allele\").get(\"type_of_allele\"))\n\n row = Element(\"row\")\n SubElement(row, \"col\").text = chrom\n SubElement(row, \"col\").text = pos\n SubElement(row, \"col_\").text = ref_allele\n SubElement(row, \"col_\").text = alt_allele\n SubElement(row, \"col\").text = allele_fraction\n\n if self._specimen == 'FFPE':\n pass\n else:\n SubElement(row, \"col\").text = zygosity\n SubElement(row, \"col\").text = type_of_allele\n variants_data.append(row)\n variant_count += 1\n\n SubElement(variants_data, \"total_variant\").text = str(variant_count)\n variants.append(variants_data)\n qc_contents.append(variants)\n return qc_contents\n\n def software_reference_list(self):\n qc_contents = Element(\"qc_contents\")\n SubElement(qc_contents, \"qc_type\").text = \"Software Reference Database List\"\n software = Element(\"software\")\n software_data = Element(\"software_data\")\n software_list = {}\n f = open(self._log_file, \"r\")\n for line in f:\n if line.find(\"[VERSION] >\") != -1:\n line = line.replace(\"\\n\", \"\")\n sp1 = line.split(\"[VERSION] >\")\n sp2 = sp1[1].strip().split(\",\")\n software_list[str(sp2[1])] = sp1[1].strip()\n f.close()\n for i in software_list:\n sp = software_list[i].split(\",\")\n \n row = Element(\"row\")\n SubElement(row, \"col\").text = sp[1]\n SubElement(row, \"col\").text = sp[2]\n SubElement(row, \"col\").text = sp[3]\n SubElement(row, \"col\").text = sp[5]\n SubElement(row, \"col_\").text = sp[4]\n software_data.append(row)\n \n software.append(software_data)\n qc_contents.append(software)\n return qc_contents\n\n def run(self):\n\n log_progress(__modname__, 
\"Germline QC report generation start\", f=self._log_file)\n\n ### stat json\n f = open(self._stat_json)\n js = json.loads(f.read())\n f.close()\n\n ### root element\n qc_report = Element(\"qc_report\")\n\n ### main page\n log_progress(__modname__, \"Create main page\", f=self._log_file)\n main_page = self.main_page()\n qc_report.append(main_page)\n\n ### header contents\n log_progress(__modname__, \"Insert header contents\", f=self._log_file)\n header_contents = self.header_contents()\n qc_report.append(header_contents)\n\n ### contents\n log_progress(__modname__, \"Insert analysis information\", f=self._log_file)\n analysis_information = self.analysis_information()\n qc_report.append(analysis_information)\n\n log_progress(__modname__, \"Insert sequencing information\", f=self._log_file)\n sequencing_information = self.sequencing_information()\n qc_report.append(sequencing_information)\n\n log_progress(__modname__, \"Insert raw fastq format\", f=self._log_file)\n raw_fastq_format = self.raw_fastq_format(js)\n qc_report.append(raw_fastq_format)\n\n log_progress(__modname__, \"Insert data summary\", f=self._log_file)\n data_summary = self.data_summary()\n qc_report.append(data_summary)\n\n log_progress(__modname__, \"Insert raw read quality\", f=self._log_file)\n raw_read_quality = self.raw_read_quality()\n qc_report.append(raw_read_quality)\n\n log_progress(__modname__, \"Insert alignment\", f=self._log_file)\n alignment = self.alignment()\n qc_report.append(alignment)\n\n log_progress(__modname__, \"Insert mapping_statistics\", f=self._log_file)\n mapping_statistics = self.mapping_statistics()\n qc_report.append(mapping_statistics)\n\n log_progress(__modname__, \"Insert softclip\", f=self._log_file)\n softclip = self.softclip()\n qc_report.append(softclip)\n\n log_progress(__modname__, \"Insert coverage and depth\", f=self._log_file)\n coverage_and_depth = self.coverage_and_depth()\n qc_report.append(coverage_and_depth)\n\n log_progress(__modname__, \"Insert 
amplicon coverage\", f=self._log_file)\n amplicon_coverage = self.amplicon_coverage()\n qc_report.append(amplicon_coverage)\n\n log_progress(__modname__, \"Insert warnings\", f=self._log_file)\n warning = self.warning()\n qc_report.append(warning)\n\n log_progress(__modname__, \"Insert variants\", f=self._log_file)\n variants = self.variants()\n qc_report.append(variants)\n\n log_progress(__modname__, \"Insert softwares\", f=self._log_file)\n software_reference_list = self.software_reference_list()\n qc_report.append(software_reference_list)\n\n ### generate xml file\n log_progress(__modname__, \"Generate XML file for data sources\", f=self._log_file)\n self.indent(qc_report)\n #dump(qc_report)\n ElementTree(qc_report).write(self._xml_file)\n\n ### Generate pdf file\n log_progress(__modname__, \"Generate final PDF file\", f=self._log_file)\n cmd = [self._sw.workflow_software[\"fop\"],\n \"-c\", self._sw.workflow_software[\"fop_config\"],\n \"-xml\", self._xml_file,\n \"-xsl\", self._sw.workflow_software[\"qc_report_template\"],\n \"-pdf\", self._pdf_file]\n run_command(__modname__, \" \".join(cmd), self._log_file)\n \n log_progress(__modname__, \"Germline QC report generation finished\", f=self._log_file)\n","sub_path":"pipelines/DNA_Germline/pipelines/tmp/qc_report_generation_v2.py","file_name":"qc_report_generation_v2.py","file_ext":"py","file_size_in_byte":29381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"519489392","text":"# -*- coding: utf-8 -*-\nfrom PyQt5.QtWidgets import QApplication, QVBoxLayout, QMainWindow, QTableWidgetItem\n\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nclass Layout(QVBoxLayout):\n def __init__(self, root):\n super().__init__()\n self.figure = plt.figure()\n self.canvas = FigureCanvas(self.figure)\n # self.toolbar = NavigationToolbar(self.canvas, root)\n # self.addWidget(self.toolbar)\n self.addWidget(self.canvas)\n\n def plot_dnt_current(self, graph_data): # graph_data в формате [[\"Имя1, ед.изм.\", [data]], [\"Имя2, ед.изм.\", [data]]...]\n try:\n # отрисуем график\n self.figure.clear()\n # create an axis\n axes = self.figure.add_subplot(111)\n axes_twinx = axes.twinx()\n # plot data\n time = graph_data[0][1]\n current = graph_data[3][1]\n current_pos_list = []\n current_neg_list = []\n for var in current:\n current_pos = var if var > 1E-12 else 1E-12\n current_neg = -var if var < -1E-12 else 1E-12\n current_pos_list.append(current_pos)\n current_neg_list.append(current_neg)\n axes.plot(time, current_pos_list, line_type_from_index(0), label=u\"Ток +, А\")\n axes.plot(time, current_neg_list, line_type_from_index(1), label=u\"Ток -, А\")\n # подсчет статистических данных\n current_pos_mean = float(np.mean(current_pos_list))\n current_neg_mean = float(np.mean(current_neg_list))\n current_pos_std = float(np.std(current_pos_list))\n current_neg_std = float(np.std(current_neg_list))\n #\n data_text = \" mean_pos=%.3E; std_pos=%.2E;\\n mean_neg=%.3E; std_neg=%.2E;\" \\\n % (current_pos_mean, current_pos_std, current_neg_mean, current_neg_std)\n self.figure.text(0.01, 0.95, data_text)\n #\n axes.set_title(\"График показаний ДНТ\")\n axes.set_xlabel(\"Время, с\")\n axes.set_ylim(bottom=1E-12)\n axes.set_yscale(\"log\")\n axes.legend(loc=2)\n axes.grid()\n 
# refresh canvas\n self.canvas.draw()\n except Exception as error:\n print(\"plot_dnt_current \" + error)\n pass\n\n def plot_osc_dnt(self, graph_data, osc_data_type=0):\n try:\n # отрисуем график\n self.figure.clear()\n # create an axis\n axes = self.figure.add_subplot(111)\n # plot data\n time = graph_data[0][1]\n read_flag = 0\n for num, var in enumerate(graph_data[1:]):\n if var[1]:\n read_flag = 1\n axes.plot(time, var[1], line_type_from_index(num), label=var[0])\n if read_flag:\n axes.set_title(\"Осциллограмма ДНТ\")\n axes.set_xlabel(\"Время, с\")\n axes.legend(loc=0)\n axes.grid()\n # refresh canvas\n self.canvas.draw()\n except Exception as error:\n print(error)\n\n\ndef line_type_from_index(n):\n color_line = [\"r\", \"b\", \"g\", \"c\", \"m\", \"y\", \"k\"]\n style_line = [\"-\", \"--\", \"-.\", \":\"]\n try:\n color = color_line[n % len(color_line)]\n style = style_line[n // len(color_line)]\n return style + color\n except IndexError:\n return \"-r\"\n","sub_path":"dnt_graph.py","file_name":"dnt_graph.py","file_ext":"py","file_size_in_byte":3744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"584449959","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 19 09:19:22 2020\n\n@author: Shivadhar SIngh\n\"\"\"\n\n\ndef word_frequency():\n from urllib.request import urlopen\n fileobj = urlopen(\"https://cs.anu.edu.au/courses/comp1730/labs/data/wordlist.txt\")\n d = dict()\n for byteseq in fileobj:\n line = byteseq.decode()\n # process line of text\n d[line] = len(line.strip())\n fileobj.close()\n ls = list(d.values())\n ls.sort()\n return ls[-1:-10:-1]\n ","sub_path":"word_frequency.py","file_name":"word_frequency.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"193132728","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport torchvision\nfrom torch.autograd import Variable\nimport itertools\nfrom sklearn.metrics import mean_squared_error, accuracy_score, hamming_loss, roc_curve, auc, f1_score\n\n\ndef to_var(x, requires_grad=True):\n if torch.cuda.is_available():\n x = x.cuda()\n return Variable(x, requires_grad=requires_grad)\n\n\ndef auc_roc(Pr, Tr):\n fpr, tpr, _ = roc_curve(Tr, Pr, pos_label=1.0)\n return auc(fpr, tpr), fpr, tpr\n\n\nclass MetaModule(nn.Module):\n # adopted from: Adrien Ecoffet https://github.com/AdrienLE\n def params(self):\n for name, param in self.named_params(self):\n yield param\n\n def named_leaves(self):\n return []\n\n def named_submodules(self):\n return []\n\n def named_params(self, curr_module=None, memo=None, prefix=''):\n if memo is None:\n memo = set()\n\n if hasattr(curr_module, 'named_leaves'):\n for name, p in curr_module.named_leaves():\n if p is not None and p not in memo:\n memo.add(p)\n yield prefix + ('.' if prefix else '') + name, p\n else:\n for name, p in curr_module._parameters.items():\n if p is not None and p not in memo:\n memo.add(p)\n yield prefix + ('.' if prefix else '') + name, p\n\n for mname, module in curr_module.named_children():\n submodule_prefix = prefix + ('.' 
if prefix else '') + mname\n for name, p in self.named_params(module, memo, submodule_prefix):\n yield name, p\n\n def update_params(self, lr_inner, first_order=False, source_params=None, detach=False):\n if source_params is not None:\n for tgt, src in zip(self.named_params(self), source_params):\n name_t, param_t = tgt\n # name_s, param_s = src\n # grad = param_s.grad\n # name_s, param_s = src\n grad = src\n if first_order:\n grad = to_var(grad.detach().data)\n tmp = param_t - lr_inner * grad\n self.set_param(self, name_t, tmp)\n else:\n\n for name, param in self.named_params(self):\n if not detach:\n grad = param.grad\n if first_order:\n grad = to_var(grad.detach().data)\n tmp = param - lr_inner * grad\n self.set_param(self, name, tmp)\n else:\n param = param.detach_()\n self.set_param(self, name, param)\n\n def set_param(self,curr_mod, name, param):\n if '.' in name:\n n = name.split('.')\n module_name = n[0]\n rest = '.'.join(n[1:])\n for name, mod in curr_mod.named_children():\n if module_name == name:\n self.set_param(mod, rest, param)\n break\n else:\n setattr(curr_mod, name, param)\n\n def detach_params(self):\n for name, param in self.named_params(self):\n self.set_param(self, name, param.detach())\n\n def copy(self, other, same_var=False):\n for name, param in other.named_params():\n if not same_var:\n param = to_var(param.data.clone(), requires_grad=True)\n self.set_param(name, param)\n\n\nclass MetaLinear(MetaModule):\n def __init__(self, *args, **kwargs):\n super().__init__()\n ignore = nn.Linear(*args, **kwargs)\n\n self.register_buffer('weight', to_var(ignore.weight.data, requires_grad=True))\n self.register_buffer('bias', to_var(ignore.bias.data, requires_grad=True))\n\n def forward(self, x):\n return F.linear(x, self.weight, self.bias)\n\n def named_leaves(self):\n return [('weight', self.weight), ('bias', self.bias)]\n\n\nclass MetaConv2d(MetaModule):\n def __init__(self, *args, **kwargs):\n super().__init__()\n ignore = nn.Conv2d(*args, 
**kwargs)\n\n self.stride = ignore.stride\n self.padding = ignore.padding\n self.dilation = ignore.dilation\n self.groups = ignore.groups\n\n self.register_buffer('weight', to_var(ignore.weight.data, requires_grad=True))\n\n if ignore.bias is not None:\n self.register_buffer('bias', to_var(ignore.bias.data, requires_grad=True))\n else:\n self.register_buffer('bias', None)\n\n def forward(self, x):\n return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)\n\n def named_leaves(self):\n return [('weight', self.weight), ('bias', self.bias)]\n\n\nclass MetaConvTranspose2d(MetaModule):\n def __init__(self, *args, **kwargs):\n super().__init__()\n ignore = nn.ConvTranspose2d(*args, **kwargs)\n\n self.stride = ignore.stride\n self.padding = ignore.padding\n self.dilation = ignore.dilation\n self.groups = ignore.groups\n\n self.register_buffer('weight', to_var(ignore.weight.data, requires_grad=True))\n\n if ignore.bias is not None:\n self.register_buffer('bias', to_var(ignore.bias.data, requires_grad=True))\n else:\n self.register_buffer('bias', None)\n\n def forward(self, x, output_size=None):\n output_padding = self._output_padding(x, output_size)\n return F.conv_transpose2d(x, self.weight, self.bias, self.stride, self.padding,\n output_padding, self.groups, self.dilation)\n\n def named_leaves(self):\n return [('weight', self.weight), ('bias', self.bias)]\n\n\nclass MetaBatchNorm2d(MetaModule):\n def __init__(self, *args, **kwargs):\n super().__init__()\n ignore = nn.BatchNorm2d(*args, **kwargs)\n\n self.num_features = ignore.num_features\n self.eps = ignore.eps\n self.momentum = ignore.momentum\n self.affine = ignore.affine\n self.track_running_stats = ignore.track_running_stats\n\n if self.affine:\n self.register_buffer('weight', to_var(ignore.weight.data, requires_grad=True))\n self.register_buffer('bias', to_var(ignore.bias.data, requires_grad=True))\n\n if self.track_running_stats:\n self.register_buffer('running_mean', 
torch.zeros(self.num_features))\n self.register_buffer('running_var', torch.ones(self.num_features))\n else:\n self.register_parameter('running_mean', None)\n self.register_parameter('running_var', None)\n\n\n def forward(self, x):\n return F.batch_norm(x, self.running_mean, self.running_var, self.weight, self.bias,\n self.training or not self.track_running_stats, self.momentum, self.eps)\n\n def named_leaves(self):\n return [('weight', self.weight), ('bias', self.bias)]\n\n\nclass BasicBlock(MetaModule):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(BasicBlock, self).__init__()\n self.conv1 = MetaConv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = MetaBatchNorm2d(planes)\n self.conv2 = MetaConv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn2 = MetaBatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n MetaConv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n MetaBatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass Bottleneck(MetaModule):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1):\n super(Bottleneck, self).__init__()\n self.conv1 = MetaConv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = MetaBatchNorm2d(planes)\n self.conv2 = MetaConv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = MetaBatchNorm2d(planes)\n self.conv3 = MetaConv2d(planes, self.expansion*planes, kernel_size=1, bias=False)\n self.bn3 = MetaBatchNorm2d(self.expansion*planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n MetaConv2d(in_planes, self.expansion*planes, 
kernel_size=1, stride=stride, bias=False),\n MetaBatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = F.relu(self.bn2(self.conv2(out)))\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\n\nclass ResNet(MetaModule):\n def __init__(self, block, num_blocks, num_classes=10):\n super(ResNet, self).__init__()\n self.in_planes = 64\n\n self.conv1 = MetaConv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = MetaBatchNorm2d(64)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, num_blocks[0])\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.linear = MetaLinear(512*block.expansion, num_classes)\n\n def _make_layer(self, block, planes, num_blocks, stride = 1):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.maxpool(out)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n # print('size befor avg pooling: ', out.size())\n out = F.avg_pool2d(out, out.size(2))\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out.squeeze()\n\n\ndef ResNet18(num_classes=10):\n return ResNet(BasicBlock, [2,2,2,2], num_classes=num_classes)\n\ndef ResNet34(num_classes=10):\n return ResNet(BasicBlock, [3, 4, 6, 3], num_classes=num_classes)\n\n\nclass PreActBlock(MetaModule):\n '''Pre-activation version of the BasicBlock.'''\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1):\n super(PreActBlock, self).__init__()\n self.bn1 = 
MetaBatchNorm2d(in_planes)\n self.conv1 = MetaConv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn2 = MetaBatchNorm2d(planes)\n self.conv2 = MetaConv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n MetaConv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(x))\n shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x\n out = self.conv1(out)\n out = self.conv2(F.relu(self.bn2(out)))\n out += shortcut\n return out\n\n\nclass PreActBottleneck(MetaModule):\n '''Pre-activation version of the original Bottleneck module.'''\n expansion = 4\n def __init__(self, in_planes, planes, stride=1):\n super(PreActBottleneck, self).__init__()\n self.bn1 = MetaBatchNorm2d(in_planes)\n self.conv1 = MetaConv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn2 = MetaBatchNorm2d(planes)\n self.conv2 = MetaConv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn3 = MetaBatchNorm2d(planes)\n self.conv3 = MetaConv2d(planes, self.expansion*planes, kernel_size=1, bias=False)\n\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n MetaConv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False)\n )\n\n def forward(self, x):\n out = F.relu(self.bn1(x))\n shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x\n out = self.conv1(out)\n out = self.conv2(F.relu(self.bn2(out)))\n out = self.conv3(F.relu(self.bn3(out)))\n out += shortcut\n return out\n\n\nclass PreActResNet(MetaModule):\n def __init__(self, block, num_blocks, num_classes=10):\n super(PreActResNet, self).__init__()\n self.in_planes = 64\n\n self.conv1 = MetaConv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n 
self.layer1 = self._make_layer(block, 64, num_blocks[0])\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.linear = MetaLinear(512*block.expansion, num_classes)\n\n def _make_layer(self, block, planes, num_blocks, stride = 1):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.maxpool(out)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n # print('size befor avg pooling: ', out.size())\n out = F.avg_pool2d(out, out.size(2))\n out2 = out.view(out.size(0), -1)\n out = self.linear(out2)\n return out.squeeze()\n\n\ndef PreActResNet18(num_classes=10):\n return PreActResNet(PreActBlock, [2,2,2,2], num_classes)\n\ndef PreActResNet34(num_classes=10):\n return PreActResNet(PreActBlock, [3,4,6,3], num_classes)\n\ndef test_2():\n net = PreActResNet34(1)\n y = net((torch.randn(2,3,224,224)))\n print(y.size())\n\n# test_2()\n\n\ndef test():\n net = ResNet18(num_classes=10)\n y = net(torch.randn(16, 1,32,32))\n print(y.size())\n\n#test()\n\n\ndef noise_matrix(x = 0.8, p0 = 0.02, p1 = 0.4):\n t00 = (1 - p0)*x\n t01 = p1*(1 - x)\n t0 = t00 + t01\n t00 = t00/t0\n t01 = t01/t0\n\n t10 = p0*x\n t11 = (1 - p1)*(1 - x)\n t1 = t10 + t11\n t10 = t10/t1\n t11 = t11/t1\n T = np.array([[t00, t01], [t10, t11]])\n if torch.cuda.is_available():\n return torch.from_numpy(T).type(torch.FloatTensor).cuda()\n return torch.from_numpy(T).type(torch.FloatTensor)\n\n\ndef get_mean_and_std_batch(dataset, bs = 4096):\n pop_mean = []\n pop_std0 = []\n pop_std1 = []\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=bs, shuffle=True, num_workers=8)\n for i, 
data in enumerate(dataloader, 0):\n # shape (batch_size, 3, height, width)\n print('{}/{}'.format(i, len(dataloader)))\n sys.stdout.flush()\n numpy_image, _ = data\n numpy_image = numpy_image.numpy()\n\n # shape (3,)\n batch_mean = np.mean(numpy_image, axis=(0, 2, 3))\n batch_std0 = np.std(numpy_image, axis=(0, 2, 3))\n batch_std1 = np.std(numpy_image, axis=(0, 2, 3), ddof=1)\n print(batch_mean, batch_std0, batch_std1)\n\n pop_mean.append(batch_mean)\n pop_std0.append(batch_std0)\n pop_std1.append(batch_std1)\n\n # shape (num_iterations, 3) -> (mean across 0th axis) -> shape (3,)\n pop_mean = np.array(pop_mean).mean(axis=0)\n pop_std0 = np.array(pop_std0).mean(axis=0)\n pop_std1 = np.array(pop_std1).mean(axis=0)\n print('mean/std0/std1:', pop_mean, pop_std0, pop_std1)\n return pop_mean, pop_std0, pop_std1\n\n\ndef get_mean_and_std(dataset):\n '''Compute the mean and std value of dataset.'''\n dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=4)\n mean = torch.zeros(3)\n std = torch.zeros(3)\n print('==> Computing mean and std..')\n for ind, (inputs, targets) in enumerate(dataloader):\n if ind % 100 == 0:\n print('Processing {}/{}'.format(ind, len(dataloader)))\n\n for i in range(3):\n mean[i] += inputs[:,i,:,:].mean()\n std[i] += inputs[:,i,:,:].std()\n mean.div_(len(dataset))\n std.div_(len(dataset))\n\n print('mean/std: ', mean, std)\n return mean, std\n","sub_path":"training_codes/model_paad.py","file_name":"model_paad.py","file_ext":"py","file_size_in_byte":16769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"639193451","text":"# python argtest.py --fin datain --fout dataout\n\n\nimport sys\n\ndef genSortKey(col,up):\n def key(x):\n if up == '+':\n return x[col]\n elif up == '-':\n return -x[col]\n return key\n\ndef main():\n FIN=\"\"\n FOUT=\"\"\n COL=\"\"\n DIR=\"\"\n nargs=len(sys.argv)\n skip=False\n for i in range(1,nargs):\n if not skip:\n arg=sys.argv[i]\n print(\"INFO: processing\",arg)\n if arg == \"--fin\":\n if i != nargs-1:\n FIN=sys.argv[i+1]\n skip=True\n elif arg == \"--fout\":\n if i != nargs-1:\n FOUT=sys.argv[i+1]\n skip=True\n elif arg == \"--col\":\n if i != nargs-1:\n COL=sys.argv[i+1]\n skip=True\n elif arg == \"--dir\":\n if i != nargs-1:\n DIR=sys.argv[i+1]\n skip=True\n else:\n print(\"ERR: unknown arg:\",arg)\n else:\n skip=False\n\n print(\"INFO: FIN\",FIN)\n print(\"INFO: FOUT\",FOUT)\n print(\"INFO: COL\",COL)\n print(\"INFO: DIR\",DIR)\n accum = []\n try:\n f=open(FIN,'r')\n except:\n print(\"ERR: file\",FIN,\"does not exist or cannot be opened\")\n return False\n try:\n g=open(FOUT,'w')\n except:\n print(\"ERR: file\",FOUT,\"could not be created\")\n try:\n COL = int(COL)\n except:\n print(\"ERR: input\",COL,\"is a non-integer\")\n return False\n if ((DIR != \"+\") and (DIR != \"-\")):\n print(\"ERR: dir\",DIR,\"is invalid\")\n return False\n sortKey = genSortKey(COL,DIR)\n\n #try:\n # f=open(FIN,'r')\n #except:\n # print(\"ERR: file\",FIN,\"does not exist or cannot be opened\")\n # return False\n lines = f.readlines()\n try:\n for line in lines:\n j = line.split('\\n')[0]\n k = j.split(',')\n r = []\n for i in k:\n r += [float(i)]\n accum += [r]\n except:\n print(\"ERR: non-numeric values in\",FIN)\n return False\n #try:\n # g = open(FOUT,'w')\n #except:\n # print(\"ERR: file\",FOUT,\"could not be created\")\n # return False\n for i in accum:\n if COL > len(i)-1:\n print(\"ERR: --col\",COL,\"out of range\")\n g.write(\"\")\n return False\n \n sortedList = sorted(accum,key=sortKey)\n csv = []\n for row in 
range(0,len(sortedList),1):\n csv += [\"\"]\n for i in sortedList[row]:\n csv[row] += str(i) + \",\"\n csv[row]=csv[row][0:len(csv[row])-1]\n csv[row] += \"\\n\"\n for i in csv:\n g.write(str(i))\n return True\n\nmain()\n","sub_path":"Python/Bubble Sort/sortCSV.py","file_name":"sortCSV.py","file_ext":"py","file_size_in_byte":2526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"42313584","text":"#!C:\\Users\\60067527\\Anaconda3\\envs\\py36\n#-*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nimport os, io\nimport logging\nimport re\n\nimport tensorflow as tf\n\nfrom six import b\nimport numpy as np\n\n\n\nSCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))\nDEFAULT_LABEL_FILE = os.path.join(SCRIPT_PATH,\n #'../labels/bank_labelsSW.txt')\n '../labels/bank_labels.txt')\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\n\n\ndef generate(annotations_path, output_path, log_step=5000,\n force_uppercase=True, save_filename=False):\n\n logging.info('Building a dataset from %s.', annotations_path)\n logging.info('Output file: %s', output_path)\n\n writer = tf.python_io.TFRecordWriter(output_path)\n longest_label = ''\n idx = 0\n\n\t\t\n with open(annotations_path, 'r', encoding='utf-8') as annotations:\n word=[]\t \n \n for idx, line in enumerate(annotations):\n line = line.rstrip('\\n')\n\n # Split the line on the first whitespace character and allow empty values for the label\n # NOTE: this does not allow whitespace in image paths\n line_match = re.match(r'(\\S+)\\s(.*)', line)\n #print('line ', line)\t\t\t\n if line_match is None:\n logging.error('missing filename or label, ignoring line %i: %s', idx+1, line)\n continue\n (img_path, label) = line_match.groups()\n #print(img_path, label)\t\t\t\n\n with open(img_path, 'rb') as img_file:\n img = img_file.read()\n\n# if force_uppercase:\n# label = label.upper()\n \n try:\n word= convert_lex(label)\n \n except IOError:\n pass # ignore error images\t\t\n\n if len(label) > len(longest_label):\n longest_label = label\n\t\t\t\t\n ''' \n feature = {}\n feature['image'] = _bytes_feature(img)\n feature['label'] = _bytes_feature(b(label))\n '''\n label = word\n label=''.join(map(str,label))\n\t\t\t\n feature = {}\n 
feature['image'] = _bytes_feature(img)\n feature['label'] = _bytes_feature(b(label))\n \n \t\t\t\n if save_filename:\n feature['comment'] = _bytes_feature(b(img_path))\n\n example = tf.train.Example(features=tf.train.Features(feature=feature))\n\n writer.write(example.SerializeToString())\n\n if idx % log_step == 0:\n logging.info('Processed %s pairs.', idx+1)\n\t\t\t\t\n\n\t\t\t\t\n if idx:\t\t\t\n logging.info('Dataset is ready: %i pairs.', idx+1)\n logging.info('Longest label (%i): %s', len(longest_label), longest_label)\n\n writer.close()\n\ndef convert_lex( lex):\n\n #if sys.version_info >= (3,):\n # lex = lex.decode('utf-8')\n #lex = lex.decode('iso-8859-1')\n \n #assert len(lex) < self.bucket_specs[-1][1]\n\t\t#return np.array(\n # [self.GO_ID] + [self.CHARMAP.index(char) for char in lex] + [self.EOS_ID],\n # dtype=np.int32)\n\n GO_ID = 1\n EOS_ID = 2\n CHR_BR = 3\n\t\n label_file = DEFAULT_LABEL_FILE\n with io.open(label_file, 'r', encoding='utf-8') as f:\n labels = f.read().splitlines()\n #print(labels)\n \t\n l_id=[] \n k=3\n s=\"\"\t\n n=0\n for i, l in enumerate(labels):\n n=i+k\n s=str(n)\t\t\t\t\n #print('i l k n s ', i , l, k, n, s)\n while ('1' in s) or('2' in s) or ('3' in s):\t\t\t\t\n k+=1\n n=i+k\n s=str(n)\t\t\t\t\t\t\t\t\t\t\t\n #print('while i l n k s: ', i, l , k, n, s)\n l_id.append(n)\t\n #print('i l k n s l_id', i , l, k, n, s, l_id)\t\t\n\n label_list=list(zip( (j for j in range(0,i+1)),l_id, labels)) \n #print('label_list, ' , label_list)\n\t\n lex_new=[] \n j=0\n for c in lex:\n #print('c ord(c) lex', c, ord(c), lex)\n for j, l_id, label in label_list:\t\t\n #for i, l in enumerate(labels):\n #print('c j l_id label', c, j , l_id, label)\t\n if c == label:\t\t\t\n lex_new.append(l_id)\n lex_new.append(3)\t\t\t\t \n \n \t\n return lex_new\n ''' \n return np.array(\n #[i for i in lex_new],\n [i for i in lex_new],\t \n #[GO_ID] + [i for i in lex_new] + [EOS_ID],\t \n #[GO_ID] + [EOS_ID],\t \t \n dtype=np.int32)\n\t''' 
","sub_path":"aocr36/util/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":4644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"159180461","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\nimport numpy as np\nfrom statistics import *\nimport warnings\nwarnings.filterwarnings('ignore')\nd=pd.read_csv('E:\\\\cartrue.csv')\n\n\n# In[2]:\n\n\nd1 = d.drop(['Unnamed: 0', 'Description','ExteriorColor','InteriorColor','FuelType','CabType','BedLength','City'], axis = 1)\n\n\n# In[3]:\n\n\nd1['Price']=d1['Price'].str.replace(\"$\",\"\")\nd1['Price']=d1['Price'].str.replace(\",\",\"\")\nd1['Miles']=d1['Miles'].str.replace(\",\",\"\")\n\n\n# In[4]:\n\n\naccident =[]\nowner=[]\nusetype=[]\nfor q in range(0,9993):\n con = d['Condition'][q].split(\",\")\n if len(con)==5:\n can1 = con[0].replace(\"'\",\"\")\n acc = can1.replace(\"[\",\"\")\n accident.append(acc)\n own = con[2].replace(\" '\",\"\")\n owner.append(own)\n can2 = con[4].replace(\" '\",\"\")\n can3 = can2.replace(\"'\",\"\")\n ust = can3.replace(\"]\",\"\")\n usetype.append(ust)\n else:\n can1 = con[0].replace(\"'\",\"\")\n acc = can1.replace(\"[\",\"\")\n accident.append(acc)\n own = None\n owner.append(own)\n can2 = con[2].replace(\" '\",\"\")\n can3 = can2.replace(\"'\",\"\")\n ust = can3.replace(\"]\",\"\")\n usetype.append(ust)\n\n\nd1['Accidents']=accident\nd1['NoOfOwners']=owner\nd1['UseType']=usetype\n\n\n# In[5]:\n\n\nd1 = d1.drop(['Condition'], axis = 1)\nd2=d1\n\n\n# In[6]:\n\n\nfor v in range(0,9993):\n if d1['MPG'][v]=='6.2L V-8 Gas' or d1['MPG'][v]=='1.5L Inline-4 Plug-In Hybrid':\n d2['DriveType'][v]=d2['Engine'][v]\n d2['Engine'][v]=d2['MPG'][v]\n d2['MPG'][v]=None\n elif d1['MPG'][v]=='4.3L V-6 Gas' or d1['MPG'][v]=='6.7L V-8 Diesel Turbocharged':\n d2['DriveType'][v]=d2['Engine'][v]\n d2['Engine'][v]=d2['MPG'][v]\n d2['MPG'][v]=None\n elif d1['MPG'][v]=='6.0L V-8 Gas' or d1['MPG'][v]=='2.0L Inline-4 Hybrid Turbocharged':\n d2['DriveType'][v]=d2['Engine'][v]\n d2['Engine'][v]=d2['MPG'][v]\n d2['MPG'][v]=None\n elif d1['MPG'][v]=='2.0L Inline-4 Plug-In Hybrid' or d1['MPG'][v]=='6.4L 
V-8 Gas':\n d2['DriveType'][v]=d2['Engine'][v]\n d2['Engine'][v]=d2['MPG'][v]\n d2['MPG'][v]=None\n elif d1['MPG'][v]=='L - Hydrogen' or d1['MPG'][v]=='6.6L V-8 Diesel Turbocharged':\n d2['DriveType'][v]=d2['Engine'][v]\n d2['Engine'][v]=d2['MPG'][v]\n d2['MPG'][v]=None\n elif d1['MPG'][v]=='2.0L Inline-4 Plug-In Hybrid Turbocharged' or d1['MPG'][v]=='6.7L V-6 Diesel Turbocharged':\n d2['DriveType'][v]=d2['Engine'][v]\n d2['Engine'][v]=d2['MPG'][v]\n d2['MPG'][v]=None\n elif d1['MPG'][v]=='2.1L Inline-4 Diesel Turbocharged' or d1['MPG'][v]=='1.4L Inline-4 Plug-In Hybrid':\n d2['DriveType'][v]=d2['Engine'][v]\n d2['Engine'][v]=d2['MPG'][v]\n d2['MPG'][v]=None\n elif d1['MPG'][v]=='5.7L V-8 Gas' or d1['MPG'][v]=='3.0L V-6 Plug-In Hybrid Turbocharged':\n d2['DriveType'][v]=d2['Engine'][v]\n d2['Engine'][v]=d2['MPG'][v]\n d2['MPG'][v]=None\n elif d1['MPG'][v]=='3.0L V-6 Diesel Turbocharged' or d1['MPG'][v]=='6.8L V-10 Gas':\n d2['DriveType'][v]=d2['Engine'][v]\n d2['Engine'][v]=d2['MPG'][v]\n d2['MPG'][v]=None\n elif d1['MPG'][v]=='7.2L V-8 Gas':\n d2['DriveType'][v]=d2['Engine'][v]\n d2['Engine'][v]=d2['MPG'][v]\n d2['MPG'][v]=None\n\n\n# In[7]:\n\n\nmpg= d2['MPG'].str.split(\"/\", expand=True)\nd2['MPG_cty']=mpg[0]\nd2['MPG_hwy']=mpg[1]\nd2 = d2.drop(['MPG'], axis = 1)\nd2['MPG_cty']=d1['MPG_cty'].str.replace(\" cty\",\"\")\nd2['MPG_hwy']=d1['MPG_hwy'].str.replace(\" hwy\",\"\")\n\n\n# In[8]:\n\n\neng = d2['Engine'].str.split(\"L\", expand=True)\nde = eng[1]\ngas = de.str.split(\"Gas\", expand=True)\nfor e in range(0,9993):\n if eng[0][e]=='':\n eng[0][e]='0.5'\n\nd2['Engine_L']=eng[0]\nd2['Engine_Gas']=gas[0]\nd2=d2.drop(['Engine'], axis = 1)\n\n\n# In[9]:\n\n\nfor t in range(0,9993):\n if d1['Transmission'][t]=='Crew Cab' or d1['Transmission'][t]=='Standard':\n d2['Transmission'][t]=d['FuelType'][t]\n elif d1['Transmission'][t]=='Extended Cab' or d1['Transmission'][t]=='Regular Cab':\n d2['Transmission'][t]=d['FuelType'][t]\n\n\n# 
In[10]:\n\n\ntype(d2['MPG_cty'][0])\n\n\n# In[11]:\n\n\nd2['CarBrand']=d2['CarBrand'].str.lower()\n#d2['City']=d2['City'].str.lower()\nd2['State']=d2['State'].str.lower()\nd2['ExteColor']=d2['ExteColor'].str.lower()\nd2['InterColor']=d2['InterColor'].str.lower()\nd2['style']=d2['style'].str.lower()\nd2['Transmission']=d2['Transmission'].str.lower()\nd2['UseType']=d2['UseType'].str.lower()\nd2['Engine_Gas']=d2['Engine_Gas'].str.lower()\nd2['Model']=d2['Model'].str.lower()\nd3=d2\n\n\n# In[12]:\n\n\nd2.replace([None],np.nan,inplace=True)\n\n\n# In[13]:\n\n\nTransmission_map = {'automatic':1,\n 'manual':0,\n}\n\nNoOfOwners_map = {'1 Owner':8,\n '2 Owners':7,\n '3 Owners':6,\n '4 Owners':5,\n '5 Owners':4,\n '6 Owners':3,\n '7 Owners':2,\n '8 Owners':1,\n '9 Owners':0,\n}\n\nd3['Transmission']=d3.Transmission.map(Transmission_map)\nd3['NoOfOwners']=d3.NoOfOwners.map(NoOfOwners_map)\n\n\n# In[14]:\n\n\n# KNN_imputation\nfrom sklearn.impute import KNNImputer\nimputer = KNNImputer(n_neighbors=3)\nd3_imputed = imputer.fit_transform(d3[['Transmission', 'NoOfOwners', 'MPG_cty', 'MPG_hwy']])\nd3_imputed\n\n\n# In[15]:\n\n\nd3_imputed.tolist()\nTran_imp=[]\nNoOf_imp=[]\nMPG_imp=[]\nMPGimp=[]\nfor i in range(0,9993):\n Tran_imp.append(round(d3_imputed[i][0]))\n NoOf_imp.append(round(d3_imputed[i][1]))\n MPG_imp.append(round(d3_imputed[i][2]))\n MPGimp.append(round(d3_imputed[i][3]))\nd3['Transmission']=Tran_imp\nd3['NoOfOwners']=NoOf_imp\nd3['MPG_cty']=MPG_imp\nd3['MPG_hwy']=MPGimp\n\nd3['fueleconomy'] =round((0.55 * d3['MPG_cty']) + (0.45 * d3['MPG_hwy']),2)\nd3 = d3.drop(['MPG_cty','MPG_hwy'], axis = 1)\n\n\n# import seaborn as sns\n# import matplotlib.pyplot as plt\n# sns.boxplot(d3['Miles'])\n# plt.title('Boxplot')\n# plt.show()\n# \n# import seaborn as sns\n# import matplotlib.pyplot as plt\n# sns.boxplot(d3['fueleconomy'])\n# plt.title('Boxplot')\n# plt.show()\n\n# 
In[16]:\n\n\nd3['Price']=d3['Price'].astype('int64')\nd3['Miles']=d3['Miles'].astype('int64')\nd3['Year']=d3['Year'].astype('string')\nd3['NoOfOwners']=d3['NoOfOwners'].astype('string')\n#d3['MPG_cty']=d3['MPG_cty'].astype('int')\n#d3['MPG_hwy']=d3['MPG_hwy'].astype('int')\nd3['Engine_L']=d3['Engine_L'].astype('string')\nd3['Transmission']=d3['Transmission'].astype('string')\n\nd3['Year']=d3['Year'].astype('object')\nd3['Engine_L']=d3['Engine_L'].astype('object')\nd3['NoOfOwners']=d3['NoOfOwners'].astype('object')\nd3['Transmission']=d3['Transmission'].astype('object')\n#d3['MPG_cty']=d3['MPG_cty'].astype('object')\n#d3['MPG_hwy']=d3['MPG_hwy'].astype('object')\n#d3.isnull().sum()\n\n\n# In[17]:\n\n\nd3.columns\n\n\n# In[ ]:\n\n\n\n\n\n# from sklearn.preprocessing import MinMaxScaler\n# from sklearn.preprocessing import StandardScaler\n# \n# scaler = MinMaxScaler()\n# num_vars = ['Miles']\n# d3[num_vars] = scaler.fit_transform(d3[num_vars])\n\n# In[18]:\n\n\ntype(d3.fueleconomy[0])\n\n\n# In[19]:\n\n\nfrom sklearn.model_selection import train_test_split\nnp.random.seed(0)\ndf_train, df_test = train_test_split(d3, train_size = 0.8, test_size = 0.2, random_state = 100)\n\n\n# In[20]:\n\n\ny_train = df_train.pop('Price')\nX_train = df_train\ny_test = df_test.pop('Price')\nX_test = df_test\n\n\n# In[21]:\n\n\ndf_train.shape\n\n\n# import seaborn as sns\n# import matplotlib.pyplot as plt\n# sns.boxplot(X_train['Miles'])\n# plt.title('Boxplot')\n# plt.show()\n# \n# \n# sns.boxplot(X_train['fueleconomy'])\n# plt.title('Boxplot')\n# plt.show()\n\n# In[22]:\n\n\nfrom feature_engine.outliers import Winsorizer\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfor i in X_train:\n if X_train[i].dtype==\"object\":\n continue\n else:\n windsoriser = Winsorizer(capping_method='gaussian',tail='both',fold=1.5,variables=i)\n X_train[i]= windsoriser.fit_transform(X_train[[i]])\n\n # we can inspect the minimum caps and maximum caps\n 
windsoriser.right_tail_caps_,windsoriser.left_tail_caps_\n\n # lets see boxplot\n sns.boxplot(X_train[i])\n plt.title('Boxplot')\n plt.show()\n\n\n# In[23]:\n\n\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import FunctionTransformer\n\nscaler = FunctionTransformer(np.log2, validate = True)\nnum_vars = ['Miles']\nX_train[num_vars] = scaler.fit_transform(X_train[num_vars])\nX_test[num_vars] = scaler.fit_transform(X_test[num_vars])\n\n\n# In[24]:\n\n\nX_train.Miles[0]\n\n\n# In[42]:\n\n\ncatego=['CarBrand', 'Model','Year','State','ExteColor','InterColor','style', 'DriveType', 'Accidents', 'UseType', 'Engine_Gas']\nfrom catboost import CatBoostRegressor\n\ncatboo = CatBoostRegressor(iterations=99,\n random_state = 2021, od_type = 'Iter',\n eval_metric=\"R2\",learning_rate=0.085,depth=16,l2_leaf_reg=5,bagging_temperature=1\n ,border_count=255,grow_policy='Lossguide',max_leaves=500)\ncatboo.fit(X_train, y_train,cat_features=catego,eval_set=(X_test, y_test),plot=True)\n\n\n# In[43]:\n\n\nfrom sklearn.metrics import r2_score\nx_pred = catboo.predict(X_train)\nr2_score(y_train,x_pred)\n\n\n# In[44]:\n\n\ny_pred = catboo.predict(X_test)\nr2_score(y_test,y_pred)\n\n\n# In[45]:\n\n\nparams = {'depth':[3,1,2,6,4,5,7,8,9,10],\n 'iterations':[250,100,500],\n 'learning_rate':[0.03,0.001,0.01,0.1,0.2,0.3], \n 'l2_leaf_reg':[3,1,5,10,100],\n 'border_count':[32,5,10,20,50],\n 'thread_count':[4]}\n\n\n# grid_cat = GridSearchCV(estimator = catboo, param_grid = params, scoring=\"neg_mean_squared_error\", cv = 3, verbose = 2)\n# grid_cat.fit(X_train, y_train, cat_features=catego,eval_set=(X_test, y_test),plot=True)\n\n# grid_cat.best_estimator_\n\n# In[46]:\n\n\nimport pickle\nfilename = 'prediction'\npickle.dump(catboo,open(filename,'wb'))\n\n\n# In[47]:\n\n\n(0.55*20)+(0.45*26)\n\n\n# In[51]:\n\n\nout = catboo.predict(np.array([['toyota', 'highlander', '2019', 10.3000, 'tx', 'blue',\n 'black', 'suv', 
'FWD', 1, 'No accidents',\n 8, 'personal use', '3.0', 'inline-4', 22.7]]))\n\n\n# In[52]:\n\n\nout[0]\n\n\n# In[33]:\n\n\npd.DataFrame()\nd3['Miles'] = scaler.fit_transform(d3[['Miles']])\n\n\n# In[34]:\n\n\nd3['Miles'][0]\n\n\n# In[35]:\n\n\nplt.scatter(y_pred,y_test,color=\"blue\")\nplt.plot(x_pred,y_train,color=\"red\")\nx_pred = x_pred.reshape(-1,1)\n\n\n# In[ ]:\n\n\n\n\n\n# In[36]:\n\n\ntype('Miles')\n\n\n# In[37]:\n\n\nmodel = pickle.load(open('pricepred','rb'))\n\n\n# In[ ]:\n\n\nout1 = model.predict(np.array([['volvo', 'xc60', '2018', 40670, 'tx', 'white',\n 'black', 'suv', 'AWD', '1', 'No accidents',\n '7', 'personal use', '2.0', 'inline-4', '23.700000']]))\n\n\n# In[ ]:\n\n\nout1\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"car price prediction.py","file_name":"car price prediction.py","file_ext":"py","file_size_in_byte":10674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"114640442","text":"import random\nimport os\nimport sys\nimport time\n\nclass Parameters:# Static parameters class\n purple = '\\033[95m'\n blue = '\\033[94m'\n cyan = '\\033[96m'\n green = '\\033[92m'\n yellow = '\\033[93m'\n red = '\\033[91m'\n white = '\\033[0m'\n bold = '\\033[1m'\n reset = '\\u001b[0m'\n underline = '\\033[4m'\n abc = {0:\"A\",1:\"B\",2:\"C\"}\n abc_to_digit = {\"A\": 0, \"B\":1, \"C\":2}\n abc_list = [\"A\",\"B\",\"C\"]\n\nclass Global: # Global calass with global functions\n loading_time = 0.01\n a_i_time_sleep = 2\n\n end_count = 0\n\n def clear_screen(self):\n os.system(\"cls || clear\")\n \n def check_input(self,text):\n while True:\n user_input = input(text)\n if user_input.lower() == \"quit\":\n quit()\n break\n return user_input\n \n def print_board(self,board):\n print(\" 1 2 3\")\n for i in range(len(board)):\n if i < 2:\n print(Parameters.abc[i] + \" \" + board[i][0].symbol + \" | \" + board[i][1].symbol + \" | \" + board[i][2].symbol + \"\\n ---+---+---\")\n else:\n print(Parameters.abc[i] + \" \" + board[i][0].symbol + \" | \" + board[i][1].symbol + \" | \" + board[i][2].symbol + \"\\n\")\n\n def loading_game(self):\n procent = 0\n print(\"Loading...\")\n while procent <= 100:\n sys.stdout.write(u\"\\u001b[1000D\" + str(procent) + \"%\")\n sys.stdout.flush()\n time.sleep(self.loading_time)\n procent += 1\n print(\"\\nCompleted\")\n time.sleep(1)\n os.system(\"cls || clear\")\n \n def change_color(self,text,color):\n return color + text + Parameters.reset\n\nclass Inteligence(Global): # Parent inteligence class for Human and AI\n symbol = \".\"\n name = \"\"\n\n def finish(self,winner,tie,board):# Method is calling when someone win or medked tie\n self.clear_screen()\n self.print_board(board)\n if winner:\n print(self.name + \" is s Winner!\")\n elif tie:\n print(\"Tie\")\n Global.end_count = 0\n\n def get_file_path(self,file_name):# Method which get file path\n file_dir = os.path.dirname(os.path.abspath(__file__))\n 
my_file = os.path.join(file_dir, file_name)\n return my_file\n \n def mark(self, board , row, col):# Mark a game board\n if board[row][col] == Inteligence:\n board[row][col] = self\n Global.end_count += 1\n return board\n \n def check_finish(self,board,enemy):# Check the type game-end\n winner = self.has_won(board,enemy)\n tie = False\n if Global.end_count == 9:\n tie = True\n return winner,tie\n \n def has_won(self,board,player):# Сheck for possible winnings\n check_list1 = []\n check_list2 = []\n board_len = len(board)\n for i in range(board_len):\n for j in range(board_len):\n check_list1.append(board[i][j])\n check_list2.append(board[j][i])\n if (Inteligence not in check_list1 and player not in check_list1) or (Inteligence not in check_list2 and player not in check_list2):\n return True\n check_list1,check_list2 = [],[]\n for i in range(board_len):\n for j in range(board_len):\n if i == j:\n check_list1.append(board[i][j])\n if Inteligence not in check_list1 and player not in check_list1:\n return True\n check_list1 = []\n count = 2\n for i in range(board_len):\n check_list1.append(board[i][count])\n count -= 1\n if Inteligence not in check_list1 and player not in check_list1:\n return True\n return False\n\n\nclass Human(Inteligence): # Human class\n def __init__(self, name):\n self.name = name\n pass\n def get_move(self):# Get human move\n col = None\n row = None\n while col == None or row == None or col > 3:\n while True:\n row = input(\"Player \" + self.name + \" choose row (A-C): \" ).upper()\n if row not in Parameters.abc_list:\n continue\n break\n while True:\n try:\n col = int(input(\"Player \" + self.name + \" choose column (1-3): \"))\n if col > 3:\n continue\n else:\n break\n except ValueError:\n print(\"Please write a digit!\")\n return Parameters.abc_to_digit[row], col-1\n\n def move(self, enemy, board):# Human move\n self.clear_screen()\n self.print_board(board)\n row,col = self.get_move()\n board = self.mark(board,row,col)\n winner,tie = 
self.check_finish(board,enemy)\n return board,winner,tie\n\nclass Artificial_Intelligence(Inteligence): # AI class\n\n lines_file_name = \"\"\n\n def __init__(self):\n self.name = \"Compukter\"\n self.lines_file_name = self.get_file_path(\"Phrases.txt\")\n\n def check_two_cell(self, board, player):# Return True and coordinates of sell if the player has marked two out of three cells in the same row\n check_list1,check_list2 = [],[]\n board_len = len(board)\n for i in range(board_len):\n for j in range(board_len):\n check_list1.append(board[i][j])\n check,col,row = self.enemy_looking_two_cell(board,player,check_list1,\"first_simple\",i)\n if check:\n return check,row,col\n check_list2.append(board[j][i])\n check,row,col = self.enemy_looking_two_cell(board,player,check_list2,\"first_simple\",i)\n if check:\n return check,row,col\n check_list1,check_list2 = [],[]\n for i in range(board_len):\n for j in range(board_len):\n if i == j:\n check_list1.append(board[i][j])\n check,row,col = self.enemy_looking_two_cell(board,player,check_list1,\"second_simple\")\n if check:\n return check,row,col\n check_list1 = []\n count = 3\n for i in range(board_len):\n count -= 1\n check_list1.append(board[i][count])\n check,row,col = self.enemy_looking_two_cell(board,player,check_list1,\"third_simple\")\n if check:\n return check,row,col\n return False,0,0\n\n def enemy_looking_two_cell(self, board, player, check_list, mode, col = None):# Help method for \"check_two_cell\"\n if check_list.count(player) == 2 and Inteligence in check_list:\n if mode == \"first_simple\":\n return True,check_list.index(Inteligence),col\n elif mode == \"second_simple\":\n return True,check_list.index(Inteligence),check_list.index(Inteligence)\n elif mode == \"third_simple\":\n col = check_list.index(Inteligence)\n if col == 0:\n col = 2\n elif col == 2:\n col = 0\n return True,check_list.index(Inteligence),col\n return False,None,None\n \n def get_ai_move(self, board, enemy):# Get AI move\n row, col = 0, 0\n 
check, row, col = self.check_two_cell(board, self)\n if check:\n return row, col\n check, row, col = self.check_two_cell(board,enemy)\n if check:\n return row, col\n check, row, col = self.check_coreners(enemy,board)\n if check:\n return row, col\n while True:\n row = Parameters.abc_to_digit[Parameters.abc[random.randint(0,2)]]\n col = random.randint(0,2)\n if board[row][col] != Inteligence:\n continue\n else:\n return row, col\n \n def check_coreners(self, enemy, board):\n row, col = 0, 0\n right_list = []\n left_list = []\n for i in range(len(board)):\n right_list.append(board[0][i])\n left_list.append(board[i][0])\n if i == 2:\n for j in range(1,3):\n right_list.append(board[j][i])\n left_list.append(board[i][j])\n if enemy not in left_list or enemy not in right_list:\n if board[0][0] == Inteligence:\n return True, 0,0\n elif board[2][2] == Inteligence:\n return True, 2,2\n else:\n if board[0][0] == Inteligence:\n return True, 0,0\n elif board[2][2] == Inteligence:\n return True, 2,2\n if enemy not in left_list and Inteligence in left_list:\n if board[2][0] == Inteligence:\n return True, 2,0\n elif enemy not in right_list and Inteligence in right_list:\n if board[0][2] == Inteligence:\n return True, 0, 2\n\n return False, row, col\n\n \n def move(self,enemy,board):# AI move\n self.clear_screen()\n print(self.name + \": \" + self.get_random_line())\n self.print_board(board)\n time.sleep(self.a_i_time_sleep)\n row,col = self.get_ai_move(board,enemy)\n board = self.mark(board,row,col)\n winner,tie = self.check_finish(board,enemy)\n return board,winner,tie\n\n\n def get_random_line(self):# Get random line from file\n with open(self.lines_file_name) as file_word:\n return random.choice(list(file_word))\n\n","sub_path":"Clases.py","file_name":"Clases.py","file_ext":"py","file_size_in_byte":9510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"68654362","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef kernel(x, y, sigma=1):\n k = np.exp(-(np.linalg.norm((x-y), ord=2)**2)/(2*sigma**2))\n return k\n\ndef logReg(x, theta):\n # g = 1/(1 + np.exp(-kernel(x, theta)))\n g = 1/(1 + np.exp(-np.matmul(a=x, b=theta)))\n # g = np.matmul(a=x, b=theta)\n return g\n\ndef reshapeData(x, deg=0):\n m = len(x)\n x = np.append(arr=np.ones([m, 1]), values=x, axis=1)\n for i in range(deg-1):\n x = np.append(arr=x, values=np.reshape(np.power(x[::, 1], i+2), newshape=[m, 1]), axis=1)\n x = np.append(arr=x, values=np.reshape(np.power(x[::, 2], i+2), newshape=[m, 1]), axis=1)\n x = np.append(arr=x, values=np.reshape(x[::, 1]*x[::, 0], newshape=[m, 1]), axis=1)\n for j in range(deg-1):\n x = np.append(arr=x, values=np.reshape(np.power(x[::, 1], i+2)*np.power(x[::, 0], j+3), newshape=[m, 1]), axis=1)\n x = np.append(arr=x, values=np.reshape(np.power(x[::, 0], i+2)*np.power(x[::, 1], j+3), newshape=[m, 1]), axis=1)\n return x\n\ndef gradDesc(x, y, n_iter=1000, alpha=0.01, regParam=1):\n m = len(x)\n x = reshapeData(x)\n theta = [np.ones([int(np.size(x)/np.size(y))])*0]\n regParam = np.ones([int(np.size(x)/np.size(y))])*regParam\n for i in range(n_iter):\n theta.append(theta[i] - alpha/m*(np.matmul(a=x.T, b=(logReg(x, theta[i]) - y)) - regParam))\n return theta[-1]\n\ndef classify(x, theta):\n x = reshapeData(x)\n pred = np.round(logReg(x, theta), 0)\n return pred\n\ndef getScore(x, theta):\n x = reshapeData(x)\n score = logReg(x, theta)\n return score\n\n\nif __name__ == '__main__':\n n0 = 10\n n1 = 10\n ntest = 100\n dim = 2\n x0 = np.random.randn(n0, dim) + 1 * np.ones([n0, dim])\n x1 = np.random.randn(n1, dim) - 1 * np.ones([n1, dim])\n x = np.concatenate([x0, x1])\n y = np.concatenate([np.zeros(n0), np.ones(n1)])\n # y[np.sqrt(x[::, 0]**2 + x[::, 1]**2)<1] = 0\n # y[np.sqrt(x[::, 0]**2 + x[::, 1]**2)>=1] = 1\n test = np.random.randn(ntest, dim)\n\n theta = gradDesc(x, y)\n pred = classify(x=test, 
theta=theta)\n \"\"\"\n from sklearn.linear_model import LogisticRegression\n model = LogisticRegression(C=1)\n model.fit(x, y)\n pred = model.predict(test)\n \"\"\"\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(x[y==0, 0], x[y==0, 1], c='r', zorder=2, marker='o', alpha=0.3)\n ax.scatter(x[y==1, 0], x[y==1, 1], c='b', zorder=2, marker='o', alpha=0.3)\n ax.scatter(test[pred==0, 0], test[pred==0, 1], c='r', zorder=2, marker='x')\n ax.scatter(test[pred==1, 0], test[pred==1, 1], c='b', zorder=2, marker='x')\n\n xlim = (-5, 5)\n ylim = (-5, 5)\n xx, yy = np.meshgrid(np.linspace(xlim[0], xlim[1], 100),\n np.linspace(ylim[0], ylim[1], 100))\n Z = getScore(np.c_[xx.ravel(), yy.ravel()], theta)\n Z = Z.reshape(xx.shape)\n ax.contour(xx, yy, Z, [0.5], colors='k')\n # Z = model._predict_proba_lr(np.c_[xx.ravel(), yy.ravel()])\n # Z = Z[::, 1].reshape(xx.shape)\n # ax.contour(xx, yy, Z, [0.5], colors='k', alpha=0.10)\n plt.contourf(xx, yy, Z, cmap='jet', alpha=0.7)\n plt.colorbar()\n\n plt.show()\n","sub_path":"logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":3150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"451256104","text":"#!/usr/bin/env python3\n\nimport datetime\nimport unittest\nimport urllib.request\n\nimport hw4 as t\n\n\nTEST_LOG = 'ftp://shannon.usu.edu.ru/python/hw4/test.log'\n\n\nclass Test(unittest.TestCase):\n def setUp(self):\n with urllib.request.urlopen(TEST_LOG) as f:\n self.data = f.read().decode('utf-8').split('\\n')\n\n self.stat = t.make_stat()\n\n def test(self):\n for line in filter(lambda s: 'OPTION' not in s, self.data):\n self.stat.add_line(line)\n\n self.assertDictEqual(self.stat.results(), TEST)\n\n\nTEST = {\n 'FastestPage': '/img/r.png',\n 'MostActiveClient': '192.168.12.155',\n 'MostActiveClientByDay': {datetime.date(2012, 7, 8): '192.168.12.155'},\n 'MostPopularBrowser': 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; '\n 'Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR '\n '3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; '\n 'Tablet PC 2.0; .NET4.0C; .NET4.0E; InfoPath.3; '\n 'MS-RTC LM 8)',\n 'MostPopularPage': '/img/ao.gif',\n 'SlowestAveragePage': '/call_centr.php',\n 'SlowestPage': '/menu-top.php'}\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"518681325","text":"import report\n\ndictionary=open(\"/home/breeze/Downloads/2019S2-COMP90049_proj1-data/dict.txt\")\ncandidates=open(\"/home/breeze/Downloads/2019S2-COMP90049_proj1-data/report_3_repeat_character\")\nblends=open(\"/home/breeze/Downloads/2019S2-COMP90049_proj1-data/blends.txt\")\ncandidatesList=[]\nwordList=[]\nblendList=[]\nresultList=[]\nblendword = []\nfor token in candidates.readlines():\n token = token.rsplit()[0]\n candidatesList.append(token)\nfor word in dictionary.readlines():\n word = word.rsplit()[0]\n wordList.append(word)\n\nfor blend in blends.readlines():\n x=blend.split('\\t')\n blendword.append(x[0])\n x[2]=x[2].rsplit()[0]\n blendList.append(x)\n\njoResult=open(\"/home/breeze/Downloads/2019S2-COMP90049_proj1-data/3GramTotal.txt\",\"w\")\njaroResult = []\nfor token in candidatesList:\n\n maxDistance = 99\n maxWord =\"\"\n similar = []\n for word in wordList:\n dis = report.NGramDistance(token, word,3)\n if dis 0:\n # print(command)\n # print(res)\n mostRecent = sorted(res, key=itemgetter(1),reverse=True)[0]\n # print(mostRecent)\n return mostRecent\n else:\n return None\n # panFrame = pd.read_sql_query(command, self.conn)\n #return lanes open, lane time, lane delay\n\n pass\n\n\n\n\n\n\n","sub_path":"sqlLiteActions.py","file_name":"sqlLiteActions.py","file_ext":"py","file_size_in_byte":6246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"391333261","text":"#!/usr/bin/env python3\n\"\"\"\nfuzzable.py\n\n Binary Ninja helper plugin for fuzzable target discovery.\n\"\"\"\nimport os\n\nimport binaryninja\nimport binaryninja.log as log\nimport binaryninja.interaction as interaction\n\nfrom binaryninja.enums import SymbolType\nfrom binaryninja.plugin import BackgroundTaskThread, PluginCommand\nfrom binaryninja.settings import Settings\n\nfrom .analysis import FuzzableAnalysis\n\n# configurable settings to tune\nSettings().register_group(\"fuzzable\", \"Fuzzable\")\nSettings().register_setting(\n \"fuzzable.depth_threshold\",\n \"\"\"\n {\n \"title\" : \"Callgraph depth threshold\",\n \"description\" : \"Minimum number of levels in callgraph to be considered optimal for fuzzing.\",\n \"type\" : \"string\",\n \"default\" : \"100\"\n }\n\"\"\",\n)\n\nSettings().register_setting(\n \"fuzzable.loop_increase_score\",\n \"\"\"\n {\n \"title\" : \"Don't score natural loop presence\",\n \"description\" : \"Don't include natural loop as part of the fuzzability score\",\n \"type\" : \"boolean\",\n \"default\" : false\n }\n\"\"\",\n)\n\nSettings().register_setting(\n \"fuzzable.skip_stripped\",\n \"\"\"\n {\n \"title\" : \"Skip stripped functions for analysis\",\n \"description\" : \"Turn on if stripped functions are abundant and costly to analyze, and known to be irrelevant.\",\n \"type\" : \"boolean\",\n \"default\" : false\n }\n\"\"\",\n)\n\n\nclass WrapperTask(BackgroundTaskThread):\n def __init__(self, view):\n super(WrapperTask, self).__init__(\n \"Finding fuzzable targets in current binary view\"\n )\n self.view = view\n\n def run(self):\n funcs = self.view.functions\n log.log_info(f\"Starting target discovery against {len(funcs)} functions\")\n\n # final markdown table to be presented to user, with headers created first\n markdown_result = \"# Fuzzable Targets\\n | Function Name | Fuzzability | Coverage Depth | Has Loop? | Recursive Func? 
|\\n| :--- | :--- | :--- | :--- |\\n\"\n\n # append to CSV buffer if user chooses to export after analysis\n csv_out = '\"Name\", \"Stripped\", \"Interesting Name\", \"Interesting Args\", \"Depth\", \"Cycles\", \"Fuzzability\"\\n'\n\n # stores all parsed analysis objects\n parsed = []\n\n # iterate over each symbol\n for func in funcs:\n name = func.name\n symbol = func.symbol.type\n\n # ignore imported functions from other libraries, ie glibc or win32api\n if (symbol is SymbolType.ImportedFunctionSymbol) or (\n symbol is SymbolType.LibraryFunctionSymbol\n ):\n log.log_info(f\"Skipping analysis for known function {name}\")\n continue\n\n # ignore targets with patterns that denote some type of profiling instrumentation, ie stack canary\n if name.startswith(\"_\"):\n log.log_info(f\"Skipping analysis for function {name}\")\n continue\n\n # if set, ignore all stripped functions for faster analysis\n if (\"sub_\" in name) and Settings().get_bool(\"fuzzable.skip_stripped\"):\n log.log_info(f\"Skipping analysis for stripped function {name}\")\n continue\n\n # instantiate analysis of the given target\n analysis = FuzzableAnalysis(func)\n\n # if a loop is detected in the target, and it exists as part a callgraph,\n # set has_loop for that parent as well\n # TODO: cleanup and encapsulate in FuzzableAnalysis\n for prev in parsed:\n if analysis.has_loop and analysis.name in prev.visited:\n prev.has_loop = True\n\n parsed += [analysis]\n\n # sort parsed by highest fuzzability score and coverage depth\n parsed = sorted(parsed, key=lambda x: (x.fuzzability, x.depth), reverse=True)\n\n # add ranked results as rows to final markdown table and CSV if user chooses to export\n for analysis in parsed:\n markdown_result += analysis.markdown_row()\n csv_out += analysis.csv_row()\n\n # store CSV output to memory\n self.view.store_metadata(\"csv\", csv_out)\n\n # output report back to user\n self.view.show_markdown_report(\"Fuzzable targets\", markdown_result)\n\n\ndef 
run_fuzzable(view):\n \"\"\"Callback used to instantiate thread and start analysis\"\"\"\n task = WrapperTask(view)\n task.start()\n\n\ndef run_export_report(view):\n \"\"\"Generate a report from a previous analysis, and export as CSV\"\"\"\n log.log_info(\"Attempting to export results to CSV\")\n try:\n csv_output = view.query_metadata(\"csv\")\n except KeyError:\n interaction.show_message_box(\n \"Error\", \"Cannot export without running an analysis first.\"\n )\n return\n\n # write last analysis to filepath\n csv_file = interaction.get_save_filename_input(\"Filename to export as CSV?\", \"csv\")\n csv_file = csv_file.decode(\"utf-8\") + \".csv\"\n\n log.log_info(f\"Writing to filepath {csv_file}\")\n with open(csv_file, \"w+\") as fd:\n fd.write(csv_output)\n\n interaction.show_message_box(\"Success\", f\"Done, exported to {csv_file}\")\n\n\ndef run_harness_generation(view, func):\n \"\"\"Experimental automatic fuzzer harness generation support\"\"\"\n\n template_file = os.path.join(binaryninja.user_plugin_path(), \"fuzzable\")\n if view.view_type == \"ELF\":\n template_file += \"/templates/linux.cpp\"\n else:\n interaction.show_message_box(\n \"Error\",\n \"Experimental harness generation is only supported for ELFs at the moment\",\n )\n return\n\n # parse out template based on executable format, and start replacing\n with open(template_file, \"r\") as fd:\n template = fd.read()\n\n log.log_info(\"Replacing elements in template\")\n template = template.replace(\"{NAME}\", func.name)\n template = template.replace(\"{RET_TYPE}\", str(func.return_type))\n\n harness = interaction.get_save_filename_input(\"Filename to write to?\", \"cpp\")\n harness = csv_file.decode(\"utf-8\") + \".cpp\"\n\n log.log_info(\"Writing new template to workspace\")\n with open(harness, \"w+\") as fd:\n fd.write(template)\n\n interaction.show_message_box(\"Success\", f\"Done, wrote fuzzer harness to {harness}\")\n\n\nPluginCommand.register(\n \"Fuzzable\\\\Analyze fuzzable targets\",\n 
\"Identify and generate targets for fuzzing\",\n run_fuzzable,\n)\n\nPluginCommand.register(\n \"Fuzzable\\\\Export fuzzability report as CSV\",\n \"Identify and generate targets for fuzzing\",\n run_export_report,\n)\n\nPluginCommand.register_for_function(\n \"Fuzzable\\\\Generate fuzzing harness (EXPERIMENTAL, C/C++ ONLY)\",\n \"For a target function, generate a AFL/libFuzzer C++ harness\",\n run_harness_generation,\n)\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"300540905","text":"from networking_p4.services.service_drivers.default.services.abstract import AbstractService\nfrom oslo_log import log as logging\n\nLOG = logging.getLogger(__name__)\n\n\nclass UnconfigureModuleService(AbstractService):\n\n def __init__(self, rpc_client):\n super(UnconfigureModuleService, self).__init__(rpc_client)\n\n def handle(self, context):\n configuration = context.additional_context\n\n LOG.info(\"Handling ConfigureModule\")\n LOG.info(\"Config: \" + str(configuration['flow_rules']))\n for flow_rule in configuration['flow_rules']:\n table_id = flow_rule['table_id']\n table_entry = flow_rule['entry']\n self.rpc_client.ask_agent_to_delete_table_entry(self.rpc_ctx, table_id=table_id, table_entry=table_entry)\n","sub_path":"networking_p4/services/service_drivers/default/services/unconfigure_module.py","file_name":"unconfigure_module.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"641458725","text":"import torch, numpy\nimport os\nimport torch.nn as nn\nimport torchvision.models as models\nimport torchvision.transforms as transforms\nfrom torch.autograd import Variable \nfrom PIL import Image\nmodel = models.resnet34(pretrained=True)\nlayer = model._modules.get('avgpool')\nmodel.eval()\nscaler = transforms.Scale((224, 224))\nnormalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\nto_tensor = transforms.ToTensor()\n\ndef get_vector(image_name):\n\timg = Image.open(image_name).convert('RGB')\n\tt_img = Variable(normalize(to_tensor(scaler(img))).unsqueeze(0))\n\tmy_embedding = torch.zeros(1, 512, 1, 1)\n\tdef copy_data(m, i, o):\n\t\tmy_embedding.copy_(o.data)\n\th = layer.register_forward_hook(copy_data)\n\tmodel(t_img)\n\th.remove()\n\treturn my_embedding\n\nmodel = models.resnet34(pretrained=True)\nlayer = model._modules.get('avgpool')\nmodel.eval()\nscaler = transforms.Resize((224, 224))\nnormalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\nto_tensor = transforms.ToTensor()\nfor file in os.listdir('testing7/English')[21:]:\n print(file)\n if (\"json\" not in file):\n l = []\n with open('testing7/English/'+file+'/word.txt') as w:\n word = w.readline().strip().replace(' ', '_')\n print(word)\n for f in os.listdir('testing7/English/'+file):\n if (\"json\" not in f) and (\"txt\" not in f):\n try:\n x = get_vector('testing7/English/'+file+'/'+f).data.numpy()[0, :, 0, 0]\n l.append(x)\n with open('100/'+word, 'a') as f:\n numpy.savetxt(f, x.reshape(1, 512), fmt=\"%s\")\n except:\n continue\n average = numpy.zeros(512,)\n for embedding in l:\n average += embedding\n average = average / len(l)\n with open('100avg/'+word, 'a') as f:\n numpy.savetxt(f, x.reshape(1, 512), 
fmt=\"%s\")\n\n\n\n\t\t\n","sub_path":"extractembeddings.py","file_name":"extractembeddings.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"8857968","text":"import glob\nimport os\n\nf = open(\"data/test.txt\", \"a+\")\nf2 = open(\"data/train.txt\", \"a+\")\n\nfor files in glob.glob(\"data/test/*.jpg\"):\n #print(files)\n f.write(\"%s\\n\" % files.strip().replace(\"test\\\\\", \"images/\"))\n\nfor files in glob.glob(\"data/img/*.jpg\"):\n #print(files)\n f2.write(\"%s\\n\" % files.strip().replace(\"\\\\\", \"/\"))\n","sub_path":"x64/Release/label_generator.py","file_name":"label_generator.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"585966643","text":"import base64\r\nwith open(\"object-detection.jpg\", \"rb\") as img_file:\r\n my_string = base64.b64encode(img_file.read())\r\n#print(\"data:image/jpeg;base64,\"+str(my_string))\r\nimport requests\r\nphonenumber = \"917845671280\",\"919003366217\",\"919884915977\"\r\nbody = str(my_string,'utf-8')\r\nfor phone in phonenumber:\r\n print (phone)\r\n url = \"https://api.chat-api.com/instance241711/sendFile?token=xtomvz79mpfs1fer\"\r\n data = {\r\n \"phone\": phone,\r\n \"body\": \"data:image/jpeg;base64,\"+body,\r\n \"caption\": \"RED ALERT!! Intruder detected Please check the mail for more info\",\r\n \"filename\": \"detection.jpg\"\r\n }\r\n req = requests.post(url, json=data)\r\n print(req)","sub_path":"whatsappwithatt.py","file_name":"whatsappwithatt.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"539137888","text":"#!\"C:\\Program Files (x86)\\Ampps\\python\\python.exe\"\nprint (\"Content-type: text/html\\n\\n\")\n\nfrom functions import *\nimport os\n\nclass MyHTMLParser(HTMLParser):\n lsStartTags = list()\n lsEndTags = list()\n lsStartEndTags = list()\n lsComments = list()\n lsData = list()\n\n\n inRow=False\n inCell=False\n rows = []\n course = []\n schedule = []\n currentRow = []\n currentRowType = ''\n currentCRN=''\n colNum=0\n currentTag=''\n currentCell=''\n lineNum=0\n\n subDiv = 0\n inDiv = False\n currentDiv = ''\n currentDivNum = 0\n divs = []\n\n def handle_starttag(self, tag, attrs):\n print(\"Starttag\", tag)\n if self.inDiv:\n if tag==\"div\":\n self.subDiv+=1\n print(\"subdiv add:\", self.subDiv)\n\n self.currentTag=tag\n # self.lsStartTags.append(tag)\n if tag==\"div\":\n self.inDiv=True\n\n\n if self.inDiv:\n print(\"Attrs\", attrs)\n print(\"current div\", self.currentDiv)\n strAttrs=\"\"\n for a in attrs:\n strAttrs=a[0] + \"='\" + a[1] + \"'\"\n self.currentDiv += \"<\"+tag+\" \" + strAttrs+ \">\"\n self.currentDivNum+=1\n\n # if tag==\"tr\":\n # self.currentRow=[]\n # self.inRow=True\n # self.colNum=0\n # self.currentRowType=''\n # myPrint('IN ROW, length: ', len(self.rows))\n #\n # if self.inRow:\n # if tag==\"td\":\n # self.inCell=True\n #\n # for attr in attrs:\n # self.lsStartTags.append(tag)\n #\n # if tag == \"div class\":\n # input(\"found href....\\n\")\n\n def handle_endtag(self, tag):\n # myPrint(\"End tag :\", tag)\n\n if self.inDiv:\n self.currentDiv+= \"\" + tag + \">\"\n\n if tag==\"div\":\n if self.inDiv:\n if self.subDiv > 10:\n self.subDiv -= 1\n print(\"Subdiv minus:\", self.subDiv)\n else:\n self.divs.append(self.currentDiv)\n self.currentDiv=\"\"\n\n\n def handle_data(self, data):\n # print(\"Data:\", data)\n ignoreWhole = ['\\n', 'Click name to see CV', 'MM', 'DD', '/', '(', ')', 'P', '']\n ignoreIn = ['=', '\\n', '\\t', '/',\n 'Search']\n self.currentDiv+=data\n\n divNum=-1\n if self.inDiv:\n 
divNum=self.currentDivNum\n\n\n self.lsData.append((divNum, data))\n\n # myPrint(\"Data :\", data)\n if self.inCell:\n # input(\"here1:\" + data)\n self.currentCell += data.strip() #remove whitespace from begin and end of data\n # data=fixArray(data, ignoreIn, ignoreWhole)\n # self.currentRow = fixArray(self.currentRow, ignoreIn, ignoreWhole)\n # if 1 < len(data) < 20:\n # input(\"Press enter to continue...\")\n\n def handle_comment(self, data):\n # myPrint(\"Comment :\", data)\n pass\n\n\nhtmlFile = '0.html'\ndatFile = '0.dat'\n\nparser = MyHTMLParser()\nfixHTML(datFile)\n\nwith open (htmlFile) as f:\n htmlText = f.read()\n\n# website.batchValue(os.getcwd().replace('\\\\', '\\\\\\\\'))\n\nparser.feed(htmlText)\n\ndata = [x[1] for x in parser.lsData]\ndataNum = [x[0] for x in parser.lsData]\n\n\nremove = [\"\\n\"]\n\ndata = [x.replace(\"\\n\", \"\") for x in data]\ndata = fixArray(data, ['\\n', \"$\", \"@\"], [' ', '', '\\t', '\\t\\t'])\nmyPrint(data)\nmyPrint(len(data))\n# print(data)\n\n\nbool = False\n\nlookfor = ['jan', \"Jan\", \"feb\", \"Feb\", \"mar\", \"Mar\", \"apr\", \"Apr\", \"may\", \"May\", \"jun\", \"Jun\", \"jul\", \"Jul\", 'aug', \"Aug\", \"sep\", \"Sep\", 'oct', \"Oct\", 'nov', \"Nov\", \"dec\", \"Dec\", \"1/\", \"2/\", \"3/\", \"4/\", \"5/\", \"6/\", \"7/\", \"8/\", \"9/\", \"10/\", \"11/\", \"12/\"]\ndateList = []\n\n# print(\"Len Divs:\", len(parser.divs))\n\nfor counter ,text in enumerate(data):\n if (bool == False) and ((\"due\" in text) or (\"Due\" in text)):\n bool = True\n\n if bool:\n for month in lookfor: # Start point\n if month in text:\n # print(\"Count:\", counter)\n # dateList.append(text + \", \" + parser.divs[counter-10])\n dateList.append(text)\n bool = False\n\nmyPrint(dateList)\nmyPrint(len(dateList))\n\nfor i in dateList:\n print(i)\n\nprint()\nprint()\n\n\ndef getDateList():\n return dateList\n# 
print(parser.lsStartTags)\n","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":4339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"170003442","text":"import tweepy\n\nfrom bot import timetool, loggingservice, grabber\nfrom secret import keys\n\nbot_username = 'Tagesenergie-Twitterbot'\nlogfile_name = bot_username + \".log\"\n\n\ndef create_tweet():\n \"\"\"Creates the text of the tweet.\"\"\"\n\n try:\n text = \"Die Tagesenergie-Werte vom \" + timetool.get_date()\n text = text + \"\\nMagie-O-Meter: \" + grabber.get_magicvalue()\n text = text + \"\\nEnergie Impulswert: \" + grabber.get_energyimpulsvalue()\n text = text + \"\\nBewusstwerdungsindex: \" + grabber.get_consiousvalue()\n except AttributeError as ae:\n loggingservice.log(repr(ae), logfile_name)\n text = grabber.get_errortext()\n return text\n\n\ndef tweet(text):\n \"\"\"Send out the text as a tweet.\"\"\"\n # Twitter authentication\n auth = tweepy.OAuthHandler(keys.CONSUMER_KEY, keys.CONSUMER_SECRET)\n auth.set_access_token(keys.ACCESS_TOKEN, keys.ACCESS_SECRET)\n api = tweepy.API(auth)\n\n # Send the tweet and log success or failure\n try:\n api.update_status(text)\n except tweepy.error.TweepError as e:\n loggingservice.log(repr(e), logfile_name)\n else:\n loggingservice.log(\"Tweeted:\\n\" + text + \"\\n\", logfile_name)\n\n\nif __name__ == \"__main__\":\n tweet_text = create_tweet()\n tweet(tweet_text)\n","sub_path":"bot/botmain.py","file_name":"botmain.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"13197555","text":"#!/usr/bin/python\n\nfrom __future__ import print_function\nfrom parse import parse\nimport sys\nimport os\ntry:\n import cPickle as pickle\nexcept:\n import pickle\nimport uuid\nimport io\nimport tarfile\nimport shutil\nimport numpy as np\nfrom . import cfg\n\nfrom .util import docker_images_available, is_running_in_docker, \\\n get_docker_client\n\nclass MissingRequiredParameterError(Exception):\n\n \"\"\"Required parameter is not provided in feature function call.\"\"\"\n\n def __init__(self, value):\n self.value = value\n\n def __str__(self):\n return str(self.value)\n\n\nclass MissingRequiredReturnKeyError(Exception):\n\n \"\"\"Required return value is not provided in feature definition.\"\"\"\n\n def __init__(self, value):\n self.value = value\n\n def __str__(self):\n return str(self.value)\n\n\nclass myFeature(object):\n\n \"\"\"Decorator for custom-defined time series feature(s) function.\n\n Applies function wrapper that ensures required parameters and\n return values are present before executing, raising an exception if\n not.\n\n Attributes\n ----------\n requires : list\n List of names of features required for decorated function to\n execute.\n provides : list\n List of names of features generated by decorated function.\n\n \"\"\"\n\n def __init__(self, requires, provides):\n \"\"\"Instantiates object, sets args as attributes.\n\n Parameters\n ----------\n requires : list\n List of variable names required by the function.\n provides : list\n List of the key names of the returned dictionary - the\n features calculated by a particular function.\n\n \"\"\"\n self.requires = requires\n self.provides = provides\n\n def __call__(self, f):\n \"\"\"Wrap decorated function.\n\n Wrap decorated function with a check to ensure that required\n parameters (specified in decorator expression) are provided\n upon function call (raises MissingRequiredParameterError if\n not) and that all features reportedly returned (specified in\n decorator 
expression) are in fact returned (raises\n MissingRequiredReturnKeyError if not).\n\n Returns\n -------\n function\n The wrapped function.\n\n \"\"\"\n def wrapped_f(*args, **kwargs):\n for required_arg in self.requires:\n if required_arg not in args and required_arg not in kwargs:\n raise MissingRequiredParameterError(\n \"Required arg %s not provided in function call.\" %\n required_arg)\n result_dict = f(*args, **kwargs)\n for provided in self.provides:\n if provided not in result_dict:\n raise MissingRequiredReturnKeyError(\n \"Key %s not present in function return value.\" %\n provided)\n return result_dict\n return wrapped_f\n\n\nclass DummyFile(object):\n\n \"\"\"Used as a file object to temporarily redirect/suppress output.\"\"\"\n\n def write(self, x):\n pass\n\n\ndef parse_csv_file(fname, sep=',', skip_lines=0):\n \"\"\"Parse 2- or 3-column CSV file and return a list of its columns.\n\n Parameters\n ----------\n fname : str\n Absolute path to the CSV file.\n sep : str, optional\n Delimiter in TS data file, defaults to \",\".\n skip_lines : int, optional\n Number of leading lines to skip in file, defaults to 0.\n\n Returns\n -------\n list of list\n Two- or three-element list of lists of each of the columns. 
If\n `fname` is not a 2- or 3-column CSV file, returns list of three\n empty lists.\n\n \"\"\"\n with open(fname, \"r\") as f:\n ts_data = np.loadtxt(f, delimiter=\",\", skiprows=skip_lines)\n ts_data = ts_data[:, :3].tolist() # Only using T, M, E; convert to list\n for row in ts_data:\n if len(row) < 2:\n raise custom_exceptions.DataFormatError(\n \"Incomplete or improperly formatted time \"\n \"series data file provided.\")\n tme = list(map(list, zip(*ts_data))) # Need t, m, and e in separate lists\n if len(tme) == 2:\n tme.append([]) # Add empty err col\n return tme\n\n\ndef parse_for_req_prov_params(script_fpath):\n \"\"\"\n \"\"\"\n with open(script_fpath, \"r\") as f:\n all_lines = f.readlines()\n fnames_req_prov_dict = {}\n all_required_params = []\n all_provided_params = []\n for i in range(len(all_lines) - 1):\n if \"@myFeature\" in all_lines[i] and \"def \" in all_lines[i + 1]:\n reqs_provs_1 = parse(\n \"@myFeature(requires={requires}, provides={provides})\",\n all_lines[i].strip())\n func_name = parse(\n \"def {funcname}({args}):\", all_lines[i + 1].strip())\n fnames_req_prov_dict[func_name.named['funcname']] = {\n \"requires\": eval(reqs_provs_1.named[\"requires\"]),\n \"provides\": eval(reqs_provs_1.named[\"provides\"])}\n all_required_params = list(set(\n all_required_params +\n list(set(eval(reqs_provs_1.named[\"requires\"])))))\n all_provided_params = list(set(\n all_provided_params +\n list(set(eval(reqs_provs_1.named[\"provides\"])))))\n return (fnames_req_prov_dict, all_required_params, all_provided_params)\n\n\ndef listify_feats_known_dict(features_already_known):\n \"\"\"\n \"\"\"\n if isinstance(features_already_known, dict):\n return [features_already_known]\n elif isinstance(features_already_known, list):\n return features_already_known\n else:\n raise ValueError(\"custom_feature_tools.py - features_already_known\"\n \" is of an invalid type (%s).\" %\n str(type(features_already_known)))\n\n\ndef 
call_custom_functions(features_already_known_list, all_required_params,\n all_provided_params, fnames_req_prov_dict):\n \"\"\"\n \"\"\"\n # import the custom feature defs\n try:\n from .custom_feature_scripts import custom_feature_defs\n except ImportError:\n try:\n import custom_feature_defs\n except ImportError:\n raise\n\n # temporarily redirect stdout:\n save_stdout = sys.stdout\n sys.stdout = DummyFile()\n\n all_extracted_features_list = []\n for features_already_known in features_already_known_list:\n all_required_params_copy = [x for x in all_required_params\n if x not in features_already_known]\n for reqd_param in all_required_params_copy:\n if reqd_param not in all_provided_params:\n raise Exception((\n \"Not all of the required parameters are provided by the \"\n \"functions in this script (required parameter '%s').\") %\n str(reqd_param))\n funcs_round_1 = []\n func_queue = []\n funcnames = list(fnames_req_prov_dict.keys())\n i = 0\n func_rounds = {}\n all_extracted_features = {}\n while len(funcnames) > 0:\n func_rounds[str(i)] = []\n for funcname in funcnames:\n reqs_provs_dict = fnames_req_prov_dict[funcname]\n reqs = reqs_provs_dict['requires']\n provs = reqs_provs_dict['provides']\n if len(set(all_required_params_copy) & set(reqs)) > 0:\n func_queue.append(funcname)\n else:\n func_rounds[str(i)].append(funcname)\n all_required_params_copy = [x for x in all_required_params_copy\n if x not in provs]\n arguments = {}\n for req in reqs:\n if req in features_already_known:\n arguments[req] = features_already_known[req]\n elif req in all_extracted_features:\n arguments[req] = all_extracted_features[req]\n func_result = getattr(\n custom_feature_defs, funcname)(**arguments)\n all_extracted_features = dict(\n list(all_extracted_features.items()) +\n list(func_result.items()))\n funcnames.remove(funcname)\n i += 1\n all_extracted_features_list.append(all_extracted_features)\n # revert to original stdout\n sys.stdout = save_stdout\n return 
all_extracted_features_list\n\n\ndef execute_functions_in_order(\n script_fpath,\n features_already_known={\n \"t\": [1, 2, 3], \"m\": [1, 23, 2], \"e\": [0.2, 0.3, 0.2],\n \"coords\": [22, 33]},\n multiple_sources=False):\n \"\"\"Generate custom features defined in script_fpath.\n\n Parses the script (which must have function definitions with\n decorators specifying the required parameters and those which are\n provided by each function) and executes the functions defined in\n that script such that all functions whose outputs are required\n as inputs of other functions are called first, if possible,\n otherwise raises an Exception.\n\n Parameters\n ----------\n script_fpath : str\n Path to custom feature definitions script.\n features_already_known : dict\n Dictionary providing all time-series data (time (\"t\"), magnitude\n (\"m\"), error (\"e\") as keys) and any meta-features.\n Example:\n {\"t\": [1, 2, 3], \"m\": [10.32, 11.41, 11.06],\n \"e\": [0.2015,0.3134,0.2953], \"coords\": [22.55,33.01]}\n\n Returns\n -------\n dict\n Dictionary of all extracted features (key-value pairs are\n feature name and feature value respectively).\n\n \"\"\"\n # For when run inside Docker container:\n try:\n sys, os\n except NameError:\n import sys\n import os\n\n fnames_req_prov_dict, all_required_params, all_provided_params = \\\n parse_for_req_prov_params(script_fpath)\n features_already_known_list = listify_feats_known_dict(\n features_already_known)\n\n all_extracted_features_list = call_custom_functions(\n features_already_known_list, all_required_params, all_required_params,\n fnames_req_prov_dict)\n\n return all_extracted_features_list\n\n\ndef parse_tsdata_to_lists(ts_data):\n \"\"\"\n \"\"\"\n tme = []\n if isinstance(ts_data, list):\n if len(ts_data) > 0:\n if isinstance(ts_data[0], (list, tuple)):\n # ts_data already in desired format\n tme = ts_data\n elif isinstance(ts_data[0], (str, type(u''))) and \\\n \",\" in ts_data[0]:\n for el in ts_data:\n if str(el) not in 
[\"\\n\", \"\"]:\n tme.append(el.split(\",\"))\n else:\n raise ValueError(\"ts_data is an empty list\")\n elif isinstance(ts_data, (str, unicode)):\n all_lines = str(ts_data).strip().split(\"\\n\")\n for i in range(len(all_lines)):\n if all_lines[i].strip() == \"\":\n continue\n else:\n tme.append([x.strip()\n for x in all_lines[i].strip().split(\",\")])\n else:\n try:\n all_lines = str(ts_data).strip().split(\"\\n\")\n for i in range(len(all_lines)):\n if all_lines[i].strip() == \"\":\n continue\n else:\n tme.append([x.strip()\n for x in all_lines[i].strip().split(\",\")])\n except:\n pass\n return tme\n\n\ndef parse_tsdata_from_file(ts_datafile_path):\n \"\"\"\n \"\"\"\n with open(ts_datafile_path, \"r\") as f:\n ts_data = np.loadtxt(f, delimiter=\",\")\n ts_data = ts_data[:, :3].tolist() # Only using T, M, E; convert to list\n for row in ts_data:\n if len(row) < 2:\n raise custom_exceptions.DataFormatError(\n \"Incomplete or improperly formatted time \"\n \"series data file provided.\")\n return ts_data\n\n\ndef add_tsdata_to_feats_known_dict(features_already_known_list,\n ts_datafile_paths, ts_data_list):\n \"\"\"\n \"\"\"\n if ts_datafile_paths is None:\n ts_datafile_paths = [None] * len(features_already_known_list)\n elif ts_data_list is None:\n ts_data_list = [None] * len(features_already_known_list)\n for i in range(len(features_already_known_list)):\n if \"t\" not in features_already_known_list[i] or \\\n \"m\" not in features_already_known_list[i]:\n # Get TS data and put into features_already_known_list\n if ts_datafile_paths[i] is None and ts_data_list[i] is None:\n raise ValueError(\"No time series data provided! 
ts_datafile_paths \"\n \"is None and ts_data_list is None !!\")\n if ts_datafile_paths[i] is not None: # path to ts data file\n # parse ts data and put t,m(,e) into features_already_known\n tme = parse_tsdata_from_file(ts_datafile_paths[i])\n else: # ts_data passed directly\n tme = parse_tsdata_to_lists(ts_data_list[i])\n if len(tme) > 0:\n if all(len(this_tme) == 3 for this_tme in tme):\n T, M, E = list(zip(*tme))\n T = [float(el) for el in T]\n M = [float(el) for el in M]\n E = [float(el) for el in E]\n features_already_known_list[i][\"t\"] = T\n features_already_known_list[i][\"m\"] = M\n features_already_known_list[i][\"e\"] = E\n elif all(len(this_tme) == 2 for this_tme in tme):\n T, M = list(zip(*tme))\n T = [float(el) for el in T]\n M = [float(el) for el in M]\n features_already_known_list[i][\"t\"] = T\n features_already_known_list[i][\"m\"] = M\n else:\n raise Exception(\"custom_feature_tools.py - \"\n \"docker_extract_features() - not all elements \"\n \"of tme are the same length.\")\n\n\ndef make_tmp_dir():\n \"\"\"\n \"\"\"\n if os.path.exists(cfg.PROJECT_PATH_LINK):\n proj_path = cfg.PROJECT_PATH_LINK\n else:\n proj_path = cfg.PROJECT_PATH\n path_to_tmp_dir = os.path.join(proj_path, \"tmp\",\n str(uuid.uuid4())[:10])\n os.makedirs(path_to_tmp_dir)\n return path_to_tmp_dir\n\n\ndef generate_random_str():\n \"\"\"Generate random 10-character string using uuid.uuid4.\n \"\"\"\n return str(uuid.uuid4())[:10]\n\n\ndef copy_data_to_tmp_dir(path_to_tmp_dir, script_fpath,\n features_already_known_list):\n \"\"\"\n \"\"\"\n shutil.copy(script_fpath,\n os.path.join(path_to_tmp_dir, \"custom_feature_defs.py\"))\n with open(os.path.join(path_to_tmp_dir, \"features_already_known_list.pkl\"),\n \"wb\") as f:\n pickle.dump(features_already_known_list, f, protocol=2)\n # Create __init__.py file so that custom feats script can be imported\n open(os.path.join(path_to_tmp_dir, \"__init__.py\"), \"w\").close()\n return\n\n\ndef docker_copy(docker_client, container_id, 
path, target=\".\"):\n \"\"\"Copy file from docker container to host machine.\n\n Parameters\n ----------\n docker_client : docker.Client object\n The connected Docker client.\n container_id : str\n ID of the container to copy from.\n path : str\n Path to the file in the container.\n target : str\n Folder where to put the file.\n\n \"\"\"\n response = docker_client.copy(container_id, path)\n buffer = io.BytesIO()\n buffer.write(response.data)\n buffer.seek(0)\n tar = tarfile.open(fileobj=buffer, mode='r|')\n tar.extractall(path=target)\n\n\ndef extract_feats_in_docker_container(container_name, path_to_tmp_dir):\n \"\"\"\n \"\"\"\n tmp_data_dir = path_to_tmp_dir\n try:\n # Spin up Docker contain and extract custom feats\n # Instantiate Docker client\n client = get_docker_client()\n\n # Use symlink if one was created (in which case this is probably\n # being run in a Disco worker)\n if os.path.exists(cfg.PROJECT_PATH_LINK):\n proj_mount_path = cfg.PROJECT_PATH_LINK\n else:\n proj_mount_path = cfg.PROJECT_PATH\n # Create container\n cont_id = client.create_container(\n image=\"mltsp/base_disco\",\n command=\"python {}/run_script_in_container.py --{} --tmp_dir={}\".format(\n proj_mount_path, \"extract_custom_feats\", tmp_data_dir),\n tty=True,\n volumes=\"{}:{}\".format(\"\", proj_mount_path))[\"Id\"]\n\n # Start container\n client.start(cont_id,\n binds={proj_mount_path: {\"bind\": proj_mount_path,\n \"ro\": True}})\n # Wait for process to complete\n client.wait(cont_id)\n stdout = client.logs(container=cont_id, stdout=True)\n stderr = client.logs(container=cont_id, stderr=True)\n if str(stderr).strip() != \"\" and stderr != b'':\n print(\"\\n\\ndocker container stderr:\\n\\n\", str(stderr).strip(), \"\\n\\n\")\n # Copy pickled results data from Docker container to host\n docker_copy(client, cont_id, \"/tmp/results_list_of_dict.pkl\",\n target=path_to_tmp_dir)\n print(\"/tmp/results_list_of_dict.pkl copied to host machine.\")\n # Load pickled results data\n with 
open(os.path.join(path_to_tmp_dir, \"results_list_of_dict.pkl\"),\n \"rb\") as f:\n results_list_of_dict = pickle.load(f)\n return results_list_of_dict\n except:\n raise\n finally:\n # Kill and remove the container\n try:\n client.remove_container(container=cont_id, force=True)\n except UnboundLocalError:\n print(\"Error occurred in running Docker container.\")\n\n\ndef remove_tmp_files(path_to_tmp_dir):\n \"\"\"\n \"\"\"\n # Remove tmp dir\n shutil.rmtree(path_to_tmp_dir, ignore_errors=True)\n for tmp_file in (os.path.join(cfg.TMP_CUSTOM_FEATS_FOLDER,\n \"custom_feature_defs.py\"),\n os.path.join(cfg.TMP_CUSTOM_FEATS_FOLDER,\n \"custom_feature_defs.pyc\"),\n os.path.join(cfg.TMP_CUSTOM_FEATS_FOLDER,\n \"__init__.pyc\")):\n try:\n os.remove(tmp_file)\n except OSError:\n pass\n return\n\n\ndef docker_extract_features(\n script_fpath, features_already_known_list=[{}],\n ts_datafile_paths=None, ts_data_list=None):\n \"\"\"Extract custom features in a Docker container.\n\n Spins up a docker container in which custom script\n excecution/feature extraction is done inside. Resulting data are\n copied to host machine and returned as a dict.\n\n Parameters\n ----------\n script_fpath : str\n Path to script containing custom feature definitions.\n features_already_known_list : list of dict, optional\n List of dictionaries containing time series data (t,m,e) and\n any meta-features to be used in generating custom features.\n Defaults to []. NOTE: If omitted, or if \"t\" or \"m\" are not\n among contained dict keys, either (a) respective element of\n `ts_datafile_paths` or (b) `ts_data_list` (see below) MUST not\n be None, otherwise raises ValueError.\n ts_datafile_paths : list of str, optional\n List of paths to time-series CSV files. Defaults to None. 
NOTE:\n If None, either (a) corresponding element of\n `features_already_known_list` (see above) must contain \"t\"\n (time) and \"m\" (magnitude, or the measurement at each time)\n among its keys, OR (b) `ts_data_list` (see below) must be\n provided, otherwise raises ValueError.\n ts_data_list : list of list OR str, optional\n List of either (a) list of lists/tuples each containing t,m(,e)\n for each epoch, or (b) string containing equivalent comma-\n separated lines, each line being separated by a newline\n character (\"\\n\"). Defaults to None. NOTE: If None, either\n `ts_datafile_paths` must not be None or \"t\" (time) and \"m\"\n (magnitude/measurement) must be among the keys of\n respective element of `features_already_known_list` (see\n above), otherwise raisesValueError.\n\n Returns\n -------\n list of dict\n List of dictionaries of all generated features.\n\n \"\"\"\n if isinstance(features_already_known_list, dict):\n features_already_known_list = [features_already_known_list]\n add_tsdata_to_feats_known_dict(features_already_known_list,\n ts_datafile_paths, ts_data_list)\n container_name = generate_random_str()\n path_to_tmp_dir = make_tmp_dir()\n\n copy_data_to_tmp_dir(path_to_tmp_dir, script_fpath,\n features_already_known_list)\n\n try:\n results_list_of_dict = extract_feats_in_docker_container(\n container_name, path_to_tmp_dir)\n except:\n raise\n finally:\n remove_tmp_files(path_to_tmp_dir)\n return results_list_of_dict\n\n\ndef assemble_test_data():\n \"\"\"\n \"\"\"\n features_already_known_list = []\n fname = os.path.join(cfg.SAMPLE_DATA_PATH, \"dotastro_215153.dat\")\n t, m, e = parse_csv_file(fname)\n features_already_known_list.append(\n {\"t\": t, \"m\": m, \"e\": e, \"coords\": [0, 0]})\n features_already_known_list.append(\n {\"t\": [1, 2, 3], \"m\": [50, 51, 52], \"e\": [0.3, 0.2, 0.4],\n \"coords\": [-11, -55]})\n features_already_known_list.append(\n {\"t\": [1], \"m\": [50], \"e\": [0.3], \"coords\": 2})\n return 
features_already_known_list\n\n\ndef verify_new_script(script_fpath, docker_container=False):\n \"\"\"Test custom features script and return generated features.\n\n Performs test run on custom feature def script with trial time\n series data sets and returns list of dicts containing extracted\n features if successful, otherwise raises an exception.\n\n Parameters\n ----------\n script_fpath : str\n Path to custom feature definitions script.\n docker_container : bool, optional\n Boolean indicating whether function is being called from within\n a Docker container.\n\n Returns\n -------\n list of dict\n List of dictionaries of extracted features for each of the trial\n time-series data sets.\n\n \"\"\"\n features_already_known_list = assemble_test_data()\n\n all_extracted_features_list = []\n if docker_images_available():\n print(\"Extracting features inside docker container...\")\n all_extracted_features_list = docker_extract_features(\n script_fpath=script_fpath,\n features_already_known_list=features_already_known_list)\n else:\n print(\"Docker not installed - running custom features script could be \"\n \"unsafe. 
Skipping generation of custom features.\")\n return []\n return all_extracted_features_list\n\n\ndef list_features_provided(script_fpath):\n \"\"\"Parses script and returns a list of all features it provides.\n\n Parses decorator expression in custom feature definitions script,\n returning a list of all feature names generated by the various\n definitions in that script.\n\n Parameters\n ----------\n script_fpath : str\n Path to custom features definition script.\n\n Returns\n -------\n list of str\n List of feature names that the script will generate.\n\n \"\"\"\n with open(script_fpath, \"r\") as f:\n all_lines = f.readlines()\n fnames_req_prov_dict = {}\n all_required_params = []\n all_provided_params = []\n for i in range(len(all_lines) - 1):\n if \"@myFeature\" in all_lines[i] and \"def \" in all_lines[i + 1]:\n reqs_provs_1 = parse(\n \"@myFeature(requires={requires}, provides={provides})\",\n all_lines[i].strip())\n func_name = parse(\n \"def {funcname}({args}):\", all_lines[i + 1].strip())\n fnames_req_prov_dict[func_name.named['funcname']] = {\n \"requires\": eval(reqs_provs_1.named[\"requires\"]),\n \"provides\": eval(reqs_provs_1.named[\"provides\"])}\n all_required_params = list(set(\n all_required_params +\n list(set(eval(reqs_provs_1.named[\"requires\"])))))\n all_provided_params = list(set(\n all_provided_params +\n list(set(eval(reqs_provs_1.named[\"provides\"])))))\n return all_provided_params\n\n\ndef generate_custom_features(\n custom_script_path, path_to_csv=None, features_already_known={},\n ts_data=None):\n \"\"\"Generate custom features for provided TS data and script.\n\n Parameters\n ----------\n custom_script_path : str\n Path to custom features script.\n path_to_csv : str, optional\n Path to CSV file containing time-series data. 
Defaults to None.\n If None, ts_data (see below) must not be None, otherwise\n raises an Exception.\n features_already_known : dict, optional\n List of dicts containing any meta-features associated with\n provided time-series data. Defaults to [].\n ts_data : list OR tuple, optional\n List (or tuple) of lists (or tuples) containing time,\n measurement (and optionally associated error values) data.\n Defaults to None. If None, path_to_csv must not be None,\n otherwise raises an Exception.\n\n Returns\n -------\n list of dict\n List of dictionaries containing newly-generated features.\n\n \"\"\"\n if path_to_csv:\n t, m, e = parse_csv_file(path_to_csv)\n elif ts_data:\n if len(ts_data[0]) == 3:\n t, m, e = list(zip(*ts_data))\n if len(ts_data[0]) == 2:\n t, m = list(zip(*ts_data))\n elif \"t\" not in features_already_known or \"m\" not in features_already_known:\n print(\"predict_class.predict:\")\n print(\"path_to_csv:\", path_to_csv)\n print(\"ts_data:\", ts_data)\n raise Exception(\"Neither path_to_csv nor ts_data provided...\")\n if \"t\" not in features_already_known:\n features_already_known['t'] = t\n if \"m\" not in features_already_known:\n features_already_known['m'] = m\n if e and len(e) == len(m) and \"e\" not in features_already_known:\n features_already_known['e'] = e\n\n if is_running_in_docker():\n all_new_features = execute_functions_in_order(\n features_already_known=features_already_known,\n script_fpath=custom_script_path)\n else:\n if docker_images_available():\n print(\"Generating custom features inside docker container...\")\n all_new_features = docker_extract_features(\n script_fpath=custom_script_path,\n features_already_known_list=features_already_known)\n else:\n print(\"Generating custom features WITHOUT docker container...\")\n all_new_features = execute_functions_in_order(\n features_already_known=features_already_known,\n script_fpath=custom_script_path)\n\n return 
all_new_features\n","sub_path":"mltsp/custom_feature_tools.py","file_name":"custom_feature_tools.py","file_ext":"py","file_size_in_byte":27333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"141239718","text":"import os.path\nfrom os.path import basename\nimport math\nimport numpy as np\nimport pandas as pd\nimport logging\n\nfrom keras.models import Model\nfrom keras.layers import Input, Dense\nfrom keras import optimizers\n\nfrom sklearn.model_selection import train_test_split\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\n\nfrom dfpl import options\nfrom dfpl import history as ht\nfrom dfpl import settings\n\n\ndef define_ac_model(\n input_size: int = 2048,\n encoding_dim: int = 256,\n my_loss: str = \"binary_crossentropy\",\n my_lr: float = 0.001,\n my_decay: float = 0.01) -> (Model, Model):\n \"\"\"\n This function provides an autoencoder model to reduce a certain input to a compressed version.\n\n :param encoding_dim: Size of the compressed representation. Default: 85\n :param input_size: Size of the input. Default: 2048\n :param my_loss: Loss function, see Keras Loss functions for potential values. Default: binary_crossentropy\n :param my_lr:\n :param my_decay:\n :return: a tuple of autoencoder and encoder models\n \"\"\"\n\n ac_optimizer = optimizers.Adam(learning_rate=my_lr,\n decay=my_decay)\n\n # get the number of meaningful hidden layers (latent space included)\n hidden_layer_count = round(math.log2(input_size / encoding_dim))\n\n # the input placeholder\n input_vec = Input(shape=(input_size,))\n\n # 1st hidden layer, that receives weights from input layer\n # equals bottle neck layer, if hidden_layer_count==1!\n encoded = Dense(units=int(input_size / 2),\n activation='relu')(input_vec)\n\n if hidden_layer_count > 1:\n # encoding layers, incl. 
bottle neck\n for i in range(1, hidden_layer_count):\n factor_units = 2 ** (i + 1)\n # print(f'{factor_units}: {int(input_size / factor_units)}')\n encoded = Dense(units=int(input_size / factor_units),\n activation='relu')(encoded)\n\n # 1st decoding layer\n factor_units = 2 ** (hidden_layer_count - 1)\n decoded = Dense(units=int(input_size / factor_units),\n activation='relu')(encoded)\n\n # decoding layers\n for i in range(hidden_layer_count - 2, 0, -1):\n factor_units = 2 ** i\n # print(f'{factor_units}: {int(input_size/factor_units)}')\n decoded = Dense(units=int(input_size / factor_units),\n activation='relu')(decoded)\n\n # output layer\n # The output layer needs to predict the probability of an output which needs\n # to either 0 or 1 and hence we use sigmoid activation function.\n decoded = Dense(units=input_size,\n activation='sigmoid')(decoded)\n\n else:\n # output layer\n decoded = Dense(units=input_size,\n activation='sigmoid')(encoded)\n\n autoencoder = Model(input_vec, decoded)\n encoder = Model(input_vec, encoded)\n\n autoencoder.summary(print_fn=logging.info)\n encoder.summary(print_fn=logging.info)\n\n # We compile the autoencoder model with adam optimizer.\n # As fingerprint positions have a value of 0 or 1 we use binary_crossentropy as the loss function\n autoencoder.compile(optimizer=ac_optimizer,\n loss=my_loss)\n\n return autoencoder, encoder\n\n\ndef autoencoder_callback(checkpoint_path: str) -> list:\n \"\"\"\n Callbacks for fitting the autoencoder\n\n :param checkpoint_path: The output directory to store the checkpoint weight files\n :return: List of ModelCheckpoint and EarlyStopping class.\n \"\"\"\n\n # enable this checkpoint to restore the weights of the best performing model\n checkpoint = ModelCheckpoint(checkpoint_path,\n verbose=1,\n period=settings.ac_train_check_period,\n save_best_only=True,\n mode='min',\n save_weights_only=True)\n\n # enable early stopping if val_loss is not improving anymore\n early_stop = 
EarlyStopping(patience=settings.ac_train_patience,\n min_delta=settings.ac_train_min_delta,\n verbose=1,\n restore_best_weights=True)\n\n return [checkpoint, early_stop]\n\n\ndef train_full_ac(df: pd.DataFrame, opts: options.TrainOptions) -> Model:\n \"\"\"\n Train an autoencoder on the given feature matrix X. Response matrix is only used to\n split meaningfully in test and train data set.\n\n :param opts: Command line arguments as defined in options.py\n :param df: Pandas dataframe that contains the smiles/inchi data for training the autoencoder\n :return: The encoder model of the trained autoencoder\n \"\"\"\n\n # Set up the model of the AC w.r.t. the input size and the dimension of the bottle neck (z!)\n (autoencoder, encoder) = define_ac_model(input_size=opts.fpSize,\n encoding_dim=opts.encFPSize)\n\n # define output file for autoencoder and encoder weights\n if opts.ecWeightsFile == \"\":\n logging.info(\"No AC encoder weights file specified\")\n base_file_name = os.path.splitext(basename(opts.inputFile))[0]\n logging.info(f\"(auto)encoder weights will be saved in {base_file_name}.[auto]encoder.hdf5\")\n ac_weights_file = os.path.join(opts.outputDir, base_file_name + \".autoencoder.hdf5\")\n ec_weights_file = os.path.join(opts.outputDir, base_file_name + \".encoder.hdf5\")\n else:\n logging.info(f\"AC encoder will be saved in {opts.ecWeightsFile}\")\n base_file_name = os.path.splitext(basename(opts.ecWeightsFile))[0]\n ac_weights_file = os.path.join(opts.outputDir, base_file_name + \".autoencoder.hdf5\")\n ec_weights_file = os.path.join(opts.outputDir, opts.ecWeightsFile)\n\n # collect the callbacks for training\n callback_list = autoencoder_callback(checkpoint_path=ac_weights_file)\n\n # Select all fps that are valid and turn them into a numpy array\n # This step is crucial for speed!!!\n fp_matrix = np.array(df[df[\"fp\"].notnull()][\"fp\"].to_list(),\n dtype=settings.ac_fp_numpy_type,\n copy=settings.numpy_copy_values)\n logging.info(f\"Training AC on a 
matrix of shape {fp_matrix.shape} with type {fp_matrix.dtype}\")\n\n # split data into test and training data\n x_train, x_test = train_test_split(fp_matrix,\n test_size=0.2,\n random_state=42)\n logging.info(f\"AC train data shape {x_train.shape} with type {x_train.dtype}\")\n logging.info(f\"AC test data shape {x_test.shape} with type {x_test.dtype}\")\n\n auto_hist = autoencoder.fit(x_train, x_train,\n callbacks=callback_list,\n epochs=opts.epochs,\n batch_size=256,\n verbose=opts.verbose,\n validation_data=(x_test, x_test))\n logging.info(f\"Autoencoder weights stored in file: {ac_weights_file}\")\n\n ht.store_and_plot_history(base_file_name=os.path.join(opts.outputDir, base_file_name + \".AC\"),\n hist=auto_hist)\n\n encoder.save_weights(ec_weights_file)\n logging.info(f\"Encoder weights stored in file: {ec_weights_file}\")\n\n return encoder\n\n\ndef compress_fingerprints(dataframe: pd.DataFrame,\n encoder: Model) -> pd.DataFrame:\n \"\"\"\n Adds a column of the compressed version of the fingerprints to the original dataframe.\n\n :param dataframe: Dataframe containing a column named 'fp' with the fingerprints\n :param encoder: The trained autoencoder that is used for compressing the fingerprints\n :return: The input dataframe extended by a column containing the compressed version of the fingerprints\n \"\"\"\n logging.info(\"Adding compressed fingerprints\")\n idx = dataframe[dataframe[\"fp\"].notnull()].index\n fp_matrix = np.array(dataframe[dataframe[\"fp\"].notnull()][\"fp\"].to_list(),\n dtype=settings.ac_fp_numpy_type,\n copy=settings.numpy_copy_values)\n logging.info(f\"Using input matrix of shape {fp_matrix.shape} with type {fp_matrix.dtype}\")\n logging.info(\"Compressed fingerprints are added to input dataframe.\")\n dataframe['fpcompressed'] = pd.DataFrame({'fpcompressed': [s for s in encoder.predict(fp_matrix)]}, idx)\n\n return 
dataframe\n","sub_path":"dfpl/autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":8548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"163223545","text":"import sys\r\n\r\nsys.path.append('./')\r\nimport os\r\nos.environ['CUDA_VISIBLE_DEVICES'] = '2'\r\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\r\nos.environ['KMP_WARNINGS'] = '0'\r\nimport logging\r\n\r\nlogging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\r\n\r\nfrom src_gat_fusion2.data_loader.patient_loader_supra_hiv import PatientLoader\r\nfrom src_gat_fusion2.models.gat_fusion2 import GAT\r\nfrom src_gat_fusion2.trainers.fusion2_trainer import GraphTrainer\r\nfrom src_gat_fusion2.utils.config import get_config_from_json, update_config_by_summary, update_config_by_datasize\r\nfrom src_gat_fusion2.utils.dirs import create_dirs\r\nfrom src_gat_fusion2.utils.logger import Logger\r\nfrom src_gat_fusion2.utils.utils import get_args\r\nfrom pathlib import Path\r\nimport shutil\r\nimport pickle as pkl\r\n\r\nimport tensorflow as tf\r\n\r\ntf.compat.v1.random.set_random_seed(1234)\r\n\r\n\r\ndef main():\r\n # capture the config path from the run arguments\r\n # then process the json configuration file\r\n\r\n args = get_args()\r\n print(\"getting config from {}\".format(args.config))\r\n config, _ = get_config_from_json(args.config)\r\n config = update_config_by_summary(config) # add summary and model directory\r\n # if remove the previous results, set -d 1\r\n print(\"If delete previous checkpoints {}\".format(args.delete))\r\n if args.delete == '1':\r\n # delete existing model and summaries\r\n print('Deleting existing models and logs from:')\r\n # best_model_dir is under model dir\r\n print(config.summary_dir, config.model_dir, config.best_model_dir)\r\n path = Path(config.summary_dir)\r\n shutil.rmtree(path)\r\n path = Path(config.model_dir)\r\n shutil.rmtree(path)\r\n path = Path(config.best_model_dir)\r\n shutil.rmtree(path)\r\n\r\n # create the experiments dirs\r\n # summary dir, model dir defined in json ?\r\n create_dirs([config.summary_dir, config.model_dir, config.best_model_dir])\r\n\r\n # create your data generator to load 
train data\r\n\r\n print(\"Training using {}\".format(config.model_version))\r\n\r\n Model = GAT\r\n Trainer = GraphTrainer\r\n\r\n feature_path = config.exp_dir + config.ind_feature_path\r\n train_mask_path = config.exp_dir + config.train_mask_path\r\n test_mask_path = config.exp_dir + config.test_mask_path\r\n sex_adj_path = config.sex_adj_path\r\n venue_adj_path = config.venue_adj_path\r\n graph_feature_path = config.exp_dir + config.graph_feature_path\r\n psk2index_path = config.exp_dir + config.psk2index_path\r\n\r\n # 10 random realizations of train-test split and average, no valid is needed\r\n train_loader = PatientLoader(config, feature_path, sex_adj_path, venue_adj_path, train_mask_path,\r\n graph_feature_path, psk2index_path, is_train=True)\r\n train_loader.load()\r\n\r\n test_loader = PatientLoader(config, feature_path, sex_adj_path, venue_adj_path, test_mask_path, graph_feature_path,\r\n psk2index_path, is_train=False)\r\n test_loader.load()\r\n\r\n # add num_iter_per_epoch to config for trainer\r\n config = update_config_by_datasize(config, train_loader.get_datasize(),\r\n test_loader.get_datasize(),\r\n train_loader.get_feature_size())\r\n\r\n # tfconfig = tf.ConfigProto(device_count={'CPU': 0})\r\n tfconfig = tf.ConfigProto()\r\n tfconfig.gpu_options.allow_growth = True\r\n tfconfig.gpu_options.per_process_gpu_memory_fraction = 0.4\r\n\r\n # create tensorflow session\r\n with tf.Session(config=tfconfig) as sess:\r\n # create an instance of the model you want\r\n model = Model(config)\r\n # create tensorboard logger\r\n logger = Logger(sess, config)\r\n # create trainer and pass all the previous components to it\r\n trainer = Trainer(sess, model, train_loader, test_loader, config, logger)\r\n # load model if exists\r\n # model.load(sess)\r\n # here you train your model\r\n trainer.train()\r\n\r\n # tester\r\n\r\n\r\nif __name__ == '__main__':\r\n 
main()\r\n","sub_path":"src_gat_fusion2/mains/fusion2_main_for_hiv.py","file_name":"fusion2_main_for_hiv.py","file_ext":"py","file_size_in_byte":4049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"512568131","text":"import unittest\nfrom lost_hat_login_tests import LostHatLoginTests\nfrom lost_hat_front_page_tests import LostHatFrontPageTests\n\n\ndef sanity_suite():\n test_suite = unittest.TestSuite()\n test_suite.addTest(LostHatLoginTests('test_logging_positive'))\n test_suite.addTest(unittest.makeSuite(LostHatFrontPageTests))\n return test_suite\n\n\nif __name__ == '__main__':\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(sanity_suite())\n","sub_path":"pt4_selenium_tests/testsuite_sanity_tests.py","file_name":"testsuite_sanity_tests.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"184159351","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019-11-07 09:40\n# @Author : Lqq/linqingqing\n# @Site : \n# @File : web_chinaz.py\n# @Software: PyCharm\n\nfrom urllib.parse import urlencode\n\nclass WebUrl:\n def __init__(self):\n # 设置请求头,模拟浏览器访问\n self.headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',\n }\n\n def get_url(self, keyword, page):\n data = {\n 'wd': 'site:' + keyword, # 修改关键字\n 'pn': page * 10, # 页数\n }\n # 把字典对象转化为url的请求参数\n url = 'https://www.baidu.com/s?' + urlencode(data)\n return url\n\nif __name__ == \"__main__\":\n WebUrl().get_web()","sub_path":"util/web_subdomain.py","file_name":"web_subdomain.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"178071316","text":"import inspect\nimport os\nimport scipy.io as sio\nimport subprocess\nimport xarray as xr\n\nfrom pathlib import Path\n\nimport aurora\nimport mt_metadata\n\ninit_file = inspect.getfile(aurora)\nAURORA_PATH = Path(init_file).parent.parent\nTEST_PATH = AURORA_PATH.joinpath(\"tests\")\nSANDBOX = AURORA_PATH.joinpath(\"aurora\", \"sandbox\")\nCONFIG_PATH = AURORA_PATH.joinpath(\"aurora\", \"config\")\nBAND_SETUP_PATH = CONFIG_PATH.joinpath(\"emtf_band_setup\")\nDATA_PATH = SANDBOX.joinpath(\"data\")\nDATA_PATH.mkdir(exist_ok=True, parents=True)\nFIGURES_PATH = DATA_PATH.joinpath(\"figures\")\nFIGURES_PATH.mkdir(exist_ok=True, parents=True)\nTEST_BAND_FILE = DATA_PATH.joinpath(\"bandtest.nc\")\nmt_metadata_init = inspect.getfile(mt_metadata)\nMT_METADATA_DATA = Path(mt_metadata_init).parent.parent.joinpath(\"data\")\n\n\ndef execute_subprocess(cmd, **kwargs):\n \"\"\"\n\n Parameters\n ----------\n cmd : string\n command as it would be typed in a terminal\n kwargs\n\n Returns\n -------\n\n \"\"\"\n \"\"\"\n A wrapper for subprocess.call\n \"\"\"\n exit_status = subprocess.call([cmd], shell=True, **kwargs)\n if exit_status != 0:\n raise Exception(\"Failed to execute \\n {}\".format(cmd))\n return\n\n\ndef execute_command(cmd, **kwargs):\n \"\"\"\n Executes command in terminal from script.\n\n Parameters:\n cmd (str): command to exectute from a terminal\n kwargs: exec_dir (str): the directory from which to execute\n kwargs: no_exception: suppress output if exception\n\n Other Parameters:\n exit_status: :code:`0` is good, otherwise there is some problem\n\n .. note:: When executing :code:`rm *` this crashes if the directory we are removing\n from is empty\n\n .. 
note:: if you can you should probably use execute_subprocess() instead\n \"\"\"\n exec_dir = kwargs.get(\"exec_dir\", os.path.expanduser(\"~/\"))\n allow_exception = kwargs.get(\"allow_exception\", True)\n print(\"executing from {}\".format(exec_dir))\n cwd = os.getcwd()\n os.chdir(exec_dir)\n exit_status = os.system(cmd)\n if exit_status != 0:\n print(f\"exit_status of {cmd} = {exit_status}\")\n if allow_exception:\n raise Exception(f\"Failed to successfully execute \\n {cmd}\")\n os.chdir(cwd)\n\n\n# \ndef save_complex(data_array, *args, **kwargs):\n \"\"\"\n netcdf and h5 do not handle complex values. This method is a workaround.\n https://stackoverflow.com/questions/47162983/how-to-save-xarray-dataarray-with-complex128-data-to-netcdf\n Example Usage:\n band_da is an xarray\n save_complex(band_da, TEST_BAND_FILE)\n band_da = read_complex(TEST_BAND_FILE)\n\n Parameters\n ----------\n data_array\n args\n kwargs\n\n Returns\n -------\n\n \"\"\"\n ds = xr.Dataset({\"real\": data_array.real, \"imag\": data_array.imag})\n return ds.to_netcdf(*args, **kwargs)\n\n\ndef read_complex(*args, **kwargs):\n ds = xr.open_dataset(*args, **kwargs)\n return ds[\"real\"] + ds[\"imag\"] * 1j\n\n\n# \n\n\ndef save_to_mat(data, variable_name, filename):\n \"\"\"\n Example Usage:\n x = X.to_array(dim=\"channel\")\n save_to_mat(x.data, \"x\", \"x.mat\")\n\n Reading into matlab or Octave:\n tmp = load(\"x.mat\");\n data = tmp.x;\n\n Parameters\n ----------\n data : numpy array\n the data to save to file. 
its fine if this is complex-valued.\n variable_name : string\n The name that we use to reference the variable within the struct in the matfile.\n filename : string\n The filepath to output\n\n Returns\n -------\n\n \"\"\"\n sio.savemat(filename, {variable_name: data})\n return\n","sub_path":"aurora/general_helper_functions.py","file_name":"general_helper_functions.py","file_ext":"py","file_size_in_byte":3618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"303777303","text":"# -*- coding: utf-8 -*-\n# @Time : Thu Mar 7 14:52:23 2019\n# @Author : Yao Qiang\n# @Email : qiangyao1988wsu@gmail.com\n# @File : TrainSet.py\n# @Software: Spyder\n# @Pythpon Version: python3.6\n\n\nimport numpy as np\nimport torch\nimport torch.utils.data as data\n\n\nclass TrainSet(data.Dataset):\n '''\n Create data loader\n '''\n def __init__(self, eval=False):\n \n # load data and label\n datas = np.load('../dataset/data.npy')\n labels = np.load('../dataset/label.npy')\n \n index = np.arange(0, len(datas), 1, dtype=np.int)\n \n # set random seed to make sure everytime we get the same subset\n np.random.seed(123)\n np.random.shuffle(index)\n \n # if eval is true, get 10% of data as cross validation dataset\n if eval:\n index = index[:int(len(datas) * 0.1)]\n else:\n index = index[int(len(datas) * 0.1):]\n \n self.data = datas[index]\n self.label = labels[index]\n np.random.seed()\n\n def __getitem__(self, index):\n return torch.from_numpy(self.data[index]),torch.from_numpy(self.label[index])\n\n def __len__(self):\n return len(self.data)\n ","sub_path":"scripts/TrainSet.py","file_name":"TrainSet.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"205528741","text":"from nnf import Var,true\nfrom lib204 import Encoding\n\nfrom nnf import NNF\nfrom nnf.operators import iff\n\ndef implication(l, r):\n return l.negate() | r\n\ndef neg(f):\n return f.negate()\n\nNNF.__rshift__ = implication\nNNF.__invert__ = neg\n\ndef iff(left, right):\n return (left.negate() | right) & (right.negate() | left)\n\n# Variable Declerations of the three possible outcomes in a model of Connect Four\nBlackWin = Var(\"Black has Won the Game\")\nRedWin = Var(\"Red has Won the Game\")\nNoWin = Var(\"No one has Won the Game\")\n\n# ConnectFour Game Board Dimestions\nrowNum = 6\ncolumnNum = 7\n\n# Creating variable boards for each color peice, empty peice, and partialCount variables. \nblackBoard=[]\nredBoard=[]\nemptyBoard=[]\nblackPartialCount=[]\nredPartialCount=[]\nfor i in range(rowNum): \n blackBoard.append([])\n redBoard.append([])\n emptyBoard.append([])\n blackPartialCount.append([])\n redPartialCount.append([])\n for j in range(columnNum):\n blackBoard[i].append(Var(f\"Black({i},{j})\"))\n redBoard[i].append(Var(f\"Red({i},{j})\"))\n emptyBoard[i].append(Var(f\"Empty({i},{j})\"))\n blackPartialCount[i].append([])\n redPartialCount[i].append([])\n for k in range(rowNum * columnNum + 1):\n blackPartialCount[i][j].append(Var(f\"Black Count at({i},{j}) is {k}\"))\n redPartialCount[i][j].append(Var(f\"Red Count at({i},{j}) is {k}\"))\n\n# Creating total piece count varaiable board\ntotalCount=[] \nfor i in range(rowNum * columnNum + 1):\n totalCount.append(Var(f\"Total Black Count is {i}\"))\n\n# Creating red and black row wins varaiable boards\nblackRow=[]\nredRow=[]\nfor i in range(rowNum): \n blackRow.append([])\n redRow.append([])\n for j in range(columnNum - 3):\n blackRow[i].append(Var(f\"BlackWinningRow({i},{j})\"))\n redRow[i].append(Var(f\"RedWinningRow({i},{j})\"))\n\n\n# Creating red and black diagonal wins varaiable 
boards\nleftBlackDiagonal=[]\nrightBlackDiagonal=[]\nleftRedDiagonal=[]\nrightRedDiagonal=[]\nfor i in range(rowNum- 3): \n leftBlackDiagonal.append([])\n rightBlackDiagonal.append([])\n leftRedDiagonal.append([])\n rightRedDiagonal.append([])\n for j in range(columnNum - 3):\n leftBlackDiagonal[i].append(Var(f\"LeftBlackWinningDiagonal({i},{j})\"))\n rightBlackDiagonal[i].append(Var(f\"RightBlackWinningDiagonal({i},{j})\"))\n leftRedDiagonal[i].append(Var(f\"LeftRedWinningDiagonal({i},{j})\"))\n rightRedDiagonal[i].append(Var(f\"RightRedWinningDiagonal({i},{j})\"))\n\n# Creating red and black column wins varaiable boards\nblackColumn=[]\nredColumn=[]\nfor i in range(rowNum- 3): \n blackColumn.append([])\n redColumn.append([])\n for j in range(columnNum):\n blackColumn[i].append(Var(f\"BlackWinningColumn({i},{j})\"))\n redColumn[i].append(Var(f\"RedWinningColumn({i},{j})\"))\n\n# Adds/creates constriants for a row of color, boardColor \ndef rowWin(E, winRowColor, boardColor):\n for i in range(rowNum):\n for j in range(columnNum - 3):\n #Winning row and its position of either 4 red or 4 black slots within the row. 
\n E.add_constraint(iff(winRowColor[i][j], (boardColor[i][j] & boardColor[i][j + 1] & boardColor[i][j + 2] & boardColor[i][j + 3]))) \n\n #Checks that there is at least one possible route to play in order to win by a row unless top row\n if (i > 0):\n E.add_constraint(winRowColor[i][j] >> (emptyBoard[i-1][j] | emptyBoard[i-1][j + 1] | emptyBoard[i-1][j + 2] | emptyBoard[i-1][j + 3]))\n\n\n #Checks that only a single row channel can be a winning row channel\n special = winRowColor[i][j]\n false = ~true\n for i2 in range(rowNum):\n for j2 in range(columnNum - 3):\n if (i != i2):\n false |= winRowColor[i2][j2]\n E.add_constraint(special >> ~false)\n return E\n \n# Adds/creates constriants for a column of color, boardColor\ndef columnWin(E, winColumnColor, boardColor):\n for i in range(rowNum - 3):\n for j in range(columnNum):\n # Winning column and its position of either 4 red or 4 black slots within the column. \n E.add_constraint(iff(winColumnColor[i][j], (boardColor[i][j] & boardColor[i+1][j] & boardColor[i+2][j] & boardColor[i+3][j])))\n\n # Checks that there is a possible route to play in order to win by a column unless top row\n if (i > 0):\n E.add_constraint(winColumnColor[i][j] >> (emptyBoard[i-1][j]))\n \n #Checks that only one column can win\n special = winColumnColor[i][j]\n false = ~true\n for i2 in range(rowNum - 3):\n for j2 in range(columnNum):\n if (i != i2) | (j != j2):\n false |= winColumnColor[i2][j2]\n E.add_constraint(special >> ~false)\n return E\n\n# Adds/creates contraints that limits the position where a color's column can be, \n# if other rows or diagonals are also true for that same color. 
\ndef columnRules(E, winColumnColor, winRowColor, leftWinDiagonalColor, rightWinDiagonalColor):\n for i in range(rowNum - 3):\n for j in range(columnNum):\n special = winColumnColor[i][j]\n false = ~true\n for i2 in range(rowNum):\n for j2 in range(columnNum):\n if (j2 < columnNum - 3):\n if ((i != i2) | ((j - j2) >= 4)):\n false |= winRowColor[i2][j2] # Last piece of color's column must be somewhere in color's row\n if (j2 < (columnNum - 3)):\n if (i2 < rowNum - 3):\n if (((i != i2) | (j != j2)) & ((i-1 != i2) | (j-1 != j2)) & ((i-2 != i2) | (j-2 != j2)) & ((i-3 != i2) | (j-3 != j2))):\n false |= leftWinDiagonalColor[i2][j2] # Last piece of color's column must be somewhere in color's left diagonal\n\n if (((i != i2) | (j != j2 + 3)) & ((i-1 != i2) | (j+1 != j2 + 3)) & ((i-2 != i2) | (j+2 != j2 + 3)) & ((i-3 != i2) | (j+3 != j2 + 3))):\n false |= rightWinDiagonalColor[i2][j2] # Last piece of color's column must be somewhere in color's right diagonal\n E.add_constraint(special >> ~false)\n return E\n\n# Adds/creates constriants for a right facing diagonal of color, boardColor\ndef leftDiagonalWin(E, leftWinDiagonalColor, boardColor):\n for i in range(rowNum - 3):\n for j in range(columnNum - 3):\n\n #Winning diagonal going right and down.\n E.add_constraint(iff(leftWinDiagonalColor[i][j], (boardColor[i][j] & boardColor[i+1][j+1] & boardColor[i+2][j+2] & boardColor[i+3][j+3])))\n\n # Checks that there is a possible route to play in order to win by a left diagonal unless top row\n if (i > 0):\n E.add_constraint(leftWinDiagonalColor[i][j] >> (emptyBoard[i-1][j] | emptyBoard[i][j+1] | emptyBoard[i+1][j+2] | emptyBoard[i+2][j+3])) \n \n #Only one left facing diagonal channel can be a winning diagonal channel\n special = leftWinDiagonalColor[i][j]\n false = ~true\n for i2 in range(rowNum - 3):\n for j2 in range(columnNum - 3):\n if (i != i2) | (j != j2):\n if (i + 1 != i2) | (j + 1 != j2):\n if (i + 2 != i2) | (j + 2 != j2):\n if (i - 1 != i2) | (j - 1 != j2):\n if (i - 2 
!= i2) | (j - 2 != j2):\n false |= leftWinDiagonalColor[i2][j2]\n E.add_constraint(special >> ~false)\n return E\n\n# Adds/creates constriants for a right facing diagonal of color, boardColor\ndef rightDiagonalWin(E, rightWinDiagonalColor, boardColor):\n for i in range(rowNum - 3):\n for j in range(columnNum - 4, columnNum):\n #Winning diagonal going left and down.\n E.add_constraint(iff(rightWinDiagonalColor[i][j-3], (boardColor[i][j] & boardColor[i+1][j-1] & boardColor[i+2][j-2] & boardColor[i+3][j-3])))\n\n # Checks that there is a possible route to play in order to win by a right diagonal unless top row\n if (i > 0):\n E.add_constraint(rightWinDiagonalColor[i][j-3] >> (emptyBoard[i-1][j] | emptyBoard[i][j-1] | emptyBoard[i+1][j - 2] | emptyBoard[i+2][j - 3]))\n\n # Only one right facing diagonal channel can be a winning diagonal channel\n special = rightWinDiagonalColor[i][j-3]\n false = ~true\n for i2 in range(rowNum - 3):\n for j2 in range(columnNum - 4, columnNum):\n if (i != i2) | (j != j2):\n if (i - 1 != i2) | (j + 1 != j2):\n if (i - 2 != i2) | (j + 2 != j2):\n if (i + 1 != i2) | (j - 1 != j2):\n if (i + 2 != i2) | (j - 2 != j2):\n false |= rightWinDiagonalColor[i2][j2-3]\n E.add_constraint(special >> ~false)\n return E\n\n# Add constriants to check if all occupied position below current is occupied position\ndef gravityRule(i, j):\n f = true\n for slot in range(i + 1, rowNum):\n f &= ~emptyBoard[slot][j]\n return f\n\n# Holds constraints/rules that make up a valid connect four board.\ndef validBoard(E):\n for i in range(rowNum):\n for j in range(columnNum): \n\n # If position(i, j) is empty, then neither black or red can occupy position(i, j).\n E.add_constraint(emptyBoard[i][j] >> (~redBoard[i][j] & ~blackBoard[i][j]))\n\n # If position(i, j) is red, then neither black and empty can occupy position(i, j)\n # Calls gravity constraint\n E.add_constraint(redBoard[i][j] >> (~blackBoard[i][j] & ~emptyBoard[i][j] & gravityRule(i, j)))\n\n # If position(i, j) 
is black, then neither red and empty can occupy position(i, j)\n # Calls gravity constraint\n E.add_constraint(blackBoard[i][j] >> (~redBoard[i][j] & ~emptyBoard[i][j] & gravityRule(i, j)))\n\n # Here to make sure implication works properly above, exactly one has to be true.\n E.add_constraint(emptyBoard[i][j] | redBoard[i][j] | blackBoard[i][j])\n\n # General: ColorWin if and only if there is ColorRowWin, or \n # ColorColumnWin, or ColorDiagonalWin.\n E.add_constraint(iff(BlackWin, (BlackColumnWin() | BlackRowWin() | leftBlackDiagonalWin() | rightBlackDiagonalWin())))\n E.add_constraint(iff(RedWin, (RedColumnWin() | RedRowWin() | leftRedDiagonalWin() | rightRedDiagonalWin())))\n\n # General: NoWin if and only if there is notColorRowWin, and \n # notColorColumnWin, and notColorDiagonalWin.\n E.add_constraint(iff(NoWin, ((~BlackColumnWin() & ~BlackRowWin() & ~leftBlackDiagonalWin() & ~rightBlackDiagonalWin()) & (~RedColumnWin() & ~RedRowWin() & ~leftRedDiagonalWin() & ~rightRedDiagonalWin()))))\n \n #All posibilities of Connect Four Game outcome\n E.add_constraint(iff(BlackWin, (~RedWin & ~NoWin)))\n E.add_constraint(iff(RedWin, (~BlackWin & ~NoWin)))\n E.add_constraint(iff(NoWin, (~RedWin & ~BlackWin)))\n\n return E\n\n# Checks if any black row is true, return false if one is not found\ndef BlackRowWin():\n f = ~true\n for i in range(rowNum): \n for j in range(columnNum - 3):\n f |= blackRow[i][j]\n return f\n\n# Checks if any red row is true, return false if one is not found\ndef RedRowWin():\n f = ~true\n for i in range(rowNum): \n for j in range(columnNum - 3):\n f |= redRow[i][j]\n return f \n\n# Checks if any black column is true, return false if one is not found\ndef BlackColumnWin():\n f = ~true\n for i in range(rowNum- 3): \n for j in range(columnNum):\n f |= blackColumn[i][j]\n return f \n\n# Checks if any red column is true, return false if one is not found\ndef RedColumnWin():\n f = ~true\n for i in range(rowNum- 3): \n for j in range(columnNum):\n f |= 
redColumn[i][j]\n return f \n\n# Checks if any left black diagonal is true, return false if one is not found\ndef leftBlackDiagonalWin():\n f = ~true\n for i in range(rowNum- 3): \n for j in range(columnNum - 3):\n f |= leftBlackDiagonal[i][j]\n return f\n\n# Checks if any right black diagonal is true, return false if one is not found\ndef rightBlackDiagonalWin():\n f = ~true\n for i in range(rowNum- 3): \n for j in range(columnNum - 3):\n f |= rightBlackDiagonal[i][j]\n return f\n\n# Checks if any left red diagonal is true, return false if one is not found\ndef leftRedDiagonalWin():\n f = ~true\n for i in range(rowNum- 3): \n for j in range(columnNum - 3):\n f |= leftRedDiagonal[i][j]\n return f \n\n# Checks if any right red diagonal is true, return false if one is not found\ndef rightRedDiagonalWin():\n f = ~true\n for i in range(rowNum- 3): \n for j in range(columnNum - 3):\n f |= rightRedDiagonal[i][j]\n return f \n\n\n# Prints a Connect Four board using computer assigned values from .solve dictionary\ndef printBoard(dic):\n board=[]\n for i in range(rowNum): \n board.append([])\n for j in range(columnNum):\n board[i].append(\"-\")\n if dic == None:\n print(\"NonSatisfiable Board\")\n return []\n else:\n for key, value in dic.items():\n if (key[:6] == \"Black(\") and (value == True):\n xVal = int(key[-4])\n yVal = int(key[-2])\n board[xVal][yVal] = \"B\"\n elif (key[:4] == \"Red(\") and (value == True):\n xVal = int(key[-4])\n yVal = int(key[-2])\n board[xVal][yVal] = \"R\"\n elif (key == \"Black has Won the Game\") and (value == True):\n print(\"Black has Won the Game with:\")\n elif (key == \"Red has Won the Game\") and (value == True):\n print(\"Red has Won the Game with:\")\n elif (key == \"No one has Won the Game\") and (value == True):\n print(\"No one has Won the Game!\")\n for row in board:\n print(row)\n\n# Builds an example full theory of Connect Four for our setting and returns it.\ndef connectFour():\n E = Encoding()\n E = validBoard(E)\n E = 
rowWin(E, blackRow, blackBoard)\n E = rowWin(E, redRow, redBoard)\n E = columnWin(E, blackColumn, blackBoard)\n E = columnWin(E, redColumn, redBoard)\n E = leftDiagonalWin(E, leftBlackDiagonal, blackBoard)\n E = leftDiagonalWin(E, leftRedDiagonal, redBoard)\n E = rightDiagonalWin(E, rightBlackDiagonal, blackBoard)\n E = rightDiagonalWin(E, rightRedDiagonal, redBoard)\n E = columnRules(E, blackColumn, blackRow, leftBlackDiagonal, rightBlackDiagonal)\n E = columnRules(E, redColumn, redRow, leftRedDiagonal, rightRedDiagonal)\n E = sameCount(E, blackPartialCount, blackBoard)\n E = sameCount(E, redPartialCount, redBoard)\n return E\n\n# Counting the number of peices of a single color,\n# and making the number of black peices equal to the number of red peices on the board.\ndef sameCount(E, partialCount, boardColor):\n\n # Final partial counts should be equal to the full count\n for c in range(rowNum * columnNum + 1):\n E.add_constraint(iff(totalCount[c], partialCount[rowNum- 1][columnNum - 1][c]))\n\n # You can't have more pieces than you've already seen\n for i in range(rowNum):\n for j in range(columnNum):\n for c in range((i * 7) + j + 2,rowNum * columnNum + 1):\n E.add_constraint(~partialCount[i][j][c])\n\n # First index: only black piece or red piece could possibly be true\n E.add_constraint(iff(partialCount[0][0][0], ~boardColor[0][0]))\n E.add_constraint(iff(partialCount[0][0][1], boardColor[0][0]))\n\n #General pattern: Looks at the other color pieces to decide the current color piece.\n for x in range(1, rowNum * columnNum):\n i = x // columnNum\n j = x % columnNum\n E.add_constraint(iff(partialCount[i][j][0], partialCount[(i-1) if (j==0) else i][(columnNum-1) if (j==0) else (j-1)][0] & ~boardColor[i][j]))\n for c in range(1,x+2):\n increased = partialCount[(i-1) if (j==0) else i][(columnNum-1) if (j==0) else (j-1)][c-1] & boardColor[i][j]\n stay_same = partialCount[(i-1) if (j==0) else i][(columnNum-1) if (j==0) else (j-1)][c] & ~boardColor[i][j]\n 
E.add_constraint(iff(partialCount[i][j][c], increased | stay_same))\n return E\n\n# Function exploring Black wins in our model of Connect Four.\ndef numBlackWins(E):\n E.add_constraint(BlackWin)\n return E.count_solutions()\n\n# Function exploring Red wins in our model of Connect Four.\ndef numRedWins(E):\n E.add_constraint(RedWin)\n return E.count_solutions()\n\n# Function exploring No wins in our model of Connect Four.\ndef numNoWins(E):\n E.add_constraint(NoWin)\n return E.count_solutions()\n\nif __name__ == \"__main__\":\n\n E = connectFour()\n\n print(\"\\nSatisfiable: %s\" % E.is_satisfiable())\n\n # Uncomment if wanting to explore number of Black wins in our model of ConnectFour\n #print(\"# Solutions: %d\" % numBlackWins())\n\n # Uncomment if wanting to explore number of Red wins in our model of ConnectFour\n #print(\"# Solutions: %d\" % numRedWins())\n\n # Uncomment if wanting to explore number of No wins in our model of ConnectFour\n #print(\"# Solutions: %d\" % numNoWins())\n\n dic = E.solve()\n print(\" Solution: %s \\n\" % dic)\n printBoard(dic)\n \n # print(\"\\nVariable likelihoods:\")\n # print(\" %s: %.2f\" % (BlackWin, E.likelihood(BlackWin)))\n # print()\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":16600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"346071114","text":"import logging\nimport json\nfrom flask import (flash,redirect,send_file,jsonify,make_response,url_for,session,abort)\nfrom ._compat import as_unicode\nfrom .filemanager import uuid_originalname\nfrom .widgets import GroupFormListWidget,ListMasterWidget\nfrom .baseviews import BaseView,BaseCRUDView,BaseFormView,expose,expose_api\nfrom .security.decorators import has_access,permission_name,has_access_api\nfrom .urltools import *\nfrom .const import FLAMSG_ERR_SEC_ACCESS_DENIED\n\nlog = logging.getLogger(__name__)\n\n\nclass IndexView(BaseView):\n route_base = ''\n default_view = 'index'\n index_template = 'appbuilder/index.html'\n\n @expose('/')\n def index(self):\n self.update_redirect()\n return self.render_template(self.index_template,appbuilder=self.appbuilder)\n\n\nclass UtilView(BaseView):\n route_base = ''\n default_view = 'back'\n\n @expose('/back')\n def back(self):\n return redirect(self.get_redirect())\n\n\nclass SimpleFormView(BaseFormView):\n @expose(\"/form\",methods=['GET'])\n @has_access\n def this_form_get(self):\n self._init_vars()\n form = self.form.refresh()\n\n self.form_get(form)\n widgets = self._get_edit_widget(form=form)\n self.update_redirect()\n return self.render_template(self.form_template,\n title = self.form_title,\n widgets=widgets,\n appbuilder=self.appbuilder\n )\n\n @expose(\"/form\",methods=['POST'])\n @has_access\n def this_form_post(self):\n self._init_vars()\n form = self.form.refresh()\n\n if form.validate_on_submit():\n response = self.form_post(form)\n if not response:\n return redirect(self.get_redirect())\n return response\n else:\n widgets = self._get_edit_widget(form=form)\n return self.render_template(self.form_template,\n title=self.form_title,\n widgets=widgets,\n appbuilder=self.appbuilder\n )\n\n\nclass PublicFormView(BaseFormView):\n\n @expose(\"/form\",methods=['GET'])\n def this_form_get(self):\n self._init_vars()\n form = self.form.refresh()\n self.form_get(form)\n widgets = 
self._get_edit_widget(form=form)\n self.update_redirect()\n return self.render_template(self.form_template,\n title=self.form_title,\n widgets=widgets,\n appbuilder=self.appbuilder\n )\n\n @expose(\"/form\",methods=['POST'])\n def this_form_post(self):\n self._init_vars()\n form = self.form.refresh()\n if form.validate_on_submit():\n response = self.form_post(form)\n if not response:\n return redirect(self.get_redirect())\n return response\n else:\n widgets = self._get_edit_widget(form=form)\n return self.render_template(self.form_template,\n title=self.form_title,\n widgets=widgets,\n appbuilder=self.appbuilder\n )\n\n\nclass RestCRUDView(BaseCRUDView):\n \"\"\"\n This class view exposes REST method for CRUD operations on you models\n \"\"\"\n\n def _search_form_json(self):\n pass\n\n def _get_api_urls(self,api_urls=None):\n\n view_name = self.__class__.__name__\n api_urls = api_urls or {}\n api_urls['read'] = url_for(view_name + \".api_read\")\n api_urls['delete'] = url_for(view_name + \".api_delete\",pk=\"\")\n api_urls['create'] = url_for(view_name + \".api_create\")\n api_urls['update'] = url_for(view_name + \".api_update\",pk=\"\")\n return api_urls\n\n def _get_modelview_urls(self,modelview_urls=None):\n view_name = self.__class__.__name__\n modelview_urls = modelview_urls or {}\n modelview_urls['show'] = url_for(view_name + \".show\",pk=\"\")\n modelview_urls['add'] = url_for(view_name + \".add\")\n modelview_urls['edit'] = url_for(view_name + \".edit\",pk=\"\")\n return modelview_urls\n\n @expose('/api',methods=['GET'])\n @has_access_api\n @permission_name('list')\n def api(self):\n view_name = self.__class__.__name__\n api_urls = self._get_api_urls()\n modelview_urls = self._get_modelview_urls()\n\n #Collects the CRUD permissions\n can_show = self.appbuilder.sm.has_access('can_show',view_name)\n can_edit = self.appbuilder.sm.has_access('can_edit',view_name)\n can_add = self.appbuilder.sm.has_access('can_add',view_name)\n can_delete = 
self.appbuilder.sm.has_access('can_delete',view_name)\n\n #Prepares the form with the search fields make it JSON serializable\n form_fields = {}\n search_filters = {}\n dict_filters = self._filters.get_search_filters()\n form = self.search_form.refresh()\n for col in self.search_columns:\n form_fields[col] = form[col]()\n search_filters[col] = [as_unicode(flt.name) for flt in dict_filters[col]]\n\n ret_json = jsonify(can_show=can_show,\n can_add=can_add,\n can_edit=can_edit,\n can_delete=can_delete,\n label_columns=self._label_columns_json(),\n list_columns=self.list_columns,\n order_columns=self.order_columns,\n page_size=self.page_size,\n modelview_name=view_name,\n api_urls=api_urls,\n search_filters=search_filters,\n search_fields=form_fields,\n modelview_urls=modelview_urls)\n response = make_response(ret_json,200)\n response.headers['Content-Type'] = \"application/json\"\n return response\n\n @expose_api(name='read',url='/api/read',methods=['GET'])\n @has_access_api\n @permission_name('list')\n def api_read(self):\n\n #Get arguments for ordering\n if get_order_args().get(self.__class__.__name__):\n order_column,order_direction = get_order_args().get(self.__cllass__.__name__)\n else:\n order_column,order_direction = '',''\n page = get_page_args().get(self.__class__.__name__)\n page_size = get_page_size_args().get(self.__class__.__name__)\n get_filter_args(self._filters)\n joined_filters = self._filters.get_joined_filters(self._base_filters)\n count,lst = self.datamodel.query(joined_filters,order_column,order_direction,page=page,page_size=page_size)\n result = self.datamodel.get_values_json(lst,self.list_columns)\n pks = self.datamodel.get_keys(lst)\n ret_json = jsonify(label_columns=self._label_columns_json(),\n list_columns=self.list_columns,\n order_columns=self.order_columns,\n page=page,\n page_size=page_size,\n count=count,\n modelview_name=self.__class__.__name__,\n pks=pks,\n result=result)\n response = make_response(ret_json,200)\n 
response.headers['Content-Type'] = \"application/json\"\n return response\n\n @expose_api(name='get',url='/api/get/',methods=['GET'])\n @has_access_api\n @permission_name('show')\n def api_get(self,pk):\n\n item = self.datamodel.get(pk,self._base_filters)\n if not item:\n abort(404)\n _item = dict()\n for col in self.show_columns:\n _item[col] = str(getattr(item,col))\n\n ret_json = jsonify(pk=pk,\n label_columns=self._label_columns_json(),\n include_columns=self.show_columns,\n modelview_name=self.__class__.__name__,\n result=_item)\n response = make_response(ret_json,200)\n response.headers['Content-Type'] = \"application/json\"\n return response\n\n @expose_api(name='create',url='/api/create',methods=['POST'])\n @has_access_api\n @permission_name('add')\n def api_create(self):\n is_valid_form = True\n get_filter_args(self._filters)\n exclude_cols = self._filters.get_relation_cols()\n form = self.add_form.refresh()\n\n self._fill_form_exclude_cols(exclude_cols,form)\n if form.validate():\n item = self.datamodel.obj()\n form.populate_obj(item)\n self.pre_add(item)\n if self.datamodel.add(item):\n self.post_add(item)\n http_return_code = 200\n else:\n http_return_code = 500\n else:\n is_valid_form = False\n\n if is_valid_form:\n response = make_response(jsonify({'message': self.datamodel.message[0],\n 'severity': self.datamodel.message[1]}), http_return_code)\n else:\n # TODO return dict with errors\n response = make_response(jsonify({'message': 'Invalid form',\n 'severity': 'warning'}), 500)\n return response\n\n @expose_api(name='update',url='/api/update/',methods=['PUT'])\n @has_access_api\n @permission_name('edit')\n def api_update(self,pk):\n is_valid_form = True\n get_filter_args(self._filters)\n exclude_cols = self._filters.get_relation_cols()\n\n item = self.datamodel.get(pk,self._base_filters)\n if not item:\n abort(404)\n pk = self.datamodel.get_pk_value(item)\n\n form = self.edit_form.refresh(request.form)\n 
self._fill_form_exclude_cols(exclude_cols,form)\n form._id = pk\n if form.validate():\n form.populate_obj(item)\n self.pre_update(item)\n if self.datamodel.edit(item):\n self.post_update(item)\n http_return_code = 200\n else:\n http_return_code = 500\n else:\n is_valid_form = False\n if is_valid_form:\n response = make_response(jsonify({'message':self.datamodel.message[0],'severity':self.datamodel.message[1]}),http_return_code)\n else:\n response = make_response(jsonify({'message':'Invalid form','severity':'warning'}),500)\n return response\n\n @expose_api(name='delete',url='/api/delete/',methods=['DELETE'])\n @has_access_api\n @permission_name('delete')\n def api_delete(self,pk):\n item = self.datamodel.get(pk,self._base_filters)\n if not item:\n abort(404)\n self.pre_delete(item)\n if self.datamodel.delete(item):\n self.post_delete(item)\n http_return_code = 200\n else:\n http_return_code = 500\n response = make_response(jsonify({'message':self.datamodel.message[0],'severity':self.datamodel.message[1]}),http_return_code)\n response.headers['Content-Type'] = \"application/json\"\n return response\n\n def _get_related_column_data(self,col_name,filters):\n rel_datamodel = self.datamodel.get_related_interface(col_name)\n _filters = rel_datamodel.get_filters(rel_datamodel.get_search_columns_list())\n get_filter_args(_filters)\n if filters:\n filters = _filters.add_filter_list(filters)\n else:\n filters = _filters\n result = rel_datamodel.query(filters)[1]\n ret_list = list()\n for item in result:\n pk = rel_datamodel.get_pk_value(item)\n ret_list.append({'id':int(pk),'text':str(item)})\n ret_json = json.dumps(ret_list)\n return ret_json\n\n @expose_api(name='column_add',url='/api/column/add/',methods=['GET'])\n @has_access_api\n @permission_name('add')\n def api_column_add(self,col_name):\n\n filter_rel_fields = None\n if self.add_form_query_rel_fields:\n filter_rel_fields = self.add_form_query_rel_fields.get(col_name)\n ret_json = 
self._get_related_column_data(col_name,filter_rel_fields)\n response = make_response(ret_json,200)\n response.headers['Content-Type'] = \"application/json\"\n return response\n\n @expose_api(name='column_edit',url='/api/column/edit/',methods=['GET'])\n @has_access_api\n @permission_name('edit')\n def api_column_edit(self,col_name):\n\n filter_rel_fields = None\n if self.edit_form_query_rel_fields:\n filter_rel_fields = self.edit_form_query_rel_fields\n ret_json = self._get_related_column_data(col_name,filter_rel_fields)\n response = make_response(ret_json,200)\n response.headers['Content-Type']=\"application/json\"\n return response\n\n @expose_api(name='readvalues',url='/api/readvalues',methods=['GET'])\n @has_access_api\n @permission_name('list')\n def api_readvalues(self):\n\n if get_order_args().get(self.__class__.__name__):\n order_column,order_direction = get_order_args().get(self.__class__.__name__)\n else:\n order_column,order_direction = '',''\n\n get_filter_args(self._filters)\n joined_filters = self._filters.get_joined_filters(self._base_filters)\n count,result = self.datamodel.query(joined_filters,order_column,order_direction)\n\n ret_list = list()\n for item in result:\n pk = self.datamodel.get_pk_value(item)\n ret_list.append({'id':int(pk),'text':str(item)})\n\n ret_json = json.dumps(ret_list)\n response = make_response(ret_json,200)\n response.headers['Content-Type'] = \"application/json\"\n return response\n\n\nclass ModelView(RestCRUDView):\n\n def __init__(self,**kwargs):\n super(ModelView,self).__init__(**kwargs)\n\n def post_add_redirect(self):\n \"\"\"Override this function to control the redirect after add endpoint is called.\"\"\"\n return redirect(self.get_redirect())\n\n def post_edit_redirect(self):\n return redirect(self.get_redirect())\n\n def post_delete_redirect(self):\n return redirect(self.get_redirect())\n\n \"\"\"\n ---------------\n LIST SHOW ADD EDIT DELETE ---------------\n \"\"\"\n\n @expose('/list/')\n @has_access\n def 
list(self):\n\n widgets = self._list()\n return self.render_template(self.list_template,title=self.list_title,widgets=widgets)\n\n @expose('/show/',methods=['GET'])\n @has_access\n def show(self,pk):\n widgets = self._show(pk)\n return self.render_template(self.show_template,pk=pk,title=self.show_title,\n widgets=widgets,related_views=self._related_views)\n\n @expose('/add',methods=['GET','POST'])\n @has_access\n def add(self):\n widget = self._add()\n if not widget:\n return self.post_add_redirect()\n else:\n return self.render_template(self.add_template,title=self.add_title,widgets=widget)\n\n @expose('/edit/',methods=['GET','POST'])\n @has_access\n def edit(self,pk):\n widgets = self._edit(pk)\n if not widgets:\n return self.post_edit_redirect()\n else:\n return self.render_template(self.edit_template,title=self.edit_title,\n widgets=widgets,related_views=self._related_views)\n\n @expose('/delete/')\n @has_access\n def delete(self,pk):\n self._delete(pk)\n return self.post_delete_redirect()\n\n @expose('/download/')\n @has_access\n def download(self,filename):\n return send_file(self.appbuilder.app.config['UPLOAD_FOLDER'] + filename,\n attachment_filename=uuid_originalname(filename),as_attachment=True)\n\n @expose('/action//',methods=['GET'])\n def action(self,name,pk):\n if self.appbuilder.sm.has_access(name,self.__class__.__name__):\n action = self.actions.get(name)\n return action.func(self.datamodel.get(pk))\n else:\n flash(as_unicode(FLAMSG_ERR_SEC_ACCESS_DENIED),\"danger\")\n return redirect('.')\n\n @expose('/action_post',methods=['POST'])\n def action_post(self):\n name = request.form['action']\n pks = request.form.getlist('rowid')\n if self.appbuilder.sm.has_access(name,self.__class__.__name__):\n action = self.actions.get(name)\n items = [self.datamodel.get(pk) for pk in pks]\n return action.func(items)\n else:\n flash(as_unicode(FLAMSG_ERR_SEC_ACCESS_DENIED),\"danger\")\n return redirect('.')\n\n\nclass MasterDetailView(BaseCRUDView):\n \"\"\"\n 
Implements behaviour for controlling two CRUD views\n linked by PK and FK, in a master/detail type with\n two lists.\n\n Master view will behave like a left menu::\n class DetailView(ModelView):\n datamodel = SQLAInterface(DetailTable, db.session)\n class MasterView(MasterDetailView):\n datamodel = SQLAInterface(MasterTable, db.session)\n related_views = [DetailView]\n \"\"\"\n list_template = 'appbuilder/general/model/left_master_detail.html'\n list_widget = ListMasterWidget\n master_div_width = 2\n \"\"\" Set to configure bootstrap class for master grid size\"\"\"\n\n @expose('/list/')\n @expose('/list/')\n @has_access\n def list(self,pk=None):\n pages = get_page_args()\n page_sizes = get_page_size_args()\n orders = get_order_args()\n\n widgets = self._list()\n if pk:\n item = self.datamodel.get(pk)\n widgets = self._get_related_views_widgets(item,orders=orders,\n pages=pages,page_sizes=page_sizes,widgets=widgets)\n related_views = self._related_views\n else:\n related_views = []\n\n return self.render_template(self.list_template,\n title=self.list_title,\n widgets=widgets,\n related_views=related_views,\n master_div_width=self.master_div_width)\n\n\n\nclass MultipleView(BaseView):\n\n list_template = 'appbuilder/general/model/multiple_views.html'\n views = None\n _views = None\n\n def __init__(self,**kwargs):\n super(MultipleView,self).__init__(**kwargs)\n self.views = self.views or list()\n self._views = self._views or list()\n\n def get_uninit_inner_views(self):\n return self.views\n\n def get_init_inner_views(self):\n return self._views\n\n @expose('/list/')\n @has_access\n def list(self):\n pages = get_page_args()\n page_sizes = get_page_size_args()\n orders = get_order_args()\n views_widgets = list()\n for view in self._views:\n if orders.get(view.__class__.__name__):\n order_column,order_direction = orders.get(view.__class__.__name__)\n else:\n order_column,order_direction = '',''\n page = pages.get(view.__class__.__name__)\n page_size = 
page_sizes.get(view.__class__.__name__)\n views_widgets.append(view._get_view_widget(filters=view._base_filters,\n order_column=order_column,\n order_direction=order_direction,\n page=page,page_size=page_size))\n self.update_redirect()\n return self.render_template(self.list_template,\n views=self._views,views_widgets=views_widgets)\n\n\n\nclass CompactCRUDMixin(BaseCRUDView):\n\n @classmethod\n def set_key(cls,k,v):\n k = cls.__name__ + '__' + k\n session[k] = v\n\n @classmethod\n def get_key(cls,k,default=None):\n k = cls.__name__ + '__' + k\n if k in session:\n return session[k]\n else:\n return default\n\n @classmethod\n def del_key(cls,k):\n k = cls.__name__ + '__' + k\n session.pop(k)\n\n def _get_list_widget(self,**args):\n widgets = super(CompactCRUDMixin,self)._get_list_widget(**args)\n session_form_widget = self.get_key('session_form_widget',None)\n\n form_widget = None\n if session_form_widget == 'add':\n form_widget = self._add().get('add')\n elif session_form_widget == 'edit':\n pk = self.get_key('session_form_edit_pk')\n if pk:\n form_widget = self._edit(int(pk)).get('edit')\n return {\n 'list':GroupFormListWidget(\n list_widget=widgets.get('list'),\n form_widget = form_widget,\n form_action=self.get_key('session_form_action',''),\n form_title=self.get_key('session_form_title',''),\n )\n }\n\n @expose('/list/',methods=['GET','POST'])\n @has_access\n def list(self):\n list_widgets = self._list()\n return self.render_template(self.list_template,\n title=self.list_title,widgets=list_widgets)\n\n @expose('/add/',methods=['GET','POST'])\n @has_access\n def add(self):\n widgets = self._add()\n if not widgets:\n self.set_key('session_form_action', '')\n self.set_key('session_form_widget', None)\n return redirect(request.referrer)\n else:\n self.set_key('session_form_widget','add')\n self.set_key('session_form_action',request.full_path)\n self.set_key('session_form_title',self.add_title)\n return redirect(self.get_redirect())\n\n 
@expose('/edit/',methods=['GET','POST'])\n @has_access\n def edit(self,pk):\n widgets = self._edit(pk)\n self.update_redirect()\n if not widgets:\n self.set_key('session_form_action','')\n self.set_key('session_form_widget',None)\n return redirect(self.get_redirect())\n else:\n self.set_key('session_form_widget','edit')\n self.set_key('session_form_action',request.full_path)\n self.set_key('session_form_title',self.add_title)\n self.set_key('session_form_edit_pk',pk)\n return redirect(self.get_redirect())\n\n @expose('/delete/')\n @has_access\n def delete(self,pk):\n self._delete(pk)\n edit_pk = self.get_key('session_form_edit_pk')\n if pk == edit_pk:\n self.del_key('session_form_edit_pk')\n return redirect(self.get_redirect())\n\n\"\"\"\n This is for retro compatibility\n\"\"\"\nGeneralView = ModelView\n\n\n","sub_path":"flask_appbuilder/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":22169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"269981979","text":"\"\"\"\nThis code will contain python data structures\n\n\"\"\"\n\"\"\"Singly linked list\nThe basic structure of a singly linked list is that it contains:\ndata\nA reference to the next node. Node contains data and the link to the next done.\n\nWe will create a singly linked list whose node contains data and the link to the next node\n\"\"\"\n\n\n# Define basic element that forms the list\nclass Node:\n # Constructor for the node (has data and link to next node)\n def __init__(self, data=None, next_node=None):\n self.__data = data\n self.__next_node = next_node\n\n \"define methods to get the data, get next node and set the next node\"\n\n # Get data\n def get_data(self):\n return self.__data\n\n # Get the next node\n def get_next(self):\n return self.__next_node\n\n # Set the next node\n def set_next(self, new_next):\n self.__next_node = new_next\n\n\n\"Define the list\"\n\n\nclass SinglyLinkedList:\n # Constructor\n def __init__(self):\n self.__head = Node(\"__head__\")\n\n # Get the first node that contains the specified data\n def get_node(self, data):\n current = self.__head\n\n # Go through the list until it finds a match, or reach the end of the list\n while current:\n if current.get_data() == data:\n return current\n else:\n current = current.get_next()\n return None\n\n # Delete first node that contains the specified data\n def delete(self, data):\n current = self.__head\n previous = None\n\n if current.get_data() != data:\n previous = current\n current = current.get_next()\n # Go through the list until it finds a match, or reach the end of the line\n else:\n previous.set_next(current.get_next())\n # break;\n\n # Append new node to the end of the list\n def append(self, data):\n current = self.__head\n # Go to the last node in the list\n while current.get_next():\n current = current.get_next()\n\n # Append at the end of the list\n current.set_next(Node(data))\n\n # Get the number of nodes in the list\n def size(self):\n 
current = self.__head\n count = 0\n while current:\n count += 1\n current = current.get_next()\n return count - 1\n\n # Print List\n def print_list(self):\n current = self.__head.get_next()\n while current:\n print(current.get_data())\n current = current.get_next()\n\n\n\"Test our list\"\n# Create list object\nl = SinglyLinkedList()\n# Append cat to the list and print\nl.append('cat')\n# More appends\nl.append('dog')\nl.append('fish')\nl.append('bird')\n\nprint(l.print_list())\n\n# Test Get node\nnode = l.get_node('fish')\nprint(node.get_data())\n\n# delete fish\nl.delete('fish')\nprint(l.print_list())\n\n# size\nprint(l.size())\n","sub_path":"data_structures.py","file_name":"data_structures.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"194970957","text":"from functions.functions import *\nimport itertools\n\n# read ciphertext from file\nf=open('text/12.txt', 'r')\nline=''.join(f.readlines()).replace('\\n', '')\nf.close()\n\n# get five most often bigrams\nx=sort_dict(count_bigrams(line, False))\nfirst_elements = [i[0] for i in list(x.items())[:5]]\n\n# set flag to 0 (change it if plaintext found)\nflag = 0\n\n# go thought every possible combination of\n# (2 bigrams from most often in language, 2 bigrams from most often in ciphertext)\nfor i in itertools.permutations(five_most_often, 2):\n for j in itertools.combinations(first_elements, 2):\n print('-> X: '+str(i)+'; Y: '+str(j)+';')\n\n # get coefficients for decryption, check if not None\n a, b=get_coefs(i, j)\n if len(a)<1:\n print(' error: coef a does not exist')\n\n # go thought every pair (a, b) for decryption\n for (ael, bel) in zip(a, b):\n print(' a: '+str(ael)+'; b: '+str(bel) )\n plaintext = decipher_afin_bigrams(ael, bel, line)\n\n # check if plaintext follows the criteria\n # and if inverse of a exists (if it does, len(plaintext) will be >0)\n if len(plaintext)>0 and is_plaintext(plaintext):\n print(' plaintext: '+plaintext[:100]+'...')\n\n # write plaintext to file and change flag to 1\n if flag==0:\n fileout = open('results/decrypted_12.txt', 'w')\n fileout.write(plaintext)\n fileout.close()\n flag+=1\n\n #exit() # uncomment to end program on first plaintext found\n print()\n","sub_path":"cp_3/kostetska_fb-83_cp3/task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"390257667","text":"#!/usr/bin/env python3\n\nimport datetime\nimport logging\nimport signal\nimport sqlite3\nimport libopenzwave\n\n\nDATABASE = '/home/pi/lunares_hab/sensors-data.sqlite3'\nALLOWED_MEASUREMENTS = ['Battery Level', 'Powerlevel', 'Temperature', 'Luminance', 'Relative Humidity', 'Ultraviolet'] # 'Burglar'\ndevice = '/dev/ttyACM0'\nlog = 'Info'\nsniff = 60.0\n\n\noptions = libopenzwave.PyOptions(\n config_path='/usr/local/etc/openzwave/',\n user_path='/home/pi/lunares_hab/',\n cmd_line='--logging false')\n\noptions.lock()\nmanager = libopenzwave.PyManager()\nmanager.create()\n\n\nwith sqlite3.connect(DATABASE) as db:\n db.execute(\"\"\"CREATE TABLE IF NOT EXISTS sensor_data (\n datetime DATETIME PRIMARY KEY,\n sync_datetime DATETIME DEFAULT NULL,\n device VARCHAR(255),\n type VARCHAR(255),\n value VARCHAR(255),\n unit VARCHAR(255));\"\"\")\n db.execute('CREATE UNIQUE INDEX IF NOT EXISTS sensor_data_datetime_index ON sensor_data (datetime);')\n db.execute('CREATE INDEX IF NOT EXISTS sensor_data_sync_datetime_index ON sensor_data (sync_datetime);')\n\n\ndef save_to_sqlite3(args):\n values = args.get('valueId')\n\n if not values or values.get('label') not in ALLOWED_MEASUREMENTS:\n return None\n\n with sqlite3.connect(DATABASE) as db:\n db.execute('INSERT INTO sensor_data VALUES (:datetime, NULL, :device, :type, :value, :unit)', {\n 'datetime': datetime.datetime.now(datetime.timezone.utc),\n 'type': values.get('label'),\n 'value': values.get('value'),\n 'unit': values.get('units'),\n 'device': '{base:08x}-{node}'.format(\n base=values.get('homeId'),\n node=values.get('nodeId'))})\n\n\nif __name__ == '__main__':\n logging.info('Add watcher')\n manager.addWatcher(save_to_sqlite3)\n\n logging.info('Add device')\n manager.addDriver(device)\n\n try:\n signal.pause()\n\n finally:\n logging.info('Remove watcher')\n manager.removeWatcher(save_to_sqlite3)\n\n logging.info('Remove device')\n 
manager.removeDriver(device)\n","sub_path":"bin/sensor-zwave-collector.py","file_name":"sensor-zwave-collector.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"176413393","text":"'''important'''\n'''The Little Elephant loves playing with arrays. He has array a, consisting of n positive integers, indexed from 1 to n. Let's denote the number with index i as ai.\n\nAdditionally the Little Elephant has m queries to the array, each query is characterised by a pair of integers lj and rj (1 ≤ lj ≤ rj ≤ n). For each query lj, rj the Little Elephant has to count, how many numbers x exist, such that number x occurs exactly x times among numbers alj, alj + 1, ..., arj.\n\nHelp the Little Elephant to count the answers to all queries.\n\nInput\nThe first line contains two space-separated integers n and m (1 ≤ n, m ≤ 105) — the size of array a and the number of queries to it. The next line contains n space-separated positive integers a1, a2, ..., an (1 ≤ ai ≤ 109). Next m lines contain descriptions of queries, one per line. The j-th of these lines contains the description of the j-th query as two space-separated integers lj and rj (1 ≤ lj ≤ rj ≤ n).\n\nOutput\nIn m lines print m integers — the answers to the queries. 
The j-th line should contain the answer to the j-th query.\n\nExamples\ninputCopy\n7 2\n3 1 2 2 3 3 7\n1 7\n3 4\noutputCopy\n3\n1\n'''\n\ndef helpElephant(arr,queries,q): # 3 1 2 2 3 3 7\n Ans = [0]*q\n freq =[0]*10000\n queries.sort(key = lambda x:x[1])\n MXN = 10**5 + 5\n\n currL =0\n currR = -1\n count =0\n idx_lis = [0] * q\n\n for i in range(q):\n\n L,R = queries[i]\n idx = origin_index.index([L, R])\n idx_lis[idx]+=1\n if idx_lis[idx]>1:\n idx+=1\n print(idx)\n\n\n\n while(currR=MXN:\n return 0\n if freq[arr[currR]] == arr[currR]:\n count-=1\n freq[arr[currR]]+=1\n if freq[arr[currR]] == arr[currR]:\n count+=1\n\n\n while (currL > L):\n currL -= 1\n if arr[currL]>=MXN:\n return 0\n if freq[arr[currL]] == arr[currL]:\n count -= 1\n freq[arr[currL]] += 1\n if freq[arr[currL]] == arr[currL]:\n count += 1\n\n\n while (currL < L):\n if freq[arr[currL]] == arr[currL]:\n count -= 1\n freq[arr[currL]] -= 1\n if freq[arr[currL]] == arr[currL]:\n count += 1\n currL += 1\n while (currR > R):\n if freq[arr[currR]] == arr[currR]:\n count -= 1\n freq[arr[currR]] -= 1\n if freq[arr[currR]] == arr[currR]:\n count += 1\n currR -= 1\n Ans[idx] = count\n\n\n\n\n for i in range(len(Ans)) :\n print(Ans[i])\n\ninp = list(map(int,input(\"n q\").split()))\nn= inp[0]\nq = inp[1]\narr = list(map(int,input(\"array\").split()))\nqueries = []\norigin_index = [0]*q\nans = []\nfor i in range(q):\n queries.append(list(map(int,input(\" query range\").split())))\n queries[i][0]-=1\n queries[i][1]-=1\n origin_index[i]=queries[i]\n\nhelpElephant(arr,queries,q)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"BasicThingsUshould Know/PlmsonMosAlgo.py","file_name":"PlmsonMosAlgo.py","file_ext":"py","file_size_in_byte":3106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"630796445","text":"class Solution:\n def fourSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[List[int]]\n \"\"\"\n\n # the solution set must not contain duplicate quadruplets.\n\n if len(nums) ==0 or not nums:\n return []\n return_list =[]\n nums.sort()\n print(nums)\n last = len(nums) - 1\n\n for i in range(len(nums) - 3):\n\n if i > 0 and nums[i] == nums[i - 1]:\n continue\n\n for j in range(i+1 ,len(nums) - 2):\n\n #if j > 0 and nums[j] == nums[j - 1]:\n # continue\n\n k = j+ 1\n l = last\n\n while k target:\n l -= 1\n if sum < target:\n k += 1\n\n return return_list\n\n\nsolution = Solution\nprint(solution.fourSum(solution,nums = [1, 0, -1, 0, -2, 2], target = 0))\nprint(solution.fourSum(solution,nums = [0,0,0,0], target = 0))\nprint(solution.fourSum(solution,nums = [-3,-2,-1,0,0,1,2,3], target = 0))","sub_path":"M_18_Four_Sum.py","file_name":"M_18_Four_Sum.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"167798335","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom PyQt5 import QtWidgets, QtCore\n\n\nclass subSelectWindow(QtWidgets.QWidget):\n\n semsub = QtCore.pyqtSignal(str, str)\n\n def __init__(self):\n super(subSelectWindow, self).__init__()\n self.resize(800,600)\n self.selectSem = QtWidgets.QComboBox(self)\n self.selectSem.setGeometry(QtCore.QRect(175, 100, 450, 80))\n self.selectSem.setObjectName(\"selectSem\")\n font = self.selectSem.font()\n font.setPointSize(15)\n self.selectSem.setFont(font)\n self.selectSubject = QtWidgets.QComboBox(self)\n self.selectSubject.setGeometry(QtCore.QRect(175, 220, 450, 80))\n self.selectSubject.setObjectName(\"selectSubject\")\n font = self.selectSubject.font()\n font.setPointSize(12)\n self.selectSubject.setFont(font)\n self.selectBtn = QtWidgets.QPushButton(self)\n self.selectBtn.setGeometry(QtCore.QRect(225, 350, 350, 90))\n self.selectBtn.setObjectName(\"selectBtn\")\n self.selectBtn.setText(\"Confirm and close\")\n self.selectBtn.setStyleSheet(\"#selectBtn{\\n\"\n \"display: inline-block;\\n\"\n \" padding: 15px 25px;\\n\"\n \" font-size: 24px;\\n\"\n \" cursor: pointer;\\n\"\n \" text-align: center;\\n\"\n \" text-decoration: none;\\n\"\n \" outline: none;\\n\"\n \" color: #fff;\\n\"\n \" background-color: #4da6ff;\\n\"\n \" border: none;\\n\"\n \" border-radius: 45px;\\n\"\n \" box-shadow: 0 9px #999;\\n\"\n \"}\\n\"\n )\n self.subjectLabel = QtWidgets.QLabel(self)\n self.subjectLabel.setGeometry(QtCore.QRect(225, 450, 350, 90))\n self.subjectLabel.setObjectName(\"subjectLabel\")\n font = self.subjectLabel.font()\n font.setPointSize(12)\n self.subjectLabel.setFont(font)\n self.selectSem.addItems([\" SEM I\", \" SEM II\", \" SEM III\", \" SEM IV\", \" SEM V\", \" SEM VI\", \" SEM VII\", \" SEM VIII\"])\n self.selectSem.activated[str].connect(self.onSemSelected)\n self.selectSubject.activated[str].connect(self.onSubjectSelected)\n self.selectBtn.clicked.connect(self.send_clicked)\n 
self.setWindowTitle(\"Select Subject\")\n\n def send_clicked(self):\n\n self.semsub.emit(self.selectSem.currentText(), self.selectSubject.currentText())\n self.close()\n\n\n def onSubjectSelected(self, text):\n # self.got_password.emit(text)\n self.subjectLabel.setText(text)\n\n def onSemSelected(self, text):\n\n if(text == ' SEM I'):\n self.selectSubject.clear()\n self.selectSubject.addItems([\" Applied Mathematics 1\", \" Applied Chemistry 1\", \" Applied Physics 1\", \" Basic Electrical and Electronic Engineering\", \" Engineering Mechanics\", \" Environmental Studies\"])\n elif(text == ' SEM II'):\n self.selectSubject.clear()\n self.selectSubject.addItems([\" Applied Mathematics 2\", \" Applied Chemistry 2\", \" Applied Physics 2\", \" Engineering Drawing\", \" Structured Programming Approach\", \" Communication Skills\"])\n elif(text == ' SEM III'):\n self.selectSubject.clear()\n self.selectSubject.addItems([\" Data Structure and Analysis\", \" Logic Design\", \" Principle of Communications\", \" Database Managemnet System\",\" Applied Mathematics 3\"])\n elif(text == ' SEM IV'):\n self.selectSubject.clear()\n self.selectSubject.addItems([\" Automata Theory\", \" Operating Systems\", \" Computer Networks\", \" Computer Organizations and Architecture\",\" Applied Mathematics 4\"])\n elif(text == ' SEM V'):\n self.selectSubject.clear()\n self.selectSubject.addItems([\" Microcontroller and Embedded Programming\", \" Cryptography and Network Security\", \" Internet Programming\", \" E-commerce and E-business\", \" Advanced Data Management Technology\"])\n elif(text == ' SEM VI'):\n self.selectSubject.clear()\n self.selectSubject.addItems([\" Software Engineering with\\n Project Management\", \" Data Mining and Business Intelligence\", \" Cloud Computing and Services\", \" Digital Forensics\", \" Wireless Networks\"])\n elif(text == ' SEM VII'):\n self.selectSubject.clear()\n self.selectSubject.addItems([\" Enterprise Network Design\", \" Infrastructure Security\", 
\" Artificial Intelligence\", \" Software Testing and\\n Quality Assurance\", \" Management Information System\"])\n elif(text == ' SEM VIII'):\n self.selectSubject.clear()\n self.selectSubject.addItems([\" Enterprise Resource Managenment\", \" Big Data Analytics\", \" Project Management\", \" Internet Of Everything\"])\n","sub_path":"Add_questions/subSelect.py","file_name":"subSelect.py","file_ext":"py","file_size_in_byte":5032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"623931231","text":"#!/bin/env python\n\nimport argparse\nimport math\nimport os\n\nimport cv2\nfrom keras.callbacks import ModelCheckpoint\nimport numpy as np\n\nfrom class_mapper import map_to\nfrom model import load_model\n\n# IMPORTANT: Keep the slash at the end!\nDATA_ROOT_PATH = '/tmp/data/tl_classification/'\n\ndef load_image(base_path, image_path):\n \"\"\"Reads an image file and returns a bgr8 array and its label\n\n Arguments:\n base_path -- The directory where the captures are saved\n image_path -- The name of the file to load\n \"\"\"\n full_path = os.path.join(base_path, image_path)\n label = map_to(int(image_path.split('/')[0]))\n image = cv2.imread(full_path) # Assumes the file is a bgr8 encoded jpg\n #image = cv2.cvtColor(cv2.imread(base_path + '/' + image_path), cv2.COLOR_BGR2RGB)\n return image, label\n\ndef train(weights_file):\n captures = [] #os.listdir(DATA_ROOT_PATH)\n\n for dirpath, subdirs, files in os.walk(DATA_ROOT_PATH):\n if len(files) > 0:\n for f in files:\n captures.append(os.path.join(dirpath, f).replace(DATA_ROOT_PATH, ''))\n\n assert len(captures) > 0, \"No files found!\"\n\n from sklearn.cross_validation import train_test_split\n train_samples, validation_samples = train_test_split(captures, test_size=0.2)\n\n import sklearn\n\n def batch_len(array):\n \"\"\"Shortcut function to calculating the length of a batch from the\n generator based on the augmentations performed\"\"\"\n return len(array) * 2 # Mirror\n\n def generator(samples, batch_size=32):\n \"\"\"Generator function to return a number of example instances for training\n\n Arguments:\n samples -- The full array of samples (X data and y result)\n batch_size -- The number of samples (before augmentation) that will be\n returned by the generator. 
The samples are shuffled before\n each new batch is generated.\n \"\"\"\n def augment_and_append(image, label):\n images.append(image)\n labels.append(label)\n images.append(np.fliplr(image))\n labels.append(label)\n\n num_samples = len(samples)\n while 1:\n sklearn.utils.shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n images = []\n labels = []\n for batch_sample in batch_samples:\n image, label = load_image(DATA_ROOT_PATH, batch_sample)\n augment_and_append(image, label)\n\n #print(labels)\n X_data = np.array(images)\n y_data = np.array(labels)\n yield sklearn.utils.shuffle(X_data, y_data)\n\n train_generator = generator(train_samples, batch_size=10)\n validation_generator = generator(validation_samples, batch_size=10)\n\n #inputs = Input(shape=(600, 800, 3))\n #resized = Lambda(lambda image: ktf.image.resize_images(image, (224, 224)))(inputs)\n #model = MobileNet(alpha=2, depth_multiplier=1, include_top=True, weights=None, classes=4, input_tensor=resized)\n\n #model.compile(loss='mse', optimizer='adam')\n\n #if not base_model is None:\n # model.load_weights(base_model)\n\n model = load_model(weights_file)\n checkpoint = ModelCheckpoint('M_{val_loss:.4f}.h5', monitor='val_loss', verbose=1, save_best_only=True, mode='min', save_weights_only=True)\n callbacks_list = [checkpoint]\n\n history = model.fit_generator(train_generator, steps_per_epoch=2000, epochs=30, callbacks=callbacks_list, validation_data=validation_generator, validation_steps=30, use_multiprocessing=True)\n\n model.save('M.h5')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Traffic Light Classification Training\")\n parser.add_argument(\n 'model_weights',\n type=str,\n help=\"Path to a weights file that will be fine-tuned\"\n )\n args = parser.parse_args()\n 
train(args.model_weights)\n","sub_path":"ros/src/tl_detector/light_classification/mobilenet_model.py","file_name":"mobilenet_model.py","file_ext":"py","file_size_in_byte":4011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"251770382","text":"import pandas as pd\nimport requests\nimport time\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport datetime\nimport numpy as np\nimport random\nimport sys\n\n\nstart_date = input('Enter start date (yyyymmdd): ')\nstart_day = start_date[6:8]\nstart_month = start_date[4:6]\nstart_year = start_date[:4]\nstart_minute = '00'\n\nend_date = input('Enter a end date (yyyymmdd): ')\nif end_date == '':\n\tend_date = start_date\n\nend_day = end_date[6:8]\nend_month = end_date[4:6]\nend_year = end_date[:4]\nend_minute = '00'\n\nif len(start_date) != 8 or len(end_date) != 8:\n\tprint('\\nDATE ERROR: Dates must have 8 digits.')\n\tsys.exit(0)\n\nstart_hour = input('Enter a start hour or \"full\": ').zfill(2)\nif start_hour.isdigit() == True:\n\tend_hour = input('Enter a end hour: ').zfill(2)\n\tif start_date == end_date:\n\t\tif (int(end_hour) - int(start_hour)) < 0:\n\t\t\tprint('\\nTIME ERROR: Difference between two hours must be greater than zero.')\n\t\t\tsys.exit(0)\n\telif int(end_hour) > 24 or int(start_hour) > 23:\n\t\tprint('\\nTIME ERROR: Hours must be between 0 and 23.')\n\t\tsys.exit(0)\n\nif start_hour.isdigit() == False:\n\tif start_hour == 'full':\n\t\tstart_hour = '00'.zfill(2)\n\t\tend_hour = '23'.zfill(2)\n\telse:\n\t\tprint('\\nTIME ERROR: Not a valid alternative hour.')\n\t\tsys.exit(0)\n\n\n#=========NM Stations\n\nlist_nm = ['AATB','APTY','ARNM','ATHN','BKSN','CALG','CALM','DOMB',\n\t\t\t'DOMC','DRBS','ESOI','FSMT','HRMS','INVK','IRK2','IRK3',\n\t\t\t'IRKT','JBGO','JUNG','JUNG1','KERG','KIEL','KIEL2','LMKS',\n\t\t\t'MCRL','MGDN','MOSC','MRNY','MWSN','MXCO','NAIN','NANM','NEU3',\n\t\t\t'NEWK','NRLK','NVBK','OULU','PSNM','PTFM','PWNK','ROME','SANB','SNAE'\n\t\t\t,'SOPB','SOPO','TERA','THUL','TIBT','TXBY','YKTK']\n\nnum_station = int(input('How many stations to parse: '))\nprint(f'You are parsing {num_station} station(s)')\n\nstation_multi = []\nfor i in range(num_station):\n\tstation = input('Enter 
station names: ').upper()\n\tif station == '':\n\t\tstation = 'OULU'\n\t\tstation_multi.append(station)\n\telif station == 'RANDOM':\n\t\tstation = random.choice(list_nm)\n\t\tstation_multi.append(station)\n\telse:\n\t\tstation_multi.append(station)\n\nprint(f'Parsing the {station_multi} stations')\n\n\nevent_obj_start = datetime.datetime.strptime(f'{start_date} {start_hour}', '%Y%m%d %H')\nevent_obj_start_str = datetime.datetime.strftime(event_obj_start, '%Y%m%d %H:%M:%S')\nevent_obj_start_str_date = datetime.datetime.strftime(event_obj_start, '%Y%m%d %H')\n\nevent_obj_end = datetime.datetime.strptime(f'{end_date} {end_hour}', '%Y%m%d %H')\nevent_obj_end_str = datetime.datetime.strftime(event_obj_end, '%Y%m%d %H:%M:%S')\nevent_obj_end_str_date = datetime.datetime.strftime(event_obj_end, '%Y%m%d %H')\n\n\n\n#=======sorting column header test\n\n#sorter_list = ['PSNM', 'TIBT', 'ESOI', 'ATHN', 'MXCO', 'ARNM', 'NANM', 'PTFM', 'CALM', 'AATB', 'ROME', 'BKSN', 'HRMS', 'JUNG', 'JUNG1', 'LMKS', 'IRK2', 'IRK3', 'IRKT', 'DRBS', 'NVBK', 'MCRL', 'MOSC', 'NEWK', 'KIEL', 'KIEL2', 'MGDN', 'YKTK', 'KERG', 'CALG', 'OULU', 'SANB', 'SNAE', 'APTY', 'NRLK', 'TXBY', 'FSMT', 'INVK', 'JBGO', 'NAIN', 'PWNK', 'THUL', 'MWSN', 'NEU3', 'SOPB', 'SOPO', 'MRNY', 'DOMB', 'DOMC', 'TERA']\nsorter = {'PSNM':0, 'TIBT':1, 'ESOI':2, 'ATHN':3, 'MXCO':4, 'ARNM':5, 'NANM':6, 'PTFM':7, 'CALM':8, 'AATB':9, 'ROME':10, 'BKSN':11, 'HRMS':12, 'JUNG':13, 'JUNG1':14, 'LMKS':15, 'IRK2':16, 'IRK3':17, 'IRKT':18, 'DRBS':19, 'NVBK':20, 'MCRL':21, 'MOSC':22, 'NEWK':23, 'KIEL':24, 'KIEL2':25, 'MGDN':26, 'YKTK':27, 'KERG':28, 'CALG':29, 'OULU':30, 'SANB':31, 'SNAE':32, 'APTY':33, 'NRLK':34, 'TXBY':35, 'FSMT':36, 'INVK':37, 'JBGO':38, 'NAIN':39, 'PWNK':40, 'THUL':41, 'MWSN':42, 'NEU3':43, 'SOPB':44, 'SOPO':45, 'MRNY':46, 'DOMB':47, 'DOMC':48, 'TERA':49}\n#sorted_sorter = sorted(sorter.items(), key=operator.itemgetter(1))\nsorted_lambda = sorted(sorter.items(), key=lambda x: x[1])\n\n\nsorted_nm_list = []\nfor i in [i[0] 
for i in sorted_lambda]:\n\tif i in station_multi:\n\t\tsorted_nm_list.append(i)\n\n#========creating station string for url\n\nstation_str = ''\nfor i in sorted_nm_list:\n\tstation_str += f'&stations[]={i}'\n\n#=========Fetch online neutron monitor data\n\nurl = f'http://www.nmdb.eu/nest/draw_graph.php?formchk=1{station_str}&tabchoice=revori&dtype=corr_for_efficiency&tresolution=0&yunits=0&date_choice=bydate&start_day={start_day}&start_month={start_month}&start_year={start_year}&start_hour={start_hour}&start_min={start_minute}&end_day={end_day}&end_month={end_month}&end_year={end_year}&end_hour={end_hour}&end_min={end_minute}&output=ascii'\n\nnm_data = pd.DataFrame([])\n\nname_list = ['datetime'] + [ str(i) for i in sorted_nm_list]\n\ndateparse = lambda x: pd.datetime.strptime(x, '%Y-%m-%d %H:%M:%S')\nnm_data = pd.read_csv(url,sep=';|\\n|\\b', skiprows=133, skipfooter=3, engine='python', index_col='datetime', date_parser=dateparse, names=name_list, na_values=[' null']) #, , parse_dates=['datetime'], date_parser=dateparse\n#delim_whitespace=True\n\nnm_counter = []\nfor item in sorted_nm_list:\n\tif nm_data[f'{item}'].isnull().values.any() == True:\n\t\tnm_counter.append(1)\n\telse:\n\t\tnm_counter.append(0)\n\n'''\nfor i in nm_counter:\n\tif i == 1:\n\t\tprint('Please select station with data for this time frame.')\n\t\tsys.exit(0)\n'''\n\n#====Plotting\nmyFmt = mdates.DateFormatter('%m/%d\\n%H:%M') #this is line that breaks code (ValueError: year 60740 is out of range)\n\ncolor_count = []\nfor i in sorted_nm_list:\n\n\tcolor_list = ['red','orange','green','blue','indigo','violet','purple'] #,'yellow'\n\tcolor_list = list(set(color_list) - set(color_count))\n\n\trand_color = random.choice(color_list)\n\tcolor_count.append(rand_color)\n\n\t#nm_data[f'{i}'].loc[f'{event_obj_start_str_date}':f'{event_obj_end_str_date}'].plot(color=rand_color, label= f'{i}')\n\tplt.plot(nm_data.index, nm_data[f'{i}'], color=rand_color, 
label=f'{i}')\n\n#nm_data['RCORR_E'].loc[f'{event_obj_start_str_date}':f'{event_obj_end_str_date}'].plot(color='limegreen', label= 'Corrected for Efficiency')\n\nplt.title(f'Neutron Monitor Data Corrected for Efficiency\\n[{event_obj_start_str} -- {event_obj_end_str}]', fontname=\"Arial\", fontsize = 14)\nplt.xlabel('Time', fontname=\"Arial\", fontsize = 14)\nplt.ylabel('Counts/s', fontname=\"Arial\", fontsize = 14)\nplt.minorticks_on()\nplt.grid(True)\n#plt.yscale('log')\nplt.legend(loc='upper right')\nplt.tight_layout()\n#ax = fig.add_subplot(111)\nax = plt.gca()\n\nax.xaxis.set_major_formatter(myFmt) #this is line that breaks code (ValueError: year 60740 is out of range)\n#ax.xaxis.set_major_formatter(dates.DateFormatter('%H'))\n#plt.axes().xaxis.set_major_formatter(myFmt)\n\nplt.setp(ax.xaxis.get_majorticklabels(), rotation=0, horizontalalignment='center')\n\n\n#plt.savefig('nm_data.png', format='png', dpi=900)\nplt.show()\n","sub_path":"Scripts/deprecated_scripts/pandas_test_nm.py","file_name":"pandas_test_nm.py","file_ext":"py","file_size_in_byte":6592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"174889086","text":"\nimport os\nimport sys\nimport time\n\nimport threading as mt\nimport multiprocessing as mp\n\nimport radical.utils as ru\n\nfrom .. import Session\nfrom .. import utils as rpu\nfrom .. import constants as rpc\n\n\n# ------------------------------------------------------------------------------\n#\nclass Worker(rpu.Component):\n\n # --------------------------------------------------------------------------\n #\n def __init__(self, cfg):\n\n if isinstance(cfg, str): cfg = ru.Config(cfg=ru.read_json(cfg))\n else : cfg = ru.Config(cfg=cfg)\n\n self._n_cores = cfg.cores\n self._n_gpus = cfg.gpus\n\n self._info = ru.Config(cfg=cfg.get('info', {}))\n self._session = Session(cfg=cfg, uid=cfg.sid, _primary=False)\n\n rpu.Component.__init__(self, cfg, self._session)\n\n self._term = mp.Event() # set to terminate\n self._res_evt = mp.Event() # set on free resources\n\n self._mlock = ru.Lock(self._uid) # lock `_modes` and `_mdata`\n self._modes = dict() # call modes (call, exec, eval, ...)\n self._mdata = dict() # call mode meta data\n\n # We need to make sure to run only up to `gpn` tasks using a gpu\n # within that pool, so need a separate counter for that.\n self._resources = {'cores' : [0] * self._n_cores,\n 'gpus' : [0] * self._n_gpus}\n\n # resources are initially all free\n self._res_evt.set()\n\n # # create a multiprocessing pool with `cpn` worker processors. Set\n # # `maxtasksperchild` to `1` so that we get a fresh process for each\n # # task. That will also allow us to run command lines via `exec`,\n # # effectively replacing the worker process in the pool for a specific\n # # task.\n # #\n # # We use a `fork` context to inherit log and profile handles.\n # #\n # # NOTE: The mp documentation is wrong; mp.Pool does *not* have a context\n # # parameters. 
Instead, the Pool has to be created within\n # # a context.\n # ctx = mp.get_context('fork')\n # self._pool = ctx.Pool(processes=self._n_cores,\n # initializer=None,\n # maxtasksperchild=1)\n # NOTE: a multiprocessing pool won't work, as pickle is not able to\n # serialize our worker object. So we use our own process pool.\n # It's not much of a loss since we want to respawn new processes for\n # each task anyway (to improve isolation).\n self._pool = dict() # map task uid to process instance\n self._plock = ru.Lock('p' + self._uid) # lock _pool\n\n # We also create a queue for communicating results back, and a thread to\n # watch that queue\n self._result_queue = mp.Queue()\n self._result_thead = mt.Thread(target=self._result_watcher)\n self._result_thead.daemon = True\n self._result_thead.start()\n\n # connect to master\n self.register_subscriber(rpc.CONTROL_PUBSUB, self._control_cb)\n self.register_publisher(rpc.CONTROL_PUBSUB)\n\n # run worker initialization *before* starting to work on requests.\n # the worker provides three builtin methods:\n # eval: evaluate a piece of python code\n # exec: execute a command line (fork/exec)\n # shell: execute a shell command\n # call: execute a method or function call\n self.register_mode('call', self._call)\n self.register_mode('eval', self._eval)\n self.register_mode('exec', self._exec)\n self.register_mode('shell', self._shell)\n\n self.pre_exec()\n\n # connect to the request / response ZMQ queues\n self._res_put = ru.zmq.Putter('to_res', self._info.res_addr_put)\n self._req_get = ru.zmq.Getter('to_req', self._info.req_addr_get,\n cb=self._request_cb)\n\n # the worker can return custom information which will be made available\n # to the master. 
This can be used to communicate, for example, worker\n # specific communication endpoints.\n\n # `info` is a placeholder for any additional meta data communicated to\n # the worker\n self.publish(rpc.CONTROL_PUBSUB, {'cmd': 'worker_register',\n 'arg': {'uid' : self._uid,\n 'info': self._info}})\n\n\n # --------------------------------------------------------------------------\n #\n def pre_exec(self):\n '''\n This method can be overloaded by the Worker implementation to run any\n pre_exec commands before spawning worker processes.\n '''\n\n pass\n\n\n # --------------------------------------------------------------------------\n #\n def register_mode(self, name, executor):\n\n assert(name not in self._modes)\n\n self._modes[name] = executor\n self._mdata[name] = dict()\n\n\n # --------------------------------------------------------------------------\n #\n def register_call(self, name, method):\n\n # ensure the call mode is usable\n mode = 'call'\n\n assert(mode in self._modes)\n assert(name not in self._mdata[mode])\n\n self._mdata[mode][name] = method\n\n\n # --------------------------------------------------------------------------\n #\n def _call(self, data):\n '''\n We expect data to have a three entries: 'method' or 'function',\n containing the name of the member method or the name of a free function\n to call, `args`, an optional list of unnamed parameters, and `kwargs`,\n and optional dictionary of named parameters.\n '''\n\n if 'method' in data:\n to_call = getattr(self, data['method'], None)\n\n elif 'function' in data:\n names = dict(list(globals().items()) + list(locals().items()))\n to_call = names.get(data['function'])\n\n else:\n raise ValueError('no method or function specified: %s' % data)\n\n if not to_call:\n raise ValueError('callable not found: %s' % data)\n\n\n args = data.get('args', [])\n kwargs = data.get('kwargs', {})\n\n try:\n out = to_call(*args, **kwargs)\n err = None\n ret = 0\n\n except Exception as e:\n self._log.exception('_call 
failed: %s' % (data))\n out = None\n err = 'call failed: %s' % e\n ret = 1\n\n return out, err, ret\n\n\n # --------------------------------------------------------------------------\n #\n def _eval(self, data):\n '''\n We expect data to have a single entry: 'code', containing the Python\n code to be eval'ed\n '''\n\n try:\n out = eval(data['code'])\n err = None\n ret = 0\n\n except Exception as e:\n self._log.exception('_eval failed: %s' % (data))\n out = None\n err = 'eval failed: %s' % e\n ret = 1\n\n return out, err, ret\n\n\n # --------------------------------------------------------------------------\n #\n def _exec(self, data):\n '''\n We expect data to have two entries: 'exe', containing the executabele to\n run, and `args` containing a list of arguments (strings) to pass as\n command line arguments. We use `sp.Popen` to run the fork/exec, and to\n collect stdout, stderr and return code\n '''\n\n try:\n import subprocess as sp\n\n exe = data['exe'],\n args = data.get('args', []),\n env = data.get('env', {}),\n\n proc = sp.Popen(executable=exe, args=args, env=env,\n stdin=None, stdout=sp.PIPE, stderr=sp.PIPE,\n close_fds=True, shell=False)\n out, err = proc.communicate()\n ret = proc.returncode\n\n except Exception as e:\n self._log.exception('_exec failed: %s' % (data))\n out = None\n err = 'exec failed: %s' % e\n ret = 1\n\n return out, err, ret\n\n\n # --------------------------------------------------------------------------\n #\n def _shell(self, data):\n '''\n We expect data to have a single entry: 'cmd', containing the command\n line to be called as string.\n '''\n\n try:\n out, err, ret = ru.sh_callout(data['cmd'])\n\n except Exception as e:\n self._log.exception('_shell failed: %s' % (data))\n out = None\n err = 'shell failed: %s' % e\n ret = 1\n\n return out, err, ret\n\n\n # --------------------------------------------------------------------------\n #\n def _alloc_task(self, task):\n '''\n allocate task resources\n '''\n\n with self._mlock:\n\n 
cores = task.get('cores', 1)\n gpus = task.get('gpus' , 0)\n\n assert(cores >= 1)\n assert(cores <= self._n_cores)\n assert(gpus <= self._n_gpus)\n\n if cores > self._resources['cores'].count(0): return False\n if gpus > self._resources['gpus' ].count(0): return False\n\n alloc_cores = list()\n alloc_gpus = list()\n\n if cores:\n for n in range(self._n_cores):\n if not self._resources['cores'][n]:\n self._resources['cores'][n] = 1\n alloc_cores.append(n)\n if len(alloc_cores) == cores:\n break\n\n if gpus:\n for n in range(self._n_gpus):\n if not self._resources['gpus'][n]:\n self._resources['gpus'][n] = 1\n alloc_gpus.append(n)\n if len(alloc_gpus) == gpus:\n break\n\n task['resources'] = {'cores': alloc_cores,\n 'gpus' : alloc_gpus}\n return True\n\n\n # --------------------------------------------------------------------------\n #\n def _dealloc_task(self, task):\n '''\n deallocate task resources\n '''\n\n with self._mlock:\n\n resources = task['resources']\n\n for n in resources['cores']:\n assert(self._resources['cores'][n])\n self._resources['cores'][n] = 0\n\n for n in resources['gpus']:\n assert(self._resources['gpus'][n])\n self._resources['gpus'][n] = 0\n\n # signal available resources\n self._res_evt.set()\n\n return True\n\n\n # --------------------------------------------------------------------------\n #\n def _request_cb(self, tasks):\n '''\n grep call type from tasks, check if methods are registered, and\n invoke them.\n '''\n\n task = ru.as_list(task)\n\n for task in tasks:\n\n self._prof.prof('reg_start', uid=self._uid, msg=task['uid'])\n task['worker'] = self._uid\n\n try:\n # ok, we have work to do. Check the requirements to see how\n # many cpus and gpus we need to mark as busy\n while not self._alloc_task(task):\n # no resource - wait for new resources\n #\n # NOTE: this will block smaller tasks from being executed\n # right now. 
alloc_task is not a proper scheduler,\n # after all.\n while not self._res_evt.wait(timeout=1.0):\n\n # break on termination\n if self._term.is_set():\n return False\n\n self._res_evt.clear()\n\n # we got an allocation for this task, and can run it, so apply\n # to the process pool. The callback (`self._result_cb`) will\n # pick the task up on completion and free resources.\n #\n # NOTE: we don't use mp.Pool - see __init__ for details\n\n # ret = self._pool.apply_async(func=self._dispatch, args=[task],\n # callback=self._result_cb,\n # error_callback=self._error_cb)\n proc = mp.Process(target=self._dispatch, args=[task],\n daemon=True)\n\n with self._plock:\n\n # we need to include `proc.start()` in the lock, as\n # otherwise we may end up getting the `self._result_cb`\n # before the pid could be registered in `self._pool`.\n proc.start()\n self._pool[proc.pid] = proc\n self._log.debug('applied: %s: %s: %s',\n task['uid'], proc.pid, self._pool.keys())\n\n except Exception as e:\n\n self._log.exception('request failed')\n\n # free resources again for failed task\n self._dealloc_task(task)\n\n res = {'req': task['uid'],\n 'out': None,\n 'err': 'req_cb error: %s' % e,\n 'ret': 1}\n\n self._res_put.put(res)\n\n\n # --------------------------------------------------------------------------\n #\n def _dispatch(self, task):\n\n # this method is running in a process of the process pool, and will now\n # apply the task to the respective execution mode.\n #\n # NOTE: application of pre_exec directives may got here\n\n task['pid'] = os.getpid()\n\n # ----------------------------------------------------------------------\n def _dispatch_thread(tlock):\n out, err, ret = self._modes[mode](task.get('data'))\n with tlock:\n res = [task, str(out), str(err), int(ret)]\n self._log.debug('put 1 result: task %s', task['uid'])\n self._result_queue.put(res)\n # ----------------------------------------------------------------------\n\n\n try:\n # self._log.debug('dispatch: %s: %d', 
task['uid'], task['pid'])\n mode = task['mode']\n assert(mode in self._modes), 'no such call mode %s' % mode\n\n tout = self._cfg.workload.timeout\n self._log.debug('dispatch with tout %s', tout)\n\n tlock = mt.Lock()\n thread = mt.Thread(target=_dispatch_thread,\n args=[tlock])\n thread.daemon = True\n thread.start()\n thread.join(timeout=tout)\n\n with tlock:\n if thread.is_alive():\n out = None\n err = 'timeout (>%s)' % tout\n ret = 1\n res = [task, str(out), str(err), int(ret)]\n self._log.debug('put 2 result: task %s', task['uid'])\n self._result_queue.put(res)\n\n # self._log.debug('dispatch done: %s', task['uid'])\n\n except Exception as e:\n\n self._log.exception('dispatch failed')\n out = None\n err = 'dispatch failed: %s' % e\n ret = 1\n res = [task, str(out), str(err), int(ret)]\n self._log.debug('put 3 result: task %s', task['uid'])\n self._result_queue.put(res)\n\n finally:\n # if we kill the process too quickly, the result put above\n # will not make it out, thus make sure the queue is empty\n # first.\n self._result_queue.close()\n self._result_queue.join_thread()\n sys.exit(ret)\n # os.kill(os.getpid(), signal.SIGTERM)\n\n\n\n # --------------------------------------------------------------------------\n #\n def _result_watcher(self):\n\n while True:\n\n try:\n res = self._result_queue.get()\n self._log.debug('got result: %s', res)\n self._result_cb(res)\n except:\n self._log.exception('queue error')\n raise\n\n\n # --------------------------------------------------------------------------\n #\n def _result_cb(self, result):\n\n try:\n task, out, err, ret = result\n # self._log.debug('result cb: task %s', task['uid'])\n\n with self._plock:\n pid = task['pid']\n del(self._pool[pid])\n\n # free resources again for the task\n self._dealloc_task(task)\n\n res = {'req': task['uid'],\n 'out': out,\n 'err': err,\n 'ret': ret}\n\n self._res_put.put(res)\n self._prof.prof('reg_stop', uid=self._uid, msg=task['uid'])\n except:\n self._log.exception('result cb 
failed')\n raise\n\n\n\n # --------------------------------------------------------------------------\n #\n def _error_cb(self, error):\n\n self._log.debug('error: %s', error)\n raise RuntimeError(error)\n\n\n # --------------------------------------------------------------------------\n #\n def _control_cb(self, topic, msg):\n\n if msg['cmd'] == 'worker_terminate':\n if msg['arg']['uid'] == self._uid:\n\n self._log.debug('got terminate msg: %s: %s', topic, msg)\n\n self._term.set()\n self.stop()\n sys.exit(0)\n\n\n # --------------------------------------------------------------------------\n #\n def run(self):\n\n while not self._term.is_set():\n time.sleep(1)\n\n\n# ------------------------------------------------------------------------------\n","sub_path":"src/radical/pilot/task_overlay/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":17871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"523018541","text":"from flask import render_template, jsonify, request\nfrom application import app, conn\nfrom mixpanel import Mixpanel\n\nmp = Mixpanel(\"e25bfe00c1f58cb35f850ae58bd8378b\")\n\n@app.route(\"/\")\ndef index():\n return render_template('index.html')\n\n@app.route(\"/_petition\", methods=['GET', 'POST'])\ndef petition():\n cur = conn.cursor()\n\n if request.method == 'POST':\n first_name = request.form['first_name']\n last_name = request.form['last_name']\n email = request.form['email']\n story = request.form.get('story')\n\n mp.people_set(email, {\n '$first_name' : first_name,\n '$last_name' : last_name,\n '$email' : email,\n 'story' : story\n })\n\n mp.track(email, \"Signed Petition\");\n\n cur.execute(\"INSERT INTO signature (first_name, last_name, email, story) VALUES (%s, %s, %s, %s);\", (first_name, last_name, email, story))\n\n\n cur.execute(\"SELECT first_name, last_name FROM signature;\")\n results = cur.fetchall()\n\n signatures = []\n for signature in results:\n signatures.append(signature[0] + ' ' + signature[1])\n\n cur.close()\n conn.commit()\n return jsonify({\"results\": signatures})\n","sub_path":"application/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"497977883","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Rapid Events Test.\n\nAuthor: Imran Matin\nEmail: imatin@ucsd.edu\n\nUsage:\n# in a new terminal\npython cSBC.py\n# in a new terminal\npython test_send_rapid_events.py\n\nTests the functionality of the cSBC when it is processing an event, and \nanother event is triggered and sent to it. Functions as the mSBC. Note, change\nthe HOST variable to be the IP of the cSBC if you are not running it on the \nlocalhost. Set the number of events to be sent back to back and how many times\nto send them.\n\"\"\"\n\n# Import socket module\nimport socket\nfrom time import sleep\n\n# The server's hostname or IP address\nHOST = \"127.0.0.1\"\n# The port used by the server\nPORT = 65431\n# Number of back to back events to be sent\nNUM_EVENTS = 5\n# Number of times to send NUM_EVENTS\nNUM_TRIALS = 3\n\n\ndef send_events(num_events):\n \"\"\"Sends num_events back to back commands to the cSBC.\"\"\"\n # Send num_events back to back event requests\n for i in range(0, num_events):\n try:\n # open a socket for this client\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n # connect to the server\n s.connect((HOST, PORT))\n print(\"Sending event\")\n # send a command to the server\n s.sendall(b\"EVENT\")\n sleep(0.05)\n except Exception as e:\n print(f\"\\n{e}\\n\")\n\n\ndef send_shutdown():\n \"\"\"Continuously sends shutdown command to cSBC until it is shutdown.\"\"\"\n while True:\n try:\n # open a socket for this client\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n # connect to the server\n s.connect((HOST, PORT))\n print(\"Sending shutdown\")\n # send a command to the server\n s.sendall(b\"SHUTDOWN\")\n break\n except Exception as e:\n print(f\"\\n{e}\\n\")\n sleep(1)\n\n\nif __name__ == \"__main__\":\n print(\"Starting Send Rapid Events Test...\")\n try:\n for i in range(0, NUM_TRIALS):\n # wait period to allow for cSBC to place collect images\n sleep(5)\n 
send_events(NUM_EVENTS)\n\n # Shutdown the cSBC\n send_shutdown()\n except Exception as e:\n print(f\"Exception Occurred\")\n print(\"Completed Send Rapid Events Test...\")\n","sub_path":"tests/test_send_rapid_events.py","file_name":"test_send_rapid_events.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"463479341","text":"import datajoint as dj\nimport os, re, inspect\nimport numpy as np\nfrom churchland_pipeline_python import lab, acquisition, equipment, reference, processing\nfrom churchland_pipeline_python.utilities import speedgoat, datajointutils\nfrom decimal import Decimal\nfrom functools import reduce\nfrom typing import Tuple, List\n\nDataJointTable = dj.user_tables.UserTable\n\nschema = dj.schema(dj.config.get('database.prefix') + 'churchland_analyses_pacman_acquisition')\n\n# =======\n# LEVEL 0\n# =======\n\n@schema \nclass ArmPosture(dj.Lookup):\n definition = \"\"\"\n # Arm posture\n -> lab.Monkey\n arm_posture_id: tinyint unsigned # arm posture ID number\n ---\n elbow_flexion: tinyint unsigned # elbow flexion angle (deg)\n shoulder_flexion: tinyint unsigned # shoulder flexion angle relative to coronal plane (deg)\n \"\"\"\n \n contents = [\n ['Cousteau', 0, 90, 65],\n ['Cousteau', 1, 90, 40],\n ['Cousteau', 2, 90, 75]\n ]\n\n\n@schema\nclass ConditionParams(dj.Lookup):\n \"\"\"\n Task condition parameters. Each condition consists of a unique combination of force, \n stimulation, and general target trajectory parameters. For conditions when stimulation\n was not delivered, stimulation parameters are left empty. 
Each condition also includes\n a set of parameters unique to the particular type of target trajectory.\n \"\"\"\n\n definition = \"\"\"\n condition_id: smallint unsigned # condition ID number\n \"\"\"\n\n class Force(dj.Part):\n definition = \"\"\"\n # Force parameters\n -> master\n force_id: smallint unsigned # force ID number\n ---\n force_max: tinyint unsigned # maximum force (N)\n force_offset: decimal(5,4) # baseline force (N)\n force_inverted: bool # whether pushing on the load cell moves PacMan up (False) or down (True) onscreen\n \"\"\"\n \n class Stim(dj.Part):\n definition = \"\"\"\n # CereStim parameters\n -> master\n stim_id: smallint unsigned # stim ID number\n ---\n -> equipment.ElectrodeArrayModel.Electrode # stim electrode\n stim_current: smallint unsigned # stim current (uA)\n stim_polarity: tinyint unsigned # cathodic (0) or anodic (1) first //TODO check this\n stim_pulses: tinyint unsigned # number of pulses in stim train\n stim_width1: smallint unsigned # first pulse duration (us)\n stim_width2: smallint unsigned # second pulse duration (us)\n stim_interphase: smallint unsigned # interphase duration (us)\n stim_frequency: smallint unsigned # stim frequency (Hz)\n \"\"\"\n\n class Target(dj.Part):\n definition = \"\"\"\n # Target force profile parameters\n -> master\n target_id: smallint unsigned # target ID number\n ---\n target_duration: decimal(5,4) # target duration (s)\n target_offset: decimal(5,4) # target offset from baseline (proportion playable window)\n target_pad_pre: decimal(5,4) # duration of \"padding\" dots preceding target force profile (s)\n target_pad_post: decimal(5,4) # duration of \"padding\" dots following target force profile (s)\n \"\"\"\n \n class Static(dj.Part):\n definition = \"\"\"\n # Static force profile parameters\n -> master.Target\n \"\"\"\n\n def proj_label(self, keep_self: bool=True, n_sigfigs: int=4):\n \"\"\"Project label.\"\"\"\n\n rel = (self * ConditionParams.Target * ConditionParams.Force) \\\n 
.proj(amp='CONVERT(ROUND(force_max*target_offset,{}), char)'.format(n_sigfigs)) \\\n .proj(condition_label='CONCAT(\"Static (\", amp, \" N)\")')\n\n if keep_self:\n rel = self * rel\n\n return rel\n\n def proj_rank(self, keep_self: bool=True):\n \"\"\"Project ranking based on frequency and amplitude.\"\"\"\n\n rel = (self * ConditionParams.Target * ConditionParams.Force) \\\n .proj(amp='CONVERT(ROUND(force_max*target_offset, 4), char)') \\\n .proj(condition_rank='CONCAT(\"00_\", LPAD(amp, 8, 0))')\n\n if keep_self:\n rel = self * rel\n\n return rel\n\n\n class Ramp(dj.Part):\n definition = \"\"\"\n # Linear ramp force profile parameters\n -> master.Target\n ---\n target_amplitude: decimal(5,4) # target amplitude (proportion playable window)\n \"\"\"\n\n def proj_label(self, keep_self: bool=True, n_sigfigs: int=4):\n \"\"\"Project label.\"\"\"\n\n rel = (self * ConditionParams.Target * ConditionParams.Force) \\\n .proj(amp='CONVERT(ROUND(force_max*target_amplitude/target_duration,{}), char)'.format(n_sigfigs)) \\\n .proj(condition_label='CONCAT(\"Ramp (\", amp, \" N/s)\")')\n\n if keep_self:\n rel = self * rel\n\n return rel\n\n def proj_rank(self, keep_self: bool=True):\n \"\"\"Project ranking based on frequency and amplitude.\"\"\"\n\n rel = (self * ConditionParams.Target * ConditionParams.Force) \\\n .proj(amp='ROUND(force_max*target_amplitude/target_duration, 4)') \\\n .proj(condition_rank='CONCAT(\"10_\", LPAD(CONVERT(ABS(amp),char), 8, 0), \"_\", IF(amp>0, \"0\", \"1\"))')\n\n if keep_self:\n rel = self * rel\n\n return rel\n\n \n class Sine(dj.Part):\n definition = \"\"\"\n # Sinusoidal (single-frequency) force profile parameters\n -> master.Target\n ---\n target_amplitude: decimal(5,4) # target amplitude (proportion playable window)\n target_frequency: decimal(5,4) # target frequency (Hz)\n \"\"\"\n\n def proj_label(self, keep_self: bool=True, n_sigfigs: int=4):\n \"\"\"Project label.\"\"\"\n\n rel = (self * ConditionParams.Force) \\\n .proj(\n 
amp='CONVERT(ROUND(target_amplitude*force_max,{}), char)'.format(n_sigfigs), \n freq='CONVERT(ROUND(target_frequency,{}), char)'.format(n_sigfigs)\n ) \\\n .proj(condition_label='CONCAT(\"Sine (\", amp, \" N, \", freq, \" Hz)\")')\n\n if keep_self:\n rel = self * rel\n\n return rel\n\n def proj_rank(self, keep_self: bool=True):\n \"\"\"Project ranking based on frequency and amplitude.\"\"\"\n\n rel = (self * ConditionParams.Target * ConditionParams.Force) \\\n .proj(\n amp='ROUND(target_amplitude*force_max, 4)', \n freq='CONVERT(ROUND(target_frequency, 4), char)'\n ) \\\n .proj(condition_rank=(\n 'CONCAT(\"20_\", LPAD(freq, 8, 0), \"_\", LPAD(CONVERT(ABS(amp),char), 8, 0), \"_\", IF(amp>0, \"0\", \"1\"))'\n ))\n\n if keep_self:\n rel = self * rel\n\n return rel\n\n \n class Chirp(dj.Part):\n definition = \"\"\"\n # Chirp force profile parameters\n -> master.Target\n ---\n target_amplitude: decimal(5,4) # target amplitude (proportion playable window)\n target_frequency_init: decimal(5,4) # target initial frequency (Hz)\n target_frequency_final: decimal(5,4) # target final frequency (Hz)\n \"\"\"\n\n def proj_label(self, keep_self: bool=True, n_sigfigs: int=4):\n \"\"\"Project label.\"\"\"\n\n rel = (self * ConditionParams.Force) \\\n .proj(\n amp='CONVERT(ROUND(force_max*target_amplitude,{}), char)'.format(n_sigfigs),\n freq1='CONVERT(ROUND(target_frequency_init,{}), char)'.format(n_sigfigs),\n freq2='CONVERT(ROUND(target_frequency_final,{}), char)'.format(n_sigfigs),\n ) \\\n .proj(condition_label='CONCAT(\"Chirp (\", amp, \" N, \", freq1, \"-\", freq2, \" Hz)\")')\n\n if keep_self:\n rel = self * rel\n\n return rel\n\n def proj_rank(self, keep_self: bool=True):\n \"\"\"Project ranking based on frequency and amplitude.\"\"\"\n\n rel = (self * ConditionParams.Force) \\\n .proj(\n amp='ROUND(force_max*target_amplitude, 4)',\n freq1='LPAD(CONVERT(ROUND(target_frequency_init, 4), char), 8, 0)',\n freq2='LPAD(CONVERT(ROUND(target_frequency_final, 4), char), 8, 0)',\n ) 
\\\n .proj(condition_rank=(\n 'CONCAT(\"30_\", freq1, \"_\", freq2, \"_\", LPAD(CONVERT(ABS(amp),char), 8, 0), \"_\", IF(amp>0, \"0\", \"1\"))'\n ))\n\n if keep_self:\n rel = self * rel\n\n return rel\n\n\n def proj_label(self, n_sigfigs: int=4):\n \"\"\"Project label in all child target tables and joins with master.\"\"\"\n\n target_children = datajointutils.get_parts(ConditionParams.Target)\n\n target_labels = [dj.U('condition_id', 'condition_label') & (x & self).proj_label(n_sigfigs=n_sigfigs) for x in target_children]\n\n labeled_self = reduce(lambda x,y: x+y, target_labels)\n\n return labeled_self\n\n\n def proj_rank(self):\n \"\"\"Project rank in all child target tables and joins with master.\"\"\"\n\n target_children = datajointutils.get_parts(ConditionParams.Target)\n\n target_ranks = [dj.U('condition_id', 'condition_rank') & (x & self).proj_rank() for x in target_children]\n\n ranked_self = reduce(lambda x,y: x+y, target_ranks)\n\n return ranked_self\n\n\n def get_common_attributes(\n self, \n table: DataJointTable, \n include: List[str]=['label','rank'],\n n_sigfigs: int=4,\n ) -> List[dict]:\n \"\"\"Fetches most common attributes in the input table.\n\n Args:\n table (DataJointTable): DataJoint table to use in the restriction\n include (List[str], optional): Attributes to project into the condition table. \n Options: ['label','rank','time','force']. Defaults to ['label','rank'].\n n_sigfigs (int, optional): Number of significant figures include in label. 
Defaults to 4.\n\n Returns:\n condition_attributes (List[dict]): list of attributes\n \"\"\"\n\n # count condition frequency in the table\n condition_counts = self.aggr(table, count='count(*)')\n\n # restrict by most counts\n max_count = dj.U().aggr(condition_counts, count='max(count)').fetch1('count')\n self = self & (condition_counts & 'count={}'.format(max_count)).proj()\n\n if include is not None:\n\n # project label\n self = self * ConditionParams().proj_label(n_sigfigs=n_sigfigs) if 'label' in include else self\n\n # project rank\n self = self * ConditionParams().proj_rank() if 'rank' in include else self\n\n # fetch attributes\n condition_attributes = self.fetch(as_dict=True, order_by=('condition_rank' if 'rank' in include else None))\n\n # aggregate target attributes\n target_attributes = []\n target_attributes.append('condition_time') if 'time' in include else None\n target_attributes.append('condition_force') if 'force' in include else None\n\n if any(target_attributes):\n\n # ensure matched sample rates across sessions\n behavior_recordings = acquisition.BehaviorRecording & table\n unique_sample_rates = dj.U('behavior_recording_sample_rate') & behavior_recordings\n assert len(unique_sample_rates) == 1, 'Mismatched sample rates!'\n\n fs = unique_sample_rates.fetch1('behavior_recording_sample_rate')\n\n # join condition table with secondary attributes\n for cond_attr in condition_attributes:\n\n t, f = ConditionParams.target_force_profile(cond_attr['condition_id'], fs)\n\n if 'time' in include:\n cond_attr.update(condition_time=t)\n\n if 'force' in include:\n cond_attr.update(condition_force=f)\n\n else:\n condition_attributes = self.fetch(as_dict=True)\n\n return condition_attributes\n\n \n @classmethod\n def parse_params(self, params: dict, session_date: str=''):\n \"\"\"\n Parses a dictionary constructed from a set of Speedgoat parameters (written\n on each trial) in order to extract the set of attributes associated with each\n part table of 
ConditionParams\n \"\"\"\n\n # force attributes\n force_attr = dict(\n force_max = params['frcMax'], \n force_offset = params['frcOff'],\n force_inverted = params['frcPol']==-1\n )\n\n cond_rel = self.Force\n\n # stimulation attributes\n if params.get('stim')==1:\n \n prog = re.compile('stim([A-Z]\\w*)')\n stim_attr = {\n 'stim_' + prog.search(k).group(1).lower(): v\n for k,v in zip(params.keys(), params.values()) \n if prog.search(k) is not None and k != 'stimDelay'\n }\n\n # replace stim electrode with electrode array model electrode key\n try:\n ephys_stimulation_rel = acquisition.EphysStimulation & {'session_date': session_date}\n electrode_model_key = (equipment.ElectrodeArrayModel & ephys_stimulation_rel).fetch1('KEY')\n\n except:\n print('Missing EphysStimulation entry for session {}'.format(session_date))\n\n else:\n # get electrode array model electrode key (convert index from matlab convention)\n electrode_idx_key = {'electrode_idx': stim_attr['stim_electrode'] - 1}\n electrode_key = (equipment.ElectrodeArrayModel.Electrode & electrode_model_key & electrode_idx_key).fetch1('KEY')\n stim_attr.update(**electrode_key)\n\n # remove stim electrode attribute\n stim_attr.pop('stim_electrode')\n\n cond_rel = cond_rel * self.Stim\n \n else:\n stim_attr = dict()\n cond_rel = cond_rel - self.Stim\n\n # target attributes\n targ_attr = dict(\n target_duration = params['duration'],\n target_offset = params['offset'][0]\n )\n\n # target pad durations\n pad_dur = [v for k,v in params.items() if re.search('padDur',k) is not None]\n if len(pad_dur) == 1:\n targ_attr.update(target_pad_pre=pad_dur[0], target_pad_post=pad_dur[0])\n\n # target type attributes\n if params['type'] == 'STA':\n\n targ_type_rel = self.Static\n targ_type_attr = dict()\n\n elif params['type'] == 'RMP':\n\n targ_type_rel = self.Ramp\n targ_type_attr = dict(\n target_amplitude = params['amplitude'][0]\n )\n\n elif params['type'] == 'SIN':\n\n targ_type_rel = self.Sine\n targ_type_attr = dict(\n 
target_amplitude = params['amplitude'][0],\n target_frequency = params['frequency'][0]\n )\n\n elif params['type'] == 'CHP':\n\n targ_type_rel = self.Chirp\n targ_type_attr = dict(\n target_amplitude = params['amplitude'][0],\n target_frequency_init = params['frequency'][0],\n target_frequency_final = params['frequency'][1]\n )\n\n cond_rel = cond_rel * self.Target * targ_type_rel\n\n # aggregate all parameter attributes into a dictionary\n cond_attr = dict(\n Force = force_attr,\n Stim = stim_attr,\n Target = targ_attr,\n TargetType = targ_type_attr\n )\n\n return cond_attr, cond_rel, targ_type_rel\n \n @classmethod\n def target_force_profile(self, condition_id: int, fs: int):\n\n # ensure integer frequency\n assert fs == round(fs), 'Non-integer frequency'\n fs = int(fs)\n\n # join condition table with part tables\n joined_table, part_tables = datajointutils.join_parts(self, {'condition_id': condition_id}, depth=2, context=inspect.currentframe())\n\n # condition parameters\n cond_params = joined_table.fetch1()\n\n # convert sample rate to decimal type with precision inferred from condition parameters\n fs_dec = Decimal(fs).quantize(cond_params['target_duration'])\n\n # lengths of each target region\n target_lens = (\n int(round(cond_params['target_pad_pre'] * fs_dec)),\n int(round(cond_params['target_duration'] * fs_dec)) + 1,\n int(round(cond_params['target_pad_post'] * fs_dec))\n )\n\n # time samples\n xi = (\n np.arange(-target_lens[0], 0),\n np.arange(0, target_lens[1]),\n np.arange(target_lens[1], sum(target_lens[-2:]))\n )\n\n # target force functions\n if self.Static in part_tables:\n\n force_fcn = lambda t,c: c['target_offset'] * np.zeros(t.shape)\n\n elif self.Ramp in part_tables:\n\n force_fcn = lambda t,c: (c['target_amplitude']/c['target_duration']) * t\n\n elif self.Sine in part_tables:\n\n force_fcn = lambda t,c: c['target_amplitude']/2 * (1 - np.cos(2*np.pi*c['target_frequency']*t))\n\n elif self.Chirp in part_tables:\n\n force_fcn = lambda t,c: 
c['target_amplitude']/2 * \\\n (1 - np.cos(2*np.pi*t * (c['target_frequency_init'] + (c['target_frequency_final']-c['target_frequency_init'])/(2*c['target_duration'])*t)))\n\n else:\n print('Unrecognized condition table')\n\n # convert condition parameters to float\n cond_params = {k:float(v) if isinstance(v,Decimal) else v for k,v in cond_params.items()}\n\n # construct target force profile\n force = np.hstack((\n force_fcn(xi[1][0]/fs, cond_params) * np.ones(target_lens[0]),\n force_fcn(xi[1]/fs, cond_params),\n force_fcn(xi[1][-1]/fs, cond_params) * np.ones(target_lens[2])\n ))\n\n # add force offset\n force += cond_params['target_offset']\n\n # scale force from screen units to Newtons\n force *= cond_params['force_max']\n\n # concatenate time samples and convert to seconds\n t = np.hstack(xi) / fs\n\n # round time to maximum temporal precision\n t = t.round(int(np.ceil(np.log10(fs))))\n\n return t, force\n\n\n@schema\nclass TaskState(dj.Lookup):\n definition = \"\"\"\n # Simulink Stateflow task state IDs and names\n task_state_id: tinyint unsigned # task state ID number\n ---\n task_state_name: varchar(255) # task state name\n \"\"\"\n \n\n# =======\n# LEVEL 1\n# =======\n \n@schema\nclass Behavior(dj.Imported):\n definition = \"\"\"\n # Behavioral data imported from Speedgoat\n -> acquisition.BehaviorRecording\n \"\"\"\n\n key_source = acquisition.BehaviorRecording\n\n class Condition(dj.Part):\n definition = \"\"\"\n # Condition data\n -> master\n -> ConditionParams\n ---\n condition_time: longblob # condition time vector (s)\n condition_force: longblob # condition force profile (N)\n \"\"\"\n\n class SaveTag(dj.Part):\n definition = \"\"\"\n # Save tags and associated notes\n -> master\n save_tag: tinyint unsigned # save tag number\n \"\"\"\n\n class Trial(dj.Part):\n definition = \"\"\"\n # Trial data\n -> master.Condition\n trial: smallint unsigned # session trial number\n ---\n -> master.SaveTag\n successful_trial: bool # whether the trial was 
successful\n simulation_time: longblob # task model simulation time\n task_state: longblob # task state IDs\n force_raw_online: longblob # amplified output of load cell\n force_filt_online: longblob # online (boxcar) filtered and normalized force used to control Pac-Man\n reward: longblob # TTL signal indicating the delivery of juice reward\n photobox: longblob # photobox signal\n stim = null: longblob # TTL signal indicating the delivery of a stim pulse\n \"\"\"\n\n def process_force(self, data_type='raw', apply_filter=True, keep_keys=False):\n\n # aggregate load cell parameters per session\n load_cell_params = (acquisition.Session.Hardware & {'hardware': '5lb Load Cell'}) * equipment.Hardware.Parameter & self\n\n force_capacity_per_session = dj.U(*acquisition.Session.primary_key) \\\n .aggr((load_cell_params & {'equipment_parameter': 'force capacity'}), force_capacity='equipment_parameter_value')\n\n voltage_output_per_session = dj.U(*acquisition.Session.primary_key) \\\n .aggr((load_cell_params & {'equipment_parameter': 'voltage output'}), voltage_output='equipment_parameter_value')\n\n load_cell_params_per_session = force_capacity_per_session * voltage_output_per_session\n\n # 25 ms Gaussian filter\n filter_rel = processing.Filter.Gaussian & {'sd':25e-3, 'width':4}\n\n # join trial force data with force and load cell parameters\n force_rel = self * ConditionParams.Force * load_cell_params_per_session\n\n # fetch force data\n data_type_attr = {'raw':'force_raw_online', 'filt':'force_filt_online'}\n data_attr = data_type_attr[data_type]\n force_data = force_rel \\\n .proj(data_attr, 'force_max', 'force_offset', 'force_capacity', 'voltage_output') \\\n .fetch(as_dict=True, order_by='trial')\n\n # sample rate\n fs = (acquisition.BehaviorRecording & self).fetch1('behavior_recording_sample_rate')\n\n # process trial data\n for f in force_data:\n\n f[data_attr] = f[data_attr].copy()\n\n # normalize force (V) by load cell capacity (V)\n f[data_attr] /= 
f['voltage_output']\n\n # convert force to proportion of maximum load cell output (N)\n f[data_attr] *= f['force_capacity']/f['force_max']\n\n # subtract baseline force (N)\n f[data_attr] -= float(f['force_offset'])\n\n # multiply force by maximum gain (N)\n f[data_attr] *= f['force_max']\n\n # filter\n if apply_filter:\n f[data_attr] = filter_rel.filt(f[data_attr], fs)\n\n # pop force parameters\n for key in ['force_id', 'force_max', 'force_offset', 'force_capacity', 'voltage_output']:\n [f.pop(key) for f in force_data]\n\n # limit output to force signal\n if not keep_keys:\n force_data = np.array([f[data_attr] for f in force_data])\n\n return force_data \n \n def make(self, key):\n\n self.insert1(key)\n\n if (acquisition.Session.Hardware & key & {'hardware': 'Speedgoat'}):\n\n # behavior sample rate\n fs = int((acquisition.BehaviorRecording & key).fetch1('behavior_recording_sample_rate'))\n\n # summary file path\n summary_file_path = (acquisition.BehaviorRecording.File & key & {'behavior_file_extension': 'summary'})\\\n .proj_file_path().fetch1('behavior_file_path')\n\n # ensure local path\n summary_file_path = reference.EngramTier.ensure_local(summary_file_path)\n\n # read summary file\n summary = speedgoat.read_task_states(summary_file_path)\n\n # update task states\n TaskState.insert(summary, skip_duplicates=True)\n\n # parameter and data file paths\n params_file_paths = (acquisition.BehaviorRecording.File & key & {'behavior_file_extension': 'params'})\\\n .proj_file_path().fetch('behavior_file_path')\n\n data_file_paths = (acquisition.BehaviorRecording.File & key & {'behavior_file_extension': 'data'})\\\n .proj_file_path().fetch('behavior_file_path')\n\n # ensure local paths\n params_file_paths = [reference.EngramTier.ensure_local(pth) for pth in params_file_paths]\n data_file_paths = [reference.EngramTier.ensure_local(pth) for pth in data_file_paths]\n\n # populate conditions from parameter files\n for params_path in params_file_paths:\n\n # trial number\n 
trial = re.search(r'beh_(\\d*)', params_path).group(1)\n\n # ensure matching data file exists\n if params_path.replace('params','data') not in data_file_paths:\n\n print('Missing data file for trial {}'.format(trial))\n\n else:\n # read params file\n params = speedgoat.read_trial_params(params_path)\n\n if not params:\n continue\n\n # extract condition attributes from params file\n cond_attr, cond_rel, targ_type_rel = ConditionParams.parse_params(params, key['session_date'])\n\n # aggregate condition part table parameters into a single dictionary\n all_cond_attr = {k: v for d in list(cond_attr.values()) for k, v in d.items()}\n \n # insert new condition if none exists\n if not(cond_rel & all_cond_attr):\n\n # insert condition table\n new_cond_id = datajointutils.next_unique_int(ConditionParams, 'condition_id')\n cond_key = {'condition_id': new_cond_id}\n\n ConditionParams.insert1(cond_key)\n\n # insert Force, Stim, and Target tables\n for cond_part_name in ['Force', 'Stim', 'Target']:\n\n # attributes for part table\n cond_part_attr = cond_attr[cond_part_name]\n\n if not(cond_part_attr):\n continue\n\n cond_part_rel = getattr(ConditionParams, cond_part_name)\n cond_part_id = cond_part_name.lower() + '_id'\n\n if not(cond_part_rel & cond_part_attr):\n\n cond_part_attr[cond_part_id] = datajointutils.next_unique_int(cond_part_rel, cond_part_id)\n \n else:\n cond_part_attr[cond_part_id] = (cond_part_rel & cond_part_attr).fetch(cond_part_id, limit=1)[0]\n\n cond_part_rel.insert1(dict(**cond_key, **cond_part_attr))\n\n # insert target type table\n targ_type_rel.insert1(dict(**cond_key, **cond_attr['TargetType'], target_id=cond_attr['Target']['target_id']))\n \n\n # populate trials from data files\n success_state = (TaskState() & 'task_state_name=\"Success\"').fetch1('task_state_id')\n\n for data_path in data_file_paths:\n\n # trial number\n trial = int(re.search(r'beh_(\\d*)',data_path).group(1))\n\n # find matching parameters file\n try:\n params_path = 
next(filter(lambda f: data_path.replace('data','params')==f, params_file_paths))\n except StopIteration:\n print('Missing parameters file for trial {}'.format(trial))\n else:\n # convert params to condition keys\n params = speedgoat.read_trial_params(params_path)\n\n if not params:\n continue\n\n cond_attr, cond_rel, targ_type_rel = ConditionParams.parse_params(params, key['session_date'])\n\n # read data\n data = speedgoat.read_trial_data(data_path, success_state, fs)\n\n if not data:\n continue\n \n # aggregate condition part table parameters into a single dictionary\n all_cond_attr = {k: v for d in list(cond_attr.values()) for k, v in d.items()}\n\n # insert condition data\n cond_id = (cond_rel & all_cond_attr).fetch1('condition_id')\n cond_key = dict(**key, condition_id=cond_id)\n if not(self.Condition & cond_key):\n t, force = ConditionParams.target_force_profile(cond_id, fs)\n cond_key.update(condition_time=t, condition_force=force)\n self.Condition.insert1(cond_key, allow_direct_insert=True)\n\n # insert save tag key\n save_tag_key = dict(**key, save_tag=params['saveTag'])\n if not (self.SaveTag & save_tag_key):\n self.SaveTag.insert1(save_tag_key)\n\n # insert trial data\n trial_key = dict(**key, trial=trial, condition_id=cond_id, **data, save_tag=params['saveTag'])\n self.Trial.insert1(trial_key)\n\n else: \n print('Unrecognized task controller')\n return None","sub_path":"pacman_pipeline_python/pacman_acquisition.py","file_name":"pacman_acquisition.py","file_ext":"py","file_size_in_byte":29384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"57476203","text":"from __future__ import (print_function,\n unicode_literals,\n division)\nfrom future.builtins import str, open, range, dict\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nfrom spikevo import *\nfrom spikevo.pynn_transforms import PyNNAL\nimport argparse\nfrom pprint import pprint\n\nbackend = 'genn'\nneuron_class = 'IF_cond_exp'\n# heidelberg's brainscales seems to like these params\ne_rev = 92 #mV\n# e_rev = 500.0 #mV\n\nbase_params = {\n 'cm': 0.09, # nF\n 'v_reset': -70., # mV\n 'v_rest': -65., # mV\n 'v_thresh': -55., # mV\n # 'e_rev_I': -e_rev, #mV\n # 'e_rev_E': 0.,#e_rev, #mV\n 'tau_m': 10., # ms\n 'tau_refrac': 2.0, # ms\n 'tau_syn_E': 1.0, # ms\n 'tau_syn_I': 5.0, # ms\n\n}\n\nbase_params['e_rev_I'] = -e_rev\nbase_params['e_rev_E'] = 0.0\n\ntimestep = 0.1\nmax_w = 0.01\nstart_w = max_w / 2.0\n\ntau_plus = 5.0\ntau_minus = 10.0\na_plus = 0.01\na_minus = 0.005\ndelays = range(1, 11)\n\nstart_dt, num_dt = -15, 30\nsim_time = np.round(1.5 * num_dt)\nstart_t = sim_time - num_dt\ntrigger_t = start_t + (start_dt + num_dt//2)\nnum_neurons = num_dt\n\npynnx = PyNNAL(backend)\npynnx._sim.setup(timestep=timestep, min_delay=timestep,\n backend='SingleThreadedCPU')\n\npprojs = {}\nfor delay in delays:\n\n a_plus_local = a_plus if delay == 1.0 else -a_plus\n a_minus_local = a_minus if delay == 1.0 else -a_minus\n\n projs = {}\n for dt in range(start_dt, start_dt+num_dt, 1):\n pre_spike_times = [[trigger_t + dt]]\n trigger_spike_times = [[trigger_t]]\n\n trigger = pynnx.Pop(1, 'SpikeSourceArray',\n {'spike_times': trigger_spike_times})\n\n post = pynnx.Pop(1, neuron_class, base_params)\n pynnx.set_recording(post, 'spikes')\n\n pre = pynnx.Pop(1, 'SpikeSourceArray',\n {'spike_times': pre_spike_times})\n\n tr2post = pynnx.Proj(trigger, post, 'OneToOneConnector', 0.1, 1.0, label='trigger connection')\n\n\n stdp = {\n 'timing_dependence': {\n 'name': 'SpikePairRule',\n 'params': {'tau_plus': tau_plus,\n 'tau_minus': 
tau_minus,\n # 'tau_minus': 33.7,\n },\n },\n 'weight_dependence': {\n 'name':'AdditiveWeightDependence',\n # 'name':'MultiplicativeWeightDependence',\n 'params': {\n # 'w_min': (static_w['KC to DN'])/10.0,\n 'w_min': 0.0,\n 'w_max': max_w,\n # 'w_max': (static_w['KC to DN']),\n 'A_plus': a_plus_local,\n 'A_minus': a_minus_local,\n # 'A_plus': max_w * a_plus,\n # 'A_minus': max_w * a_minus,\n },\n }\n }\n\n pre2post = pynnx.Proj(pre, post, 'AllToAllConnector', start_w, delay,\n stdp=stdp, label='plastic connection')\n\n projs[dt] = pre2post\n\n pprojs[delay] = projs\n\npynnx.run(sim_time)\nexperiments = {}\nfor delay in pprojs:\n dt_dw = {}\n for dt in pprojs[delay]:\n dt_dw[dt] = (pynnx.get_weights(pprojs[delay][dt])[0,0] - start_w) / max_w\n experiments[delay] = dt_dw\n\npynnx.end()\n\n\n\nplt.figure()\nax = plt.subplot()\nplt.axvline(0, linestyle='--', color='gray')\nplt.axhline(0, linestyle='--', color='gray')\n\nfor delay in experiments:\n dt_dw = experiments[delay]\n dts = sorted(dt_dw.keys())\n dws = [dt_dw[dt] for dt in dts]\n plt.plot(dts, dws, label=delay)\n\nmax_dw = np.max(np.abs(dws)) * 1.5\nax.set_ylim(-max_dw, max_dw)\nax.set_xlabel(r'$\\Delta t = t_{pre} - t_{post}$ [ms]')\nax.set_ylabel(r'$\\Delta w $')\nplt.legend()\nplt.grid()\nplt.show()\n\nnp.savez_compressed('delay_experiments.npz', experiments=experiments)","sub_path":"codebase/misc_tests/stdp_curve.py","file_name":"stdp_curve.py","file_ext":"py","file_size_in_byte":3831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"255638872","text":"#!/usr/bin/env python3\n\nfrom math import inf, sqrt\nfrom uuid import uuid4\n\nfrom ev3dev.ev3 import Sound\nimport paho.mqtt.client as mqtt\n\nfrom communication import Communication\nfrom motioncontrol import MotionControl\nfrom planet import Direction, Planet\n\nADJACENCIES = {\n Direction.NORTH: [(-1, 1), (0, 1), (1, 1)],\n Direction.EAST: [(1, 1), (1, 0), (1, -1)],\n Direction.SOUTH: [(1, -1), (0, -1), (-1, -1)],\n Direction.WEST: [(-1, -1), (-1, 0), (-1, 1)]\n}\n\ndef play_daisy():\n with open('daisy.txt') as f:\n notes = [line.rstrip('\\n') for line in f.readlines()]\n notes = ' '.join(notes)\n s = Sound()\n s.beep(notes)\n\nclass RobotBrain:\n def __init__(self, testplanet=None):\n\n mqtt_client = mqtt.Client(\n client_id=str(uuid4()),\n clean_session=False,\n protocol=mqtt.MQTTv31)\n\n self.communication = Communication(mqtt_client, planet=testplanet)\n\n self.motioncontrol = MotionControl()\n self.planet = Planet()\n\n self.visited = set()\n self.current_path = []\n self.target = None\n self.use_target_heuristic = False\n\n # calibrate and drive to starting node\n self.motioncontrol.calibrate()\n start_color, _, _ = self.motioncontrol.follow()\n\n start = self.communication.send_ready()\n\n if __debug__:\n print(\"received starting node from server: {}\".format(start))\n\n self.planet.set_starting_node(start, start_color)\n\n self.current_node = start\n self.visited.add(start)\n self.motioncontrol.update_position(Planet.from_node(start))\n\n # discover exits\n self.motioncontrol.update_rotation(Planet.from_direction(Direction.NORTH))\n exits, self.current_direction = self.motioncontrol.scan_paths()\n\n if __debug__:\n print('discovered exits: {}'.format(exits))\n\n for exit in exits:\n if exit == Direction.SOUTH:\n continue\n self.planet.add_undiscovered_exit(self.current_node + (exit,))\n\n def explore(self):\n while True:\n if __debug__:\n print(\"exploring, current pose is: {}\".format(\n self.current_node + 
(self.current_direction,)))\n\n # follow path to next node\n start = self.current_node + (self.current_direction,)\n reached = self.motioncontrol.follow()\n\n destination = None\n blocked = False\n if reached is None:\n destination = start\n blocked = True\n\n else:\n color, position, rotation = reached\n node = self.planet.to_node(position, color)\n direction = self.planet.to_direction(rotation)\n destination = node + (Planet.invert_direction(direction),)\n\n if __debug__:\n print(\"reached node at: {}\".format(destination))\n\n destination, weight, blocked, paths, target = \\\n self.communication.send_path(start, destination, blocked)\n\n if __debug__:\n print(\"received path response from server:\")\n print(\" destination: {}:\".format(destination))\n print(\" weight: {}\".format(weight))\n print(\" paths: {}\".format(paths))\n print(\" target: {}\".format(target))\n\n self.planet.add_path(start, destination, weight)\n self.planet.mark_exit_discovered(start)\n self.planet.mark_exit_discovered(destination)\n\n if target:\n self.target=target\n\n if __debug__:\n print(\"new target!\")\n\n received_paths = False\n if paths:\n received_paths = True\n for start, dest, weight in paths:\n self.planet.add_path(start, dest, weight)\n self.planet.mark_exit_discovered(start)\n self.planet.mark_exit_discovered(dest)\n\n x, y, incoming_direction = destination\n direction = Planet.invert_direction(incoming_direction)\n\n self.current_node = (x, y)\n self.current_direction = direction\n\n if self.target:\n target_x, target_y = self.target\n self.use_target_heuristic = sqrt((target_x - x)**2 + \\\n (target_y - y)**2) < 15\n\n if not self.use_target_heuristic and __debug__:\n print(\"WARNING: ignoring target heuristics!\")\n\n self.motioncontrol.update_position(\n Planet.from_node(self.current_node))\n self.motioncontrol.update_rotation(\n Planet.from_direction(self.current_direction))\n\n already_visited = True\n if self.current_node not in self.visited:\n already_visited = 
False\n\n exits, direction = self.motioncontrol.scan_paths()\n self.current_direction = direction\n\n self.motioncontrol.update_rotation(Planet.from_direction(\n self.current_direction))\n\n if __debug__:\n print('discovered exits: {}'.format(exits))\n\n for exit in exits:\n if exit == incoming_direction:\n continue\n self.planet.add_undiscovered_exit(self.current_node + (exit,))\n\n self.visited.add(self.current_node)\n\n current_x, current_y = self.current_node\n known_paths = self.planet.get_paths()\n undiscovered = self.planet.get_undiscovered_exits()\n\n # exploration completed\n target_path_known = self.target and \\\n self.planet.shortest_path(self.current_node, self.target)\n\n if not undiscovered and not target_path_known:\n self.communication.send_exploration_completed(\"...\")\n\n if __debug__:\n print(\"completey discovered planet!\")\n\n break\n\n # progress towards target\n if self.target is not None:\n if __debug__:\n print(\"There is a target...\")\n\n if self.current_node == self.target:\n self.communication.send_target_reached(\"...\")\n\n if __debug__:\n print(\"finished!\")\n\n break\n\n shortest_path = \\\n self.planet.shortest_path(self.current_node, self.target)\n if shortest_path:\n if __debug__:\n print(\"found shortest path to target\")\n\n self.current_path = shortest_path\n elif self.use_target_heuristic:\n if __debug__:\n print(\"no known path to target, finding path to nearest projected node\")\n\n target_x, target_y = self.target\n\n best_node = None\n best_node_distance = inf\n best_exit = None\n best_path = None\n\n reachable_nodes = \\\n self.planet.get_connected_known_nodes(self.current_node)\n\n for node in reachable_nodes:\n shortest_path = self.planet.shortest_path(\n self.current_node, node)\n\n if shortest_path is None:\n continue\n\n exits = self.planet.get_undiscovered_exits().get(node)\n if not exits:\n continue\n\n node_x, node_y = node\n\n for exit in exits:\n projected_x, projected_y = ADJACENCIES[exit][1]\n\n 
projected_node_distance = \\\n sqrt((target_x - (node_x + projected_x))**2 + \\\n (target_y - (node_y + projected_y))**2)\n\n if projected_node_distance > best_node_distance:\n continue\n\n if projected_node_distance < best_node_distance or \\\n len(shortest_path) < len(best_path):\n\n best_node = node\n best_node_distance = projected_node_distance\n best_exit = exit\n best_path = shortest_path + [(node_x, node_y, exit)]\n\n self.current_path = best_path\n\n # current node not yet discovered\n if self.current_node in undiscovered and \\\n (not self.use_target_heuristic or target is None):\n\n if __debug__:\n print(\"current node not discovered\")\n\n undiscovered_exits = undiscovered[self.current_node]\n\n # prefer paths that are likely to lead to already discovered nodes\n preferred_exit_heuristics = {}\n\n for exit in undiscovered_exits:\n preferred_exit_heuristics[exit] = 0\n for delta_x, delta_y in ADJACENCIES[exit]:\n if (current_x + delta_x, current_y + delta_y) in known_paths:\n preferred_exit_heuristics[exit] += 1\n\n if __debug__:\n print(\"exit heuristics: {}\".format(preferred_exit_heuristics))\n\n while True:\n preferred_exit = None\n max_heuristic = -1\n for direction in [Direction.NORTH, Direction.EAST,\n Direction.SOUTH, Direction.WEST]:\n\n heuristic = preferred_exit_heuristics.get(direction)\n if heuristic is None:\n continue\n\n if heuristic > max_heuristic:\n preferred_exit = direction\n max_heuristic = heuristic\n\n if __debug__:\n print(\"choosing exit: \" + str(preferred_exit))\n\n turn = self.motioncontrol.turn_to(preferred_exit,\n sweep=already_visited)\n if turn == preferred_exit:\n break\n\n if __debug__:\n warn = \"WARNING: exit {} not found, marking as discovered\"\n print(warn.format(preferred_exit))\n\n self.planet.mark_exit_discovered(\n self.current_node + (preferred_exit,))\n preferred_exit_heuristics.pop(preferred_exit)\n\n self.current_direction = preferred_exit\n self.motioncontrol.update_rotation(\n 
Planet.from_direction(self.current_direction))\n\n continue\n\n elif (not self.current_path or received_paths) and \\\n (not self.use_target_heuristic or self.target is None):\n\n if __debug__:\n print(\"current node discovered, looking for nearest undiscovered node\")\n\n # find nearest node with unexplored exit\n nearest_undiscovered_node = None\n nearest_undiscovered_node_path = None\n nearest_undiscovered_node_distance = inf\n\n for node in undiscovered:\n path = self.planet.shortest_path(self.current_node, node)\n if not path:\n continue\n\n pathlen = len(path)\n\n if pathlen < nearest_undiscovered_node_distance:\n nearest_undiscovered_node = node\n nearest_undiscovered_node_path = path\n nearest_undiscovered_node_distance = pathlen\n\n self.current_path = nearest_undiscovered_node_path\n\n if __debug__:\n dbg = \"backtracking to nearest undiscovered node {} on path {}\"\n print(dbg.format(nearest_undiscovered_node,\n nearest_undiscovered_node_path))\n\n if __debug__:\n print(\"following path {}\".format(self.current_path))\n\n _, _, exit = self.current_path[0]\n self.current_path = self.current_path[1:]\n\n while self.motioncontrol.turn_to(exit, sweep=already_visited) != exit:\n pass\n\n self.current_direction = exit\n self.motioncontrol.update_rotation(\n Planet.from_direction(self.current_direction))\n\n play_daisy()\n","sub_path":"src/robotbrain.py","file_name":"robotbrain.py","file_ext":"py","file_size_in_byte":12743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"292581692","text":"import pandas as pd\r\nimport numpy as np\r\nimport time\r\nimport pickle\r\nimport sys\r\nimport os\r\nfrom datetime import timedelta\r\nfrom array import array\r\n\r\ndef init():\r\n global fldr\r\n try:\r\n from statsmodels.tsa.stattools import acf, pacf\r\n from statsmodels.tsa.arima_model import ARIMA\r\n except ImportError:\r\n import pip\r\n package_name='statsmodels'\r\n pip.main(['install', package_name])\r\n from statsmodels.tsa.stattools import acf, pacf\r\n from statsmodels.tsa.arima_model import ARIMA\r\n # serialize the model on disk in the special 'outputs' folder\r\n print (\"Read the model from model.pkl in directory \", fldr)\r\n \r\n fl = open(fldr+\"model.pkl\", 'rb')\r\n global ar_res\r\n ar_res = pickle.load( fl)\r\n fl.close()\r\n\r\ndef PrepareFcstData(strt, stp):\r\n df = pd.DataFrame(np.zeros(shape=(stp-strt+1, 1)))\r\n fcst = pd.DataFrame(np.zeros(shape=(stp-strt+1, 1)))\r\n\r\n for idx in range(strt, strt + len(df)):\r\n d = timedelta(hours=idx)\r\n df.iloc[idx - strt, 0] = pd.datetime(2017, 6, 19, 0, 0, 0, 0) + d\r\n # print(df)\r\n fcst.index = df.iloc[:, 0]\r\n fcst.index.name = 'time'\r\n fcst.columns = ['forecast']\r\n return fcst\r\n\r\ndef predictForecast(strt, stp):\r\n import json\r\n import numpy\r\n fst = PrepareFcstData(strt,stp)\r\n\r\n global ar_res\r\n fst['forecast']=ar_res.predict(start = strt-1, end= stp, dynamic= True)\r\n return fst\r\n\r\ndef run(inputString):\r\n import json\r\n import numpy\r\n try:\r\n input_list=json.loads(inputString)\r\n except ValueError:\r\n return 'Bad input: expecting a json encoded list of lists.'\r\n strt = int(input_list[0][\"start\"])\r\n stp = int(input_list[1][\"stop\"])\r\n print(\"start:\",strt)\r\n print(\"stop:\",stp)\r\n \r\n pred = predictForecast(strt,stp)\r\n return str(pred)\r\n\r\nglobal fldr\r\nfldr=\"\"\r\nif __name__ == \"__main__\":\r\n \r\n fldr = os.environ['AZUREML_NATIVE_SHARE_DIRECTORY'] + \"outputs/\"\r\n # predict future values\r\n 
print ('Python version: {}'.format(sys.version))\r\n print('Pandas version:',pd.__version__)\r\n print ()\r\n init()\r\n #f = run('{\"input\":[{\"start\":\"127\"},{\"stop\":\"151\"}]}')\r\n f = run('[{\"start\":\"127\"},{\"stop\":\"151\"}]')\r\n\r\n print(\"Forecast Values:\")\r\n print(f)\r\n\r\n\r\n","sub_path":"score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"123567607","text":"import requests\nfrom bs4 import BeautifulSoup\nimport json\n\nfrom newspapers.utils import find_one_tag, form_article_id\n\n\ndef check_economist_url(url, logger):\n parts = url.split('/')\n if parts[3] == '1843':\n return False\n\n # page not found error\n if url == 'https://www.economist.com/prospero/2020/01/09/how-to-save-culture-from-climate-change':\n return False\n\n import re\n pattern = re.compile('\\/\\d{4}\\/\\d{2}\\/\\d{2}\\/')\n match = pattern.findall(url)\n if len(match) == 1:\n return True\n return False\n\n\ndef clean_string(string, unwanted):\n for unw in unwanted:\n string = string.replace(unw, '')\n return string\n\n\ndef parse_economist_url(url, logger=None):\n import time\n # error without the sleep\n time.sleep(1)\n response = requests.get(url)\n html = response.text\n soup = BeautifulSoup(html, features=\"html5lib\")\n\n headline = find_one_tag(soup, 'span', {'class': 'article__headline', 'itemprop': 'headline'}).text\n\n body = find_one_tag(soup, 'div', {\n 'itemprop': 'text',\n 'class': 'ds-layout-grid ds-layout-grid--edged layout-article-body'\n })\n body = body.findAll('p')\n body = ''.join(p.text for p in body)\n\n unwanted = [\n 'For more coverage of climate change, register for The Climate Issue, our fortnightly newsletter, or visit our climate-change hub',\n 'Sign up to our new fortnightly climate-change newsletter hereThis article appeared in the Leaders section of the print edition under the headline \"The climate issue\"'\n ]\n body = clean_string(body, unwanted)\n\n app = find_one_tag(soup, 'script', {'type': 'application/json'})\n app = json.loads(app.text)\n meta = app['props']['pageProps']['metadata']\n published = meta['datePublished']\n modified = meta['dateModified']\n\n return {\n \"newspaper_id\": \"economist\",\n 'body': body,\n 'article_id': form_article_id(url),\n 'headline': headline,\n 'article_url': url,\n 'html': html,\n 'date_published': published,\n 'date_modified': modified,\n 
}\n\n\neconomist = {\n \"newspaper_id\": \"economist\",\n \"newspaper\": \"The Economist\",\n \"newspaper_url\": \"economist.com\",\n \"checker\": check_economist_url,\n \"parser\": parse_economist_url\n}\n\n\nif __name__ == '__main__':\n url = 'https://www.economist.com/briefing/2010/11/25/facing-the-consequences'\n url = 'https://www.economist.com/books-and-arts/2019/05/16/climate-change-strikes-the-venice-biennale'\n response = requests.get(url)\n html = response.text\n soup = BeautifulSoup(html, features=\"html5lib\")\n\n headline = find_one_tag(soup, 'span', {'class': 'article__headline', 'itemprop': 'headline'}).text\n","sub_path":"newspapers/economist.py","file_name":"economist.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"623883936","text":"import time\r\n\r\nimport pymysql\r\n\r\n\r\ndb = pymysql.connect(\"192.168.1.151\",\"root\",\"123456\",\"db_ny_ctd\",charset='utf8')\r\ncursor = db.cursor()\r\n\r\ndb2 = pymysql.connect(\"localhost\",\"root\",\"123456\",\"test\",charset='utf8')\r\ncursor2 = db2.cursor()\r\n\r\nwhile True:\r\n\r\n get_new_record_sql = 'select max(endtime) from t_c_cti_callrecord'\r\n cursor2.execute(get_new_record_sql)\r\n new_row = cursor2.fetchone()\r\n new_time = new_row[0]\r\n print(new_time)\r\n\r\n sql = \"SELECT * FROM t_c_cti_callrecord \\\r\n WHERE endtime > '%s'\" % (new_time)\r\n try:\r\n cursor.execute(sql)\r\n results = cursor.fetchall()\r\n for row in results:\r\n\r\n insert_sql = \"INSERT INTO t_c_cti_callrecord \\\r\n VALUES('%s', '%s', '%d', '%s', '%s','%d', '%d', '%d', '%d', '%s','%s', '%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', '%s','%s', '%d', '%d', '%d', '%d','%d', '%d', '%s', '%s', '%d','%s', '%s', '%s', '%d', '%d','%s', '%s', '%s', '%s', '%s', '%d','%s', '%s', '%s', '%s', '%s','%s', '%s', '%s', '%s', '%s','%s', '%s', '%s', '%s' )\" % \\\r\n (row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8],row[9],row[10],row[11],row[12],row[13],row[14],row[15],row[16],row[17],row[18],row[19],row[20],row[21],row[22],row[23],row[24],row[25],row[26],row[27],row[28],row[29],row[30],row[31],row[32],row[33],row[34],row[35],row[36],row[37],row[38],row[39],row[40],row[41],row[42],row[43],row[44],row[45],row[46],row[47],row[48],row[49],row[50],row[51],row[52],row[53],row[54])\r\n\r\n cursor2.execute(insert_sql)\r\n db2.commit()\r\n except () as e:\r\n print (e)\r\n time.sleep(1)\r\n\r\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"607618083","text":"import random\nfrom termcolor import colored \n\ndef NumRand():\n random.seed()\n x=random.randint(0,100)\n \n return x\n\ndef NumRandPer(x,y):\n random.seed()\n x=random.randint(x,y)\n \n return x\n\ndef depuraNum():\n nValido=False\n while (nValido==False):\n try:\n n= int (input(colored(\"ingrese el numero deseado: \\n\",\"yellow\")))\n nValido=True\n except ValueError:\n print(colored(\"el cararcter ingresado no es un numero ingrese el valor nuevamente \\n\",\"red\"))\n nValido=False\n return n \n\ndef finalizar():\n print(colored(\"Programa finalizado\",\"red\") )\n\n\n\ndef menu():\n print(colored(\"Ejercicio numero aleatorio \\n\",\"yellow\"))\n print(colored(\"que deseas hacer? \\n\",\"yellow\"))\n print(colored(\"1. numero aleatorio entre 0 y 100\\n\",\"green\"))\n print(colored(\"2. numero aleatorio valores personalizados\\n\",\"green\"))\n print(colored(\"3. salir\\n\",\"red\"))\n respuesta= input(\"\")\n return respuesta\n\n\n\ndef continuar():\n input(colored(\"Presiona Enter para continuar...\",\"yellow\"))\n\ndef main():\n salida=False\n while (salida==False):\n opcion=menu()\n if (opcion==\"1\"):\n print(colored(\"su numero aleatorio es: \",\"yellow\") )\n print(colored(str(NumRand()) + \"\\n\",\"green\") )\n continuar()\n \n elif(opcion==\"2\"):\n print(colored(\"rango minimo\\n\", \"yellow\") )\n x = depuraNum()\n print(colored(\"rango maximo\\n\", \"yellow\") )\n y = depuraNum()\n print(colored(\"su numero aleatorio entre \" + str(x) +\" y \" + str(y) + \"es: \",\"yellow\") )\n print(colored(str(NumRandPer(x,y)) + \"\\n\",\"green\") )\n continuar()\n elif(opcion==\"3\"): \n salida=True\n print(colored(\"Gracias por usar el programa\\n\",\"red\") )\n finalizar()\n else:\n print(colored(\"No ha ingresado una opcion correcta\\n\",\"red\") )\n continuar() \n\nmain() 
","sub_path":"py6_retos2/reto5.py","file_name":"reto5.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"168673813","text":"\"\"\"\nCompute the Celestial location radius RMS corresponding to the PRD requirement\nof 1.0 arcsec.\n\"\"\"\n\nimport asciitable\nimport numpy as np\nfrom Chandra.Time import DateTime\n\n# Read using Tab instead of Rdb because the RDB 2nd-line header is wrong.\ndat = asciitable.read(\n \"/proj/sot/ska/data/astromon/standard_xcorr/plot.rdb\",\n Reader=asciitable.Tab,\n data_start=2,\n guess=False,\n)\nok = dat[\"status_id\"] == \"\"\ndat = dat[ok]\n\nstart = DateTime() - (5 * 365)\nstop = DateTime()\nok = (DateTime(dat[\"date_obs\"]).date > start.date) & (\n DateTime(dat[\"date_obs\"]).date < stop.date\n)\nprint(\"{} to {}\".format(start.date, stop.date))\n\nprint(\"N srcs: {}\".format(len(dat[ok])))\nprint(\"RMS radius {}\".format(np.sqrt(np.mean(dat[ok][\"dr\"] ** 2))))\nprint(\"90 percentile radius = {} arcsec\".format(np.percentile(dat[ok][\"dr\"], 90)))\nprint(\"99 percentile radius = {} arcsec\".format(np.percentile(dat[ok][\"dr\"], 99)))\n\nfor detector in [\"ACIS-S\", \"ACIS-I\", \"HRC-S\", \"HRC-I\"]:\n det = dat[ok][\"detector\"] == detector\n print(\n \"90 percentile radius for {} is {} arcsec\".format(\n detector, np.percentile(dat[ok][\"dr\"][det], 90)\n )\n )\n\nprint(\n \"{:.1f} percent outside a 1 arcsec radius\".format(\n 100.0 * np.count_nonzero(dat[ok][\"dr\"] > 1.0) / len(dat[ok][\"dr\"])\n )\n)\n\n\nprint(\"Worst case is {:.1f}\".format(np.max(dat[ok][\"dr\"])))\n","sub_path":"legacy/calc_rms.py","file_name":"calc_rms.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"522969284","text":"#!/usr/bin/env python\n\"\"\"Main script for running the pyinseq package.\"\"\"\n\nimport argparse\nimport os\nfrom shutil import copyfile\nimport sys\nimport yaml\nfrom demultiplex import sample_prep, demultiplex_fastq, trim_fastq\nfrom gbkconvert import gbk2fna, gbk2ftt\nfrom mapReads import bowtieBuild, bowtieMap, parseBowtie\nfrom processMapping import mapSites, mapGenes, buildGeneTable\nfrom utils import convert_to_filename, createExperimentDirectories\n\ndef parseArgs(args):\n \"\"\"Parse command line arguments.\"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input',\n help='input Illumina reads file',\n required=True)\n parser.add_argument('-s', '--samples',\n help='sample list with barcodes',\n required=True)\n parser.add_argument('-e', '--experiment',\n help='experiment name (no spaces or special characters)',\n required=True)\n parser.add_argument('-g', '--genome',\n help='genome in GenBank format (one concatenated file for multiple contigs/chromosomes)',\n required=True)\n parser.add_argument('-d', '--disruption',\n help='fraction of gene disrupted (0.0 - 1.0)',\n default=1.0)\n parser.add_argument('--nobarcodes',\n help='barcodes have already been removed from the samples; \\\n -i should list the directory with filenames (.fastq.gz) \\\n corresponding to the sample names',\n action='store_true',\n default=False)\n parser.add_argument('--keepall',\n help='keep all intermediate files generated \\\n (warning: large size!)',\n action='store_true',\n default=False)\n return parser.parse_args(args)\n\n\nclass cd:\n \"\"\"Context manager to change to the specified directory then back.\"\"\"\n def __init__(self, newPath):\n self.newPath = os.path.expanduser(newPath)\n\n def __enter__(self):\n self.savedPath = os.getcwd()\n os.chdir(self.newPath)\n\n def __exit__(self, etype, value, traceback):\n os.chdir(self.savedPath)\n\n\ndef pipeline_organize(samples):\n\n print('\\n===================='\\\n '\\n* 
Setting up *'\\\n '\\n====================\\n')\n\n # Create the directory struture based on the experiment name\n createExperimentDirectories(experiment)\n\n # Note: barcode length hardcoded at 4 bp here\n barcode_qc, barcode_length = True, 4\n\n # if nobarcodes:\n # barcode_qc, barcode_length = False, 0\n\n # TODO(For rerunning samples, modify samplesDict construction; read in a YAML file?)\n\n # TODO(Modify as needed for already-demultiplexed samples)\n\n # samples = OrderedDict([('name1', {'name': 'name1', 'barcode': 'barcode1'}),\n # ('name2', {'name': 'name2', 'barcode': 'barcode2'})])\n global samplesDict\n samplesDict = sample_prep(samples, barcode_qc)\n\n # add 'demultiplexedPath' and 'trimmedPath' fields for each sample\n for sample in samplesDict:\n demultiplexedPath = 'results/{experiment}/raw_data/{sampleName}.fastq.gz'.format(\n experiment=experiment,\n sampleName=samplesDict[sample]['name'])\n trimmedPath = 'results/{experiment}/{sampleName}_trimmed.fastq'.format(\n experiment=experiment,\n sampleName=samplesDict[sample]['name'])\n samplesDict[sample]['demultiplexedPath'] = demultiplexedPath\n samplesDict[sample]['trimmedPath'] = trimmedPath\n\n print('\\nProcessing {} total samples:'.format(len(samplesDict)))\n for s in samplesDict:\n print('{0}\\n barcode: {1}'.format(s, samplesDict[s]['barcode']))\n samples_yaml = 'results/{}/samples.yml'.format(experiment)\n with open(samples_yaml, 'w') as fo:\n fo.write(yaml.dump(samplesDict, default_flow_style=False))\n print('Sample details written to {}'.format(samples_yaml))\n\ndef pipeline_no_demultiplex(reads):\n # copy reads files into the experiment/raw_data directory\n for sample in samplesDict:\n # makes sure the reads directory has a trailing slash\n if reads[-1] != '/':\n reads += '/'\n src = reads + sample + '.fastq.gz'\n dst = samplesDict[sample]['demultiplexedPath']\n copyfile(src, dst)\n\ndef pipeline_demultiplex(reads):\n\n print('\\n===================='\\\n '\\n* Demultiplexing *'\\\n 
'\\n====================\\n')\n\n # demultiplex based on barcodes defined in the sample file\n print('\\nDemultiplexing from input file:\\n {}'.format(reads))\n nreads = demultiplex_fastq(reads, samplesDict, experiment)\n logdata['total_reads'] = nreads\n print('Demultiplexed into output files:')\n for s in samplesDict:\n print(' ' + samplesDict[s]['demultiplexedPath'])\n\ndef pipeline_mapping(gbkfile, organism, genomeDir, disruption, barcode_length=4):\n # Prepare genome files from the GenBank input\n\n print('\\n===================='\\\n '\\n* Mapping *'\\\n '\\n====================\\n')\n\n fnaPrint = \\\n '\\nPreparing nucleotide fasta file from GenBank file to use in bowtie mapping.\\n' \\\n ' GenBank source file: {}'.format(gbkfile)\n fttPrint = \\\n '\\nPreparing feature table file from GenBank file to use in gene mapping.\\n' \\\n ' GenBank source file: {}'.format(gbkfile)\n print(fnaPrint)\n gbk2fna(gbkfile, organism, genomeDir)\n print(fttPrint)\n gbk2ftt(gbkfile, organism, genomeDir)\n\n # Change directory, build bowtie indexes, change directory back\n with cd(genomeDir):\n print('\\nBuilding bowtie index files in results/{}/genome_lookup'.format(experiment))\n bowtieBuild(organism)\n\n # Dictionary of each sample's cpm by gene\n geneMappings = {}\n for sample in samplesDict:\n s = samplesDict[sample]\n print('\\nProcessing sample {}'.format(sample))\n sample_reads, trimmed_reads = trim_fastq(s['demultiplexedPath'], s['trimmedPath'], sample, barcode_length)\n logdata[sample] = {}\n logdata[sample]['reads_with_bc'] = sample_reads\n logdata[sample]['reads_with_bc_seq_tn'] = trimmed_reads\n # Change directory, map to bowtie, change directory back\n trimmedSampleFile = '{0}_trimmed.fastq'.format(sample)\n bowtieOutputFile = '{0}_bowtie.txt'.format(sample)\n with cd(genomeDir):\n # Paths are relative to the genome_lookup directory\n # from where bowtie is called\n bowtie_in = '../{0}'.format(trimmedSampleFile)\n bowtie_out = 
'../{0}'.format(bowtieOutputFile)\n # map to bowtie and produce the output file\n print('\\nMapping {} reads with bowtie'.format(sample))\n bowtie_msg_out = bowtieMap(organism, bowtie_in, bowtie_out)\n # store bowtie data for each sample in dictionary\n logdata[sample]['bowtie_results'] = parseBowtie(bowtie_msg_out)\n # Map each bowtie result to the chromosome\n insertions = len(mapSites('results/{0}/{1}'.format(experiment, bowtieOutputFile)))\n logdata[sample]['insertion_sites'] = insertions\n # Add gene-level results for the sample to geneMappings\n # Filtered on gene fraction disrupted as specified by -d flag\n geneMappings[sample] = mapGenes(organism, sample, disruption, experiment)\n if not keepall:\n # Delete trimmed fastq file, bowtie mapping file after writing mapping results\n os.remove(s['trimmedPath'])\n os.remove('results/{0}/{1}'.format(experiment, bowtieOutputFile))\n buildGeneTable(organism, samplesDict, geneMappings, experiment)\n # print(logdata)\n\n\ndef pipeline_analysis():\n\n print('\\n===================='\\\n '\\n* Analysis *'\\\n '\\n====================\\n')\n\n samples_summary = 'results/{}/samples_summary.yml'.format(experiment)\n with open(samples_summary, 'w') as fo:\n fo.write(yaml.dump(logdata, default_flow_style=False))\n print('Writing file with summary of results:\\n {}'.format(samples_summary))\n\ndef main():\n \"\"\"Start here.\"\"\"\n args = parseArgs(sys.argv[1:])\n global experiment\n experiment = convert_to_filename(args.experiment)\n gbkfile = args.genome\n reads = args.input\n samples = args.samples\n disruption = float(args.disruption) #set input disruption value as a float as input can be int\n if disruption < 0.0 or disruption > 1.0: #test whether disruption value is from 0.0 to 1.0\n disruption = 1.0 #if disruption value is not from 0.0 to 1.0, set disruption to default value of 1.0\n print('\\n*** WARNING ***'\\\n '\\nDisruption value: {}'\n '\\nDisruption value must be from 0.0 to 1.0'\\\n '\\nProceeding with default 
value of 1.0\\n'.format(float(args.disruption)))\n nobarcodes = args.nobarcodes\n global keepall\n keepall = args.keepall\n # Logging of sample info\n global logdata\n logdata = {}\n # Organism reference files called 'genome.fna' etc\n organism = 'genome'\n\n # --- ORGANIZE SAMPLE LIST AND FILE PATHS --- #\n pipeline_organize(samples)\n\n # --- DEMULTIPLEX OR MOVE FILES IF ALREADY DEMULTIPLEXED --- #\n if nobarcodes:\n pipeline_no_demultiplex(reads)\n else:\n pipeline_demultiplex(reads)\n\n # --- BOWTIE MAPPING --- #\n genomeDir = 'results/{experiment}/genome_lookup/'.format(experiment=experiment)\n pipeline_mapping(gbkfile, organism, genomeDir, disruption)\n\n # --- ANALYSIS OF RESULTS --- #\n pipeline_analysis()\n\n\n # --- CONFIRM COMPLETION --- #\n print('\\n===================='\\\n '\\n* Done *'\\\n '\\n====================\\n')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/pyinseq.py","file_name":"pyinseq.py","file_ext":"py","file_size_in_byte":9873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"159716256","text":"# Copyright (c) 2019 Patrick Levin\r\n#\r\n# Permission is hereby granted, free of charge, to any person obtaining a copy\r\n# of this software and associated documentation files (the \"Software\"), to deal\r\n# in the Software without restriction, including without limitation the rights\r\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\r\n# copies of the Software, and to permit persons to whom the Software is\r\n# furnished to do so, subject to the following conditions:\r\n#\r\n# The above copyright notice and this permission notice shall be included in all\r\n# copies or substantial portions of the Software.\r\n#\r\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\r\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\r\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\r\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\r\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\r\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\r\n# SOFTWARE.\r\n# ==============================================================================\r\n\r\n# Keys in the model.json file\r\nTFJS_NODE_KEY = 'node'\r\n\r\nTFJS_NODE_ATTR_KEY = 'attr'\r\nTFJS_NODE_CONST_KEY = 'Const'\r\nTFJS_NODE_PLACEHOLDER_KEY = 'Placeholder'\r\n\r\nTFJS_ATTR_DTYPE_KEY = 'dtype'\r\nTFJS_ATTR_SHAPE_KEY = 'shape'\r\nTFJS_ATTR_VALUE_KEY = 'value'\r\nTFJS_ATTR_STRING_VALUE_KEY = 's'\r\n\r\nTFJS_NAME_KEY = 'name'\r\nTFJS_DATA_KEY = 'data'\r\n\r\n# CLI arguments\r\nCLI_INPUT_PATH = 'input_path'\r\nCLI_OUTPUT_PATH = 'output_path'\r\nCLI_OUTPUT_FORMAT = 'output_format'\r\nCLI_SAVED_MODEL_TAGS = 'saved_model_tags'\r\nCLI_VERSION = 'version'\r\nCLI_SAVED_MODEL = 'tf_saved_model'\r\nCLI_FROZEN_MODEL = 'tf_frozen_model'\r\nCLI_SILENT_MODE = 
'silent'","sub_path":"tfjs_graph_converter/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"45735600","text":"# Display the puppy image with a tight view.\n#\nimport pyvista as pv\nfrom pyvista import examples\npuppy = examples.download_puppy()\npl = pv.Plotter(border=True, border_width=5)\n_ = pl.add_mesh(puppy, rgb=True)\npl.camera.tight()\npl.show()\n#\n# Set the background to blue use a 5% padding around the image.\n#\npl = pv.Plotter()\n_ = pl.add_mesh(puppy, rgb=True)\npl.background_color = 'b'\npl.camera.tight(padding=0.05)\npl.show()\n","sub_path":"version/0.36/api/core/_autosummary/pyvista-Camera-tight-1.py","file_name":"pyvista-Camera-tight-1.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"294243107","text":"import keyring\nfrom selenium import webdriver\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\n\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nfrom config import Config\nfrom supported_settings import valid_room_settings\n\n\ndef calc_date(days):\n \"\"\"return: date in 'days' number of days \"\"\"\n today = datetime.today()\n two_weeks = today + timedelta(days=days)\n book_date = two_weeks.strftime(\"%d.%m.%Y\")\n\n return book_date\n\n\nclass NTNU:\n\n def __init__(self):\n self.driver = None\n self.__username = Config.username\n self.__password = keyring.get_password('ntnu', self.__username)\n self.chromedriver_path = Config.chromedriver\n\n def change_login_info(self, username, passwd):\n self.__username = username\n self.__password = passwd\n\n def start_session(self, headless=True):\n \"\"\"this function starts selenium, you can toggle gui with headless\"\"\"\n\n\n if headless:\n options = webdriver.ChromeOptions()\n options.add_argument('--headless')\n options.add_argument('--log-level=off')\n self.driver = webdriver.Chrome(self.chromedriver_path, options=options)\n return\n\n self.driver = webdriver.Chrome(self.chromedriver_path)\n\n def login(self, headless=True):\n \"\"\"this loges in to ntnu\"\"\"\n self.start_session(headless)\n\n # self.driver.minimize_window()\n\n self.driver.get(\"https://innsida.ntnu.no/c/portal/login\")\n\n # username\n self.driver.find_element_by_id('username').send_keys(self.__username)\n\n # password\n self.driver.find_element_by_id('password').send_keys(self.__password)\n\n # click login button\n # WebDriverWait(self.driver, 5).until(EC.presence_of_element_located((By.xPATH, \"/html/body/div/article/section[2]/div[1]/form[1]/button\"))).click()\n 
self.driver.find_element_by_xpath('/html/body/div/article/section[2]/div[1]/form[1]/button').click()\n\n # self.driver.find_element_by_id('students-menu-button').click()\n\n def book_room(self, **kwargs):\n \"\"\"has to continue after login\"\"\"\n # pass in kwargs corresponding to parameters to change it\n parameters = {\n 'start_time': '08:00',\n 'duration': '04:00', # this is duration from booking in hours\n 'days': 14,\n 'area': 'Gløshaugen',\n 'building': \"Elektro E/F\",\n 'min_people': None,\n 'room_id': 'E204',\n 'description_text': \"Studering\"\n }\n\n for key, value in kwargs.items():\n parameters[key] = value\n\n\n self.driver.get(\"http://www.ntnu.no/romres\")\n\n # tries to press yes, comtinue button, if element not found we skip this part as we dont need it\n try:\n WebDriverWait(self.driver, 5).until(EC.presence_of_element_located((By.NAME, \"Yes\"))).click()\n except Exception:\n pass\n\n # start time\n start_time = parameters['start_time']\n select_start = Select(self.driver.find_element_by_id(\"start\"))\n select_start.select_by_value(start_time)\n\n # end time\n duration = parameters['duration']\n select_end = Select(self.driver.find_element_by_id('duration'))\n select_end.select_by_value(duration)\n\n # date\n days = parameters['days']\n date = calc_date(days) # max\n select_date = self.driver.find_element_by_id('preset_date')\n select_date.clear()\n select_date.send_keys(date)\n select_date.send_keys(Keys.ENTER)\n\n # area\n area = parameters['area']\n select_area = Select(self.driver.find_element_by_id('area'))\n select_area.select_by_visible_text(area)\n\n # building\n building = parameters['building']\n select_building = Select(self.driver.find_element_by_id('building'))\n select_building.select_by_visible_text(building)\n\n # min people\n min_people = parameters['min_people']\n if min_people:\n people_input_box = self.driver.find_element_by_id('size')\n people_input_box.send_keys(min_people)\n people_input_box.send_keys(Keys.ENTER)\n\n # 
press \"vis ledige rom\" button\n self.driver.find_element_by_id('preformsubmit').click()\n\n # UNCOMMENT LINE UNDER TO GET TEXT ELEMENT OF ALL ROOMS THAT OCCURS\n # available_rooms_text = self.driver.find_element_by_id('room_table').text\n\n # this fetches the right input str for the chosen room (see supported_settings.py)\n room_id = valid_room_settings[0][parameters['area']][parameters['building']][parameters['room_id']]\n\n # choose the room\n\n try:\n WebDriverWait(self.driver, 5).until(EC.presence_of_element_located((By.ID, room_id))).click()\n # self.driver.find_element_by_id(room_id).click()\n fail = False\n except:\n print(f\"room: {room_id} not found. \\n trying to book a random room\")\n fail = True\n\n # booking your desired room failed, will continue to try booking first element\n if fail:\n try:\n self.driver.find_elements_by_xpath(\n '/html/body/div[4]/div[2]/div[2]/section/form/div/section[1]/fieldset/ul/li[1]/div[1]/input').click()\n except:\n print(\"first try failed\")\n try:\n self.driver.find_element_by_xpath(\n '/html/body/div[4]/div[2]/div[2]/section/form/div/section[1]/fieldset/ul/li/div[1]/input').click()\n except:\n print('failed on second try. \\nCanceling')\n self.driver.close()\n return\n\n # order button\n self.driver.find_element_by_id('rb-bestill').click()\n\n # description\n description_text = parameters['description_text']\n # description_box = self.driver.find_element_by_id('name')\n description_box = WebDriverWait(self.driver, 5).until(EC.presence_of_element_located((By.ID, 'name')))\n description_box.send_keys(description_text)\n\n # confirm buttton\n self.driver.find_element_by_name('confirm').click()\n\n # send comfirmation email\n self.driver.find_element_by_name('sendmail').click()\n\n print(\"\\nBooking complete! 
\\n-----------------------------------------------\")\n\n self.driver.quit()\n\n def tab(self, **action):\n \"\"\"currently supported kwargs: newtab, switch\"\"\"\n options = {\n 'newtab': False,\n 'switch': None\n }\n for key, value in options.items():\n if key in action.keys():\n options[key] = action[key]\n print(options)\n\n # open new tab and switch to it\n if options['newtab'] and options['switch'] is True:\n self.driver.execute_script(\"window.open('');\")\n self.driver.switch_to.window(self.driver.window_handles[-1])\n # if url is spesified go to that url\n if str(options['newtab'])[:3] == 'http':\n self.driver.get(options['new_tab'])\n\n if type(options['switch']) is int:\n tab = options['switch']\n self.driver.switch_to.window(self.driver.window_handles[tab])\n\n\nif __name__ == '__main__':\n book = NTNU()\n book.login(False)\n book.book_room()\n","sub_path":"ntnu.py","file_name":"ntnu.py","file_ext":"py","file_size_in_byte":7592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"276607114","text":"#source:\n# http://news.cnstock.com/news/sns_yw/index.html\n# view-source:http://news.cnstock.com/news/sns_yw/index.html (html文档)\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom pprint import pprint #pprint库,中的pprint方法,将字典格式化输出)source:(http://www.broadview.com.cn/article/194)\nfrom time import sleep,ctime\nimport os #与操作系统相关的第三方库,比如os.path\nimport threading\nimport queue\n\nall_urls = []\nnews_data_title = []\n#for item in range(1,116):\n# pages_list = []\n# pages = 'http://news.cnstock.com/news/sns_yw/'+str(item)\n# for page in pages:\n# pages_list.append(page)\npage_url = ['{}{}'.format('http://news.cnstock.com/news/sns_yw/',page) for page in range(1,2)]#获得第1页所有的url,也可以改为(1,100)\n\n\ndef get_links_and_title(): #获得目录页面的新闻标题和新闻链接\n for one_page in page_url:\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36'}\n html = requests.get(one_page, headers=headers)\n metadata = html.text\n soup = BeautifulSoup(metadata, 'html.parser')\n links_part = soup.find_all('ul',class_='new-list article-mini') #此处获得相关新闻模块的源码\n for links in links_part:\n a = links.find_all('a')\n for one in a:\n #print(one)\n href = one.attrs['href']\n all_urls.append(href)#将链接放到列表中\n news_title = one.get_text() #attrs['title'] #使用此方法期间遇到有的a标签没有title属性导致报错\n news_data_title.append(news_title) #将标题放到列表中\n pprint('新闻链接:{}'.format(href))\n pprint('新闻标题:{}'.format(news_title))\n #print(len(all_urls)) 此方法统计所有页面的新闻链接数量 \n\ndef get_url_text(): # 获取单个新闻页面的新闻内容\n file_directory = r'C:\\Users\\马海斌\\Desktop\\文件\\programing\\Python\\爬取文件' # 创建文件目录\n news_index = 1 # 申明文件初始的索引号\n num_news = 0 # 申明一个变量统计爬取文章的数量\n for url in all_urls:\n num_news += 1\n if num_news == 1000:\n sleep(10) #设置睡眠条件和时间\n num_news = 0\n else:\n html = requests.get(url)\n metadata = html.text\n soup = BeautifulSoup(metadata,'html.parser')\n try:\n file_title = soup.find('h1').get_text()\n except AttributeError:\n 
pass\n\n file_title = file_title.replace('*','')\n file_title = file_title.replace('|','')\n file_title = file_title.replace('?', '')\n file_title = file_title.replace('\"', '')\n file_title = file_title.replace('\"', '')\n file_title = file_title.replace('>', '')\n file_title = file_title.replace('<', '')\n file_title = file_title.replace(':', '')\n file_title = file_title.replace('\\\\', '')\n file_title = file_title.replace('/', '')\n file_title = file_title.replace('\\r\\n', '')\n\n t = '' # 申明写入的文件的初始局部变量\n try:\n news_time = soup.find('span', class_='timer').get_text() # 获取新闻页面的发布时间\n t += '<新闻发布时间:{}>'.format(news_time)\n except AttributeError:\n pass\n\n for text in soup.select('div.content > p'):\n t += text.get_text()\n t += '<新闻链接:{}>'.format(url) #文件中加上url\n\n #print(t)\n with open(os.path.join(file_directory, str(file_title)) + '.txt', 'a',encoding='utf8') as file:\n file.write(t)\n news_index += 1\n #text = soup.find_all('p') 此方法不够精确\n #t = ''\n #for w in text:\n # t += w.get_text()\n #pprint(t)\n\n\nif __name__ == '__main__':\n\n get_links_and_title() # 得到所有的标题链接\n\n get_url_text() # 得到单链接的新闻文本","sub_path":"spider of Python/stock_news.py","file_name":"stock_news.py","file_ext":"py","file_size_in_byte":4206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"52436563","text":"from Kisok.backends import Backend\nimport sys, gtk, gobject\nimport braseroburn\n\nclass BurnBackend(Backend):\n\n def __init__(self, *args, **kwargs):\n self.iso = None\n\n def write(self, isoname):\n braseroburn.start()\n track = braseroburn.TrackImageCfg()\n track.set_source(isoname)\n session = braseroburn.SessionCfg()\n session.add_track(track, None)\n\n option_dialog = braseroburn.BurnOptions(session)\n response = option_dialog.run()\n option_dialog.destroy()\n if response != gtk.RESPONSE_OK:\n sys.exit(1)\n\n burn_dialog = braseroburn.BurnDialog()\n burn_dialog.show()\n burn_dialog.run(session)\n burn_dialog.destroy()\n braseroburn.stop()\n","sub_path":"Kisok/backends/burn.py","file_name":"burn.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"298376439","text":"class Iadc(object):\n def __init__(self, fpga, zdok=0):\n self.fpga = fpga\n self.zdok = zdok\n self.reg = 'iadc%d_controller' % self.zdok\n self._set_3wire(0, 0, 1, 0) # initial state\n\n def reg_reset(self):\n self._set_3wire(0, 0, 1, 0)\n self._set_3wire(1, 0, 1, 0)\n self._set_3wire(0, 0, 1, 0)\n \n\n def ddrb_reset(self):\n self.fpga.write_int(self.reg, 0, offset=1, blindwrite=True)\n self.fpga.write_int(self.reg, 1, offset=1, blindwrite=True)\n self.fpga.write_int(self.reg, 0, offset=1, blindwrite=True)\n\n def mmcm_reset(self):\n self.fpga.write_int(self.reg, 0, offset=2, blindwrite=True)\n self.fpga.write_int(self.reg, 1, offset=2, blindwrite=True)\n self.fpga.write_int(self.reg, 0, offset=2, blindwrite=True)\n\n def _set_3wire(self, mode, clk, ldn, data):\n # bit mappings\n CLK = 0\n DATA = 1\n STROBE = 2\n MODE = 3\n v = (mode << MODE) + (ldn << STROBE) + (data << DATA) + (clk << CLK)\n #print mode, clk, ldn, data,\n #if clk:\n # print 'Clocked data', data\n #else:\n # print ''\n self.fpga.write_int(self.reg, v, blindwrite=True)\n \n def write_reg(self, addr, val):\n self._set_3wire(1, 0, 1, 0) # mode high\n self._set_3wire(1, 0, 1, 0) # strobe high\n self._set_3wire(1, 1, 1, 0) # clock tick\n self._set_3wire(1, 0, 1, 0) # \n self._set_3wire(1, 0, 0, 0) # strobe down\n for i in range(3)[::-1]:\n d = (addr >> i) & 0x1\n self._set_3wire(1, 0, 0, d) # set data bit\n self._set_3wire(1, 1, 0, d) # tick clock\n self._set_3wire(1, 0, 0, d) # \n for i in range(16)[::-1]:\n d = (val >> i) & 0x1\n self._set_3wire(1, 0, 0, d) # set data bit\n self._set_3wire(1, 1, 0, d) # tick clock\n self._set_3wire(1, 0, 0, d) # \n # tick clock once more\n self._set_3wire(1, 1, 0, 0) # tick clock\n self._set_3wire(1, 0, 0, 0) # \n # strobe\n self._set_3wire(1, 0, 1, 0) # tick clock\n self._set_3wire(1, 1, 1, 0) # tick clock\n self._set_3wire(1, 0, 1, 0) # \n\n def set_dual_input(self):\n #self.write_reg(0, 0b0111000010111100)\n self.write_reg(0, 
0x7cbc)\n self.ddrb_reset()\n self.mmcm_reset()\n self.ddrb_reset()\n\n def set_single_input(self):\n self.write_reg(0, 0x7cac)\n self.ddrb_reset()\n self.mmcm_reset()\n self.ddrb_reset()\n\n def set_ramp_mode(self):\n self.write_reg(0b110, 0b11)\n self.ddrb_reset()\n self.mmcm_reset()\n self.ddrb_reset()\n\n def set_const_mode(self, const=0xaa):\n self.write_reg(0b110, (const<<2) + 0b01)\n self.ddrb_reset()\n self.mmcm_reset()\n self.ddrb_reset()\n\n def set_data_mode(self):\n self.write_reg(0b110, 0b00)\n self.ddrb_reset()\n self.mmcm_reset()\n self.ddrb_reset()\n\n\n","sub_path":"attic/quicklook/iadc.py","file_name":"iadc.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"399375539","text":"from random import randint\nfrom math import gcd\nimport csv\n\n\n# instead of quantum order finding\ndef order(a, N):\n for i in range(1, N):\n if pow(a, i, N) == 1:\n return i\n\n\ndata = []\n\nfor N in range(3, 100000):\n order_counts = 0\n while True:\n a = randint(2, N-1)\n if gcd(a, N) != 1:\n data.append([N, False, order_counts])\n break\n t = pow(a, (N-1)//2, N)\n if t != 1 and t != N - 1:\n data.append([N, False, order_counts])\n break\n elif t == 1:\n continue\n else:\n ord_a = order(a, N)\n order_counts += 1\n if ord_a == N - 1:\n data.append([N, True, order_counts])\n break\n\n\nwith open(\"output.csv\", \"w\") as f:\n writer = csv.writer(f)\n writer.writerows(data)\n","sub_path":"primary_test.py","file_name":"primary_test.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"443219517","text":"def fonk(a): #fonksiyon belirtilen sayıya kadar o sayının bölenlerini hesaplar ardından bölenler toplamının sayıya eşitliğini ineler\n bölenler=[]\n for i in range(1,a):\n kalan=a%i\n if kalan==0:\n bölenler=bölenler+[i]\n topla=0\n for i in bölenler:\n topla+=i\n if topla==a:\n return print(a)\nwhile True:\n try:\n for i in range(1000):\n fonk(i)\n seçim=input(\"\\n*****Devam etmek için her hangi bir tuşa /// çıkmak için (q) tuşuna basınız..:\")\n if seçim==\"q\" or seçim ==\"Q\":\n break\n except ValueError:\n print(\"Lütfen bir tam sayı giriniz..:\")\n continue\n\n\n\n","sub_path":"8.hafta 3. ödev (mükemmel sayı tesbiti).py","file_name":"8.hafta 3. ödev (mükemmel sayı tesbiti).py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"76325987","text":"# Embedded file name: /usr/lib/python2.6/site-packages/awx/main/models/ad_hoc_commands.py\nimport hmac\nimport json\nimport logging\nfrom django.conf import settings\nfrom django.db import models\nfrom django.utils.text import Truncator\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.exceptions import ValidationError\nfrom django.core.urlresolvers import reverse\nfrom jsonfield import JSONField\nfrom awx.main.models.base import *\nfrom awx.main.models.unified_jobs import *\nfrom awx.main.utils import decrypt_field\nlogger = logging.getLogger('awx.main.models.ad_hoc_commands')\n__all__ = ['AdHocCommand', 'AdHocCommandEvent']\n\nclass AdHocCommand(UnifiedJob):\n MODULE_NAME_CHOICES = [ (x, x) for x in settings.AD_HOC_COMMANDS ]\n MODULE_NAME_DEFAULT = 'command' if 'command' in settings.AD_HOC_COMMANDS else None\n\n class Meta(object):\n app_label = 'main'\n\n job_type = models.CharField(max_length=64, choices=JOB_TYPE_CHOICES, default='run')\n inventory = models.ForeignKey('Inventory', related_name='ad_hoc_commands', null=True, on_delete=models.SET_NULL)\n limit = models.CharField(max_length=1024, blank=True, default='')\n credential = models.ForeignKey('Credential', related_name='ad_hoc_commands', null=True, default=None, on_delete=models.SET_NULL)\n module_name = models.CharField(max_length=1024, default=MODULE_NAME_DEFAULT, choices=MODULE_NAME_CHOICES, blank=bool(MODULE_NAME_DEFAULT))\n module_args = models.TextField(blank=True, default='')\n forks = models.PositiveIntegerField(blank=True, default=0)\n verbosity = models.PositiveIntegerField(choices=VERBOSITY_CHOICES, blank=True, default=0)\n become_enabled = models.BooleanField(default=False)\n hosts = models.ManyToManyField('Host', related_name='ad_hoc_commands', editable=False, through='AdHocCommandEvent')\n\n def clean_credential(self):\n cred = self.credential\n if cred and cred.kind != 'ssh':\n raise ValidationError('You must provide a machine / SSH 
credential.')\n return cred\n\n def clean_limit(self):\n return self.limit\n\n def clean_module_name(self):\n if type(self.module_name) not in (str, unicode):\n raise ValidationError('Invalid type for ad hoc command')\n if not self.module_name.strip():\n module_name = 'command'\n raise module_name not in settings.AD_HOC_COMMANDS and ValidationError('Unsupported module for ad hoc commands.')\n return module_name\n\n def clean_module_args(self):\n if type(self.module_args) not in (str, unicode):\n raise ValidationError('Invalid type for ad hoc command')\n module_args = self.module_args\n if self.module_name in ('command', 'shell') and not module_args:\n raise ValidationError('No argument passed to %s module.' % self.module_name)\n return module_args\n\n @property\n def passwords_needed_to_start(self):\n \"\"\"Return list of password field names needed to start the job.\"\"\"\n if self.credential and self.credential.active:\n return self.credential.passwords_needed\n else:\n return []\n\n @classmethod\n def _get_parent_field_name(cls):\n return ''\n\n @classmethod\n def _get_task_class(cls):\n from awx.main.tasks import RunAdHocCommand\n return RunAdHocCommand\n\n def get_absolute_url(self):\n return reverse('api:ad_hoc_command_detail', args=(self.pk,))\n\n @property\n def task_auth_token(self):\n \"\"\"Return temporary auth token used for task requests via API.\"\"\"\n if self.status == 'running':\n h = hmac.new(settings.SECRET_KEY, self.created.isoformat())\n return '%d-%s' % (self.pk, h.hexdigest())\n\n def get_passwords_needed_to_start(self):\n return self.passwords_needed_to_start\n\n def is_blocked_by(self, obj):\n from awx.main.models import InventoryUpdate\n if type(obj) == InventoryUpdate:\n if self.inventory == obj.inventory_source.inventory:\n return True\n return False\n\n @property\n def task_impact(self):\n from awx.main.models.inventory import Host\n count_hosts = Host.objects.filter(active=True, enabled=True, 
inventory__ad_hoc_commands__pk=self.pk).count()\n return min(count_hosts, 5 if self.forks == 0 else self.forks) * 10\n\n def generate_dependencies(self, active_tasks):\n from awx.main.models import InventoryUpdate\n if not self.inventory:\n return []\n else:\n inventory_sources = self.inventory.inventory_sources.filter(active=True, update_on_launch=True)\n inventory_sources_found = []\n dependencies = []\n for obj in active_tasks:\n if type(obj) == InventoryUpdate:\n if obj.inventory_source in inventory_sources:\n inventory_sources_found.append(obj.inventory_source)\n\n try:\n start_args = json.loads(decrypt_field(self, 'start_args'))\n except Exception:\n start_args = None\n\n start_args = start_args or {}\n inventory_sources_already_updated = start_args.get('inventory_sources_already_updated', [])\n if inventory_sources_already_updated:\n for source in inventory_sources.filter(pk__in=inventory_sources_already_updated):\n if source not in inventory_sources_found:\n inventory_sources_found.append(source)\n\n if inventory_sources.count():\n for source in inventory_sources:\n if source not in inventory_sources_found and source.needs_update_on_launch:\n dependencies.append(source.create_inventory_update(launch_type='dependency'))\n\n return dependencies\n\n def copy(self):\n data = {}\n for field in ('job_type', 'inventory_id', 'limit', 'credential_id', 'module_name', 'module_args', 'forks', 'verbosity', 'become_enabled'):\n data[field] = getattr(self, field)\n\n return AdHocCommand.objects.create(**data)\n\n def save(self, *args, **kwargs):\n update_fields = kwargs.get('update_fields', [])\n if not self.name:\n self.name = Truncator(u': '.join(filter(None, (self.module_name, self.module_args)))).chars(512)\n if 'name' not in update_fields:\n update_fields.append('name')\n super(AdHocCommand, self).save(*args, **kwargs)\n return\n\n\nclass AdHocCommandEvent(CreatedModifiedModel):\n \"\"\"\n An event/message logged from the ad hoc event callback for each host.\n 
\"\"\"\n EVENT_TYPES = [('runner_on_failed', _('Host Failed'), True), ('runner_on_ok', _('Host OK'), False), ('runner_on_unreachable', _('Host Unreachable'), True)]\n FAILED_EVENTS = [ x[0] for x in EVENT_TYPES if x[2] ]\n EVENT_CHOICES = [ (x[0], x[1]) for x in EVENT_TYPES ]\n\n class Meta:\n app_label = 'main'\n unique_together = [('ad_hoc_command', 'host_name')]\n ordering = ('-pk',)\n\n ad_hoc_command = models.ForeignKey('AdHocCommand', related_name='ad_hoc_command_events', on_delete=models.CASCADE, editable=False)\n host = models.ForeignKey('Host', related_name='ad_hoc_command_events', null=True, default=None, on_delete=models.SET_NULL, editable=False)\n host_name = models.CharField(max_length=1024, default='', editable=False)\n event = models.CharField(max_length=100, choices=EVENT_CHOICES)\n event_data = JSONField(blank=True, default={})\n failed = models.BooleanField(default=False, editable=False)\n changed = models.BooleanField(default=False, editable=False)\n counter = models.PositiveIntegerField(default=0)\n\n def get_absolute_url(self):\n return reverse('api:ad_hoc_command_event_detail', args=(self.pk,))\n\n def __unicode__(self):\n return u'%s @ %s' % (self.get_event_display(), self.created.isoformat())\n\n def save(self, *args, **kwargs):\n from awx.main.models.inventory import Host\n update_fields = kwargs.get('update_fields', [])\n res = self.event_data.get('res', None)\n if self.event in self.FAILED_EVENTS:\n if not self.event_data.get('ignore_errors', False):\n self.failed = True\n if 'failed' not in update_fields:\n update_fields.append('failed')\n if isinstance(res, dict) and res.get('changed', False):\n self.changed = True\n if 'changed' not in update_fields:\n update_fields.append('changed')\n self.host_name = self.event_data.get('host', '').strip()\n if 'host_name' not in update_fields:\n update_fields.append('host_name')\n try:\n if not self.host_id and self.host_name:\n host_qs = 
Host.objects.filter(inventory__ad_hoc_commands__id=self.ad_hoc_command_id, name=self.host_name)\n host_id = host_qs.only('id').values_list('id', flat=True)\n if host_id.exists():\n self.host_id = host_id[0]\n if 'host_id' not in update_fields:\n update_fields.append('host_id')\n except (IndexError, AttributeError):\n pass\n\n super(AdHocCommandEvent, self).save(*args, **kwargs)\n return","sub_path":"usr/lib/python2.6/site-packages/awx/main/models/ad_hoc_commands.py","file_name":"ad_hoc_commands.py","file_ext":"py","file_size_in_byte":9301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"16898486","text":"from slackclient import SlackClient\nfrom bot.users import Users\nfrom bot.channels import Channels\nfrom bot.commands import CommandLoader\nimport time\n\nclass SlackBot:\n\n commands = None\n slack = None\n users = None\n channels = None\n\n def __init__(self):\n self.slack = SlackClient('xoxb-22358844560-K8mlKRpzJwzYqTCSpX1DWla6')\n self.users = Users(self.slack)\n self.channels = Channels(self.slack)\n self.commands = CommandLoader().load(self.slack, self.users, self.channels)\n\n def start(self):\n if self.slack.rtm_connect():\n while True:\n for message in self.slack.rtm_read():\n if message.get('type', None) == 'message':\n\n # Middleware\n \n\n # Command Triggers\n for command in self.commands:\n for trigger in command['triggers']:\n if message.get('text', None) != None and '+' + trigger == message.get('text', None).partition(\" \")[0]:\n command['instance'].execute(message)\n else:\n print('Failed To Connect')\n","sub_path":"le-bot/bot/slackbot.py","file_name":"slackbot.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"476509937","text":"'''\nCreated on 06.03.2020\n\n@author: JanB-4096\n'''\nfrom src import GameConfig\nimport pygame\nimport numpy as np\n\n\nclass NPCControl():\n \n def __init__(self, p1, p2, p1_difficulty, p2_difficulty):\n self.settings = {'p1': {'mode': p1, 'difficulty': p1_difficulty}, \\\n 'p2': {'mode': p2, 'difficulty': p2_difficulty}}\n\n def translate_keyboard(self, events, change_position_p1, change_position_p2):\n \n for event in events:\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_w:\n change_position_p1 = -1*GameConfig.change_bar_vertical\n elif event.key == pygame.K_s:\n change_position_p1 = 1*GameConfig.change_bar_vertical\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_w or event.key == pygame.K_s:\n change_position_p1 = 0 \n \n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n change_position_p2 = -1*GameConfig.change_bar_vertical\n elif event.key == pygame.K_DOWN:\n change_position_p2 = 1*GameConfig.change_bar_vertical\n if event.type == pygame.KEYUP:\n if event.key == pygame.K_UP or event.key == pygame.K_DOWN:\n change_position_p2 = 0\n \n return change_position_p1, change_position_p2\n \n def calc_linear_npc(self, position_p, position_ball, change_ball, player):\n change_position_p = 0\n \n if self.settings[player]['difficulty'] == 'middle':\n if position_p + 2*GameConfig.bar_hight/5 >= position_ball[1]:\n change_position_p = -1*GameConfig.change_bar_vertical\n elif position_p + 3*GameConfig.bar_hight/5 <= position_ball[1]:\n change_position_p = GameConfig.change_bar_vertical\n elif self.settings[player]['difficulty'] == 'easy':\n if position_p >= position_ball[1]:\n change_position_p = -1*GameConfig.change_bar_vertical\n elif position_p + GameConfig.bar_hight <= position_ball[1]:\n change_position_p = GameConfig.change_bar_vertical\n elif self.settings[player]['difficulty'] == 'very_hard':\n timesteps_until_hit = 0\n \n if 
change_ball[0] > 0 and player == 'p2': #going right\n timesteps_until_hit = np.abs((position_ball[0] - GameConfig.startpoint_bar_p2[0]) / (change_ball[0]))\n elif change_ball[0] < 0 and player == 'p1':\n timesteps_until_hit = np.abs((position_ball[0] - GameConfig.startpoint_bar_p1[0] + GameConfig.bar_width) / (change_ball[0]))\n \n if timesteps_until_hit != 0: \n predicted_y_ball = position_ball[1] + change_ball[1]*timesteps_until_hit\n if predicted_y_ball > GameConfig.display_hight:\n predicted_y_ball = 2*GameConfig.display_hight - predicted_y_ball\n elif predicted_y_ball < 0:\n predicted_y_ball *= -1\n \n # TODO: remove factor 2 ... just implemented for improving enemy for neural net\n if position_p + GameConfig.bar_hight/5 >= predicted_y_ball:\n change_position_p = -2*GameConfig.change_bar_vertical\n elif position_p + 4*GameConfig.bar_hight/5 <= predicted_y_ball:\n change_position_p = 2*GameConfig.change_bar_vertical\n if change_ball[0] > 0 and player == 'p1' or change_ball[0] < 0 and player == 'p2': #go back to middle position if ball was hit\n distance_to_center = GameConfig.display_hight/2 - (position_p + GameConfig.bar_hight/2)\n change_position_p = np.sign(distance_to_center) * 2*GameConfig.change_bar_vertical * int((np.abs(distance_to_center) > GameConfig.change_bar_vertical))\n \n else: \n timesteps_until_hit = 0\n if change_ball[0] > 0 and player == 'p2': #going right\n timesteps_until_hit = np.abs((position_ball[0] - GameConfig.startpoint_bar_p2[0]) / (change_ball[0]))\n elif change_ball[0] < 0 and player == 'p1':\n timesteps_until_hit = np.abs((position_ball[0] - GameConfig.startpoint_bar_p1[0] + GameConfig.bar_width) / (change_ball[0]))\n if timesteps_until_hit != 0: \n predicted_y_ball = position_ball[1] + change_ball[1]*timesteps_until_hit\n if predicted_y_ball > GameConfig.display_hight:\n predicted_y_ball = 2*GameConfig.display_hight - predicted_y_ball\n elif predicted_y_ball < 0:\n predicted_y_ball *= -1\n if position_p + GameConfig.bar_hight/5 
>= predicted_y_ball:\n change_position_p = -1*GameConfig.change_bar_vertical\n elif position_p + 4*GameConfig.bar_hight/5 <= predicted_y_ball:\n change_position_p = GameConfig.change_bar_vertical\n \n return change_position_p\n \n def calc_ai_p1(self, position_p, position_ball, change_ball, species):\n \n output_nn = species.calculate_output_to_input([position_p, position_ball[0], position_ball[1], change_ball[0], change_ball[1]])\n if output_nn[0] > output_nn[1]: #up\n return -1*GameConfig.change_bar_vertical\n else: #down\n return GameConfig.change_bar_vertical\n \n def calc_swarm_p1(self, position_p, position_ball, change_ball, speciesList):\n output_nn = []\n for species in speciesList:\n output_nn.append(self.calc_ai_p1(position_p, position_ball, change_ball, species))\n return GameConfig.change_bar_vertical * np.sign(np.average(output_nn))\n","sub_path":"src/NPCControl.py","file_name":"NPCControl.py","file_ext":"py","file_size_in_byte":5959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"607131800","text":"from dataclasses import dataclass\nfrom typing import Sequence, Tuple\n\nfrom django.contrib.admin.options import BaseModelAdmin\nfrom django.db import models\nfrom django.db.models import OuterRef, Subquery, functions\nfrom django.db.models.functions import Cast\nfrom django.urls import reverse\nfrom django.utils.html import format_html\n\nfrom .query import (\n BaseType,\n BooleanType,\n DateTimeType,\n DateType,\n HTMLType,\n MonthType,\n NumberType,\n StringType,\n WeekDayType,\n YearType,\n)\n\n_OPEN_IN_ADMIN = \"admin\"\n\n\n_AGG_MAP = {\n \"average\": lambda x: models.Avg(Cast(x, output_field=models.IntegerField())),\n \"count\": lambda x: models.Count(x, distinct=True),\n \"max\": models.Max,\n \"min\": models.Min,\n \"std_dev\": models.StdDev,\n \"sum\": lambda x: models.Sum(Cast(x, output_field=models.IntegerField())),\n \"variance\": models.Variance,\n}\n\n\n_AGGREGATES = {\n # NTS beware that Sum(type) -> type\n StringType: [\"count\"],\n NumberType: [\"average\", \"count\", \"max\", \"min\", \"std_dev\", \"sum\", \"variance\"],\n DateTimeType: [\"count\"], # average, min and max might be nice here but sqlite\n DateType: [\"count\"], # average, min and max might be nice here but sqlite\n BooleanType: [\"average\", \"sum\"],\n}\n\n\n_FUNC_MAP = {\n \"year\": (functions.ExtractYear, YearType),\n \"quarter\": (functions.ExtractQuarter, NumberType),\n \"month\": (functions.ExtractMonth, MonthType),\n \"day\": (functions.ExtractDay, NumberType),\n \"week_day\": (functions.ExtractWeekDay, WeekDayType),\n \"hour\": (functions.ExtractHour, NumberType),\n \"minute\": (functions.ExtractMinute, NumberType),\n \"second\": (functions.ExtractSecond, NumberType),\n \"date\": (functions.TruncDate, DateType),\n}\n\n_FUNCTIONS = {\n DateTimeType: [\n \"year\",\n \"quarter\",\n \"month\",\n \"day\",\n \"week_day\",\n \"hour\",\n \"minute\",\n \"second\",\n \"date\",\n ],\n DateType: [\"year\", \"quarter\", \"month\", \"day\", 
\"week_day\"],\n}\n\n\ndef s(path):\n return \"__\".join(path)\n\n\ndef get_model_name(model, sep=\".\"):\n return f\"{model._meta.app_label}{sep}{model.__name__}\"\n\n\n@dataclass\nclass OrmBoundField:\n field: \"OrmBaseField\"\n previous: \"OrmBoundField\"\n full_path: Sequence[str]\n pretty_path: Sequence[str]\n queryset_path: str = None\n aggregate_clause: Tuple[str, models.Func] = None\n filter_: bool = False\n having: bool = False\n model_name: str = None\n\n @property\n def path_str(self):\n return s(self.full_path)\n\n @property\n def group_by(self):\n return self.field.can_pivot\n\n def annotate(self, request, qs):\n return qs\n\n def __getattr__(self, name):\n return getattr(self.field, name)\n\n @classmethod\n def blank(cls):\n return cls(field=None, previous=None, full_path=[], pretty_path=[])\n\n\n@dataclass\nclass OrmModel:\n fields: dict\n admin: BaseModelAdmin = None\n\n @property\n def root(self):\n return bool(self.admin)\n\n\n@dataclass\nclass OrmBaseField:\n model_name: str\n name: str\n pretty_name: str\n type_: BaseType = None\n concrete: bool = False\n rel_name: str = None\n can_pivot: bool = False\n admin: object = None\n choices: Sequence[Tuple[str, str]] = ()\n\n def __post_init__(self):\n if not self.type_:\n assert self.rel_name\n if self.concrete or self.can_pivot:\n assert self.type_\n\n def format(self, value):\n return self.type_.format(value, self.choices)\n\n\nclass OrmFkField(OrmBaseField):\n def __init__(self, model_name, name, pretty_name, rel_name):\n super().__init__(model_name, name, pretty_name, rel_name=rel_name)\n\n def bind(self, previous):\n previous = previous or OrmBoundField.blank()\n full_path = previous.full_path + [self.name]\n return OrmBoundField(\n field=self,\n previous=previous,\n full_path=full_path,\n pretty_path=previous.pretty_path + [self.pretty_name],\n )\n\n\nclass OrmConcreteField(OrmBaseField):\n def __init__(self, model_name, name, pretty_name, type_, choices=None):\n super().__init__(\n 
model_name,\n name,\n pretty_name,\n concrete=True,\n type_=type_,\n rel_name=(\n type_.name if type_ in _AGGREGATES or type_ in _FUNCTIONS else None\n ),\n can_pivot=True,\n choices=choices or (),\n )\n\n def bind(self, previous):\n previous = previous or OrmBoundField.blank()\n full_path = previous.full_path + [self.name]\n return OrmBoundField(\n field=self,\n previous=previous,\n full_path=full_path,\n pretty_path=previous.pretty_path + [self.pretty_name],\n queryset_path=s(full_path),\n filter_=True,\n )\n\n\nclass OrmCalculatedField(OrmBaseField):\n def __init__(self, model_name, name, pretty_name, admin):\n super().__init__(\n model_name, name, pretty_name, type_=StringType, can_pivot=True, admin=admin\n )\n\n def bind(self, previous):\n previous = previous or OrmBoundField.blank()\n full_path = previous.full_path + [self.name]\n return OrmBoundField(\n field=self,\n previous=previous,\n full_path=full_path,\n pretty_path=previous.pretty_path + [self.pretty_name],\n queryset_path=s(previous.full_path + [\"id\"]),\n model_name=self.model_name,\n )\n\n def format(self, obj):\n if obj is None:\n return None\n\n if hasattr(self.admin, self.name):\n # admin callable\n func = getattr(self.admin, self.name)\n try:\n return func(obj)\n except Exception as e:\n return str(e)\n else:\n # model property or callable\n try:\n value = getattr(obj, self.name)\n return value() if callable(value) else value\n except Exception as e:\n return str(e)\n\n\nclass OrmBoundAnnotatedField(OrmBoundField):\n def annotate(self, request, qs):\n from .orm import admin_get_queryset\n\n return qs.annotate(\n **{\n self.queryset_path: Subquery(\n admin_get_queryset(self.admin, request, [self.name])\n .filter(pk=OuterRef(s(self.previous.full_path + [\"id\"])))\n .values(self.admin_order_field)[:1],\n output_field=self.field_type,\n )\n }\n )\n\n\nclass OrmAnnotatedField(OrmBaseField):\n def __init__(\n self,\n model_name,\n name,\n pretty_name,\n type_,\n field_type,\n admin,\n 
admin_order_field,\n choices=None,\n ):\n super().__init__(\n model_name,\n name,\n pretty_name,\n type_=type_,\n can_pivot=True,\n admin=admin,\n concrete=True,\n choices=choices or (),\n )\n self.field_type = field_type\n self.admin_order_field = admin_order_field\n\n def bind(self, previous):\n previous = previous or OrmBoundField.blank()\n\n full_path = previous.full_path + [self.name]\n return OrmBoundAnnotatedField(\n field=self,\n previous=previous,\n full_path=full_path,\n pretty_path=previous.pretty_path + [self.pretty_name],\n queryset_path=f\"ddb_{s(full_path)}\",\n filter_=True,\n )\n\n\nclass OrmAdminField(OrmBaseField):\n def __init__(self, model_name):\n super().__init__(\n model_name, _OPEN_IN_ADMIN, _OPEN_IN_ADMIN, type_=HTMLType, can_pivot=True\n )\n\n def bind(self, previous):\n previous = previous or OrmBoundField.blank()\n full_path = previous.full_path + [self.name]\n return OrmBoundField(\n field=self,\n previous=previous,\n full_path=full_path,\n pretty_path=previous.pretty_path + [self.pretty_name],\n queryset_path=s(previous.full_path + [\"id\"]),\n model_name=self.model_name,\n )\n\n def format(self, obj):\n if obj is None:\n return None\n\n model_name = get_model_name(obj.__class__, \"_\")\n url_name = f\"admin:{model_name}_change\".lower()\n url = reverse(url_name, args=[obj.pk])\n return f'{obj}'\n\n\nclass OrmFileField(OrmConcreteField):\n def __init__(self, model_name, name, pretty_name, url_func):\n super().__init__(model_name, name, pretty_name, type_=HTMLType)\n self.url_func = url_func\n\n def format(self, value):\n if not value:\n return None\n\n return format_html('{}', self.url_func(value), value)\n\n\nclass OrmAggregateField(OrmBaseField):\n def __init__(self, model_name, name):\n super().__init__(model_name, name, name, type_=NumberType, concrete=True)\n self.aggregate = name\n\n def bind(self, previous):\n assert previous\n full_path = previous.full_path + [self.name]\n agg = 
_AGG_MAP[self.aggregate](s(previous.full_path))\n return OrmBoundField(\n field=self,\n previous=previous,\n full_path=full_path,\n pretty_path=previous.pretty_path + [self.pretty_name],\n queryset_path=s(full_path),\n aggregate_clause=(s(full_path), agg),\n having=True,\n )\n\n\nclass OrmBoundFunctionField(OrmBoundField):\n def annotate(self, request, qs):\n return qs.annotate(\n **{\n self.queryset_path: _FUNC_MAP[self.function][0](\n s(self.previous.full_path)\n )\n }\n )\n\n\nclass OrmFunctionField(OrmBaseField):\n def __init__(self, model_name, name, type_):\n super().__init__(\n model_name, name, name, type_=type_, concrete=True, can_pivot=True\n )\n self.function = name\n\n def bind(self, previous):\n assert previous\n full_path = previous.full_path + [self.name]\n return OrmBoundFunctionField(\n field=self,\n previous=previous,\n full_path=full_path,\n pretty_path=previous.pretty_path + [self.pretty_name],\n queryset_path=s(full_path),\n filter_=True,\n )\n","sub_path":"data_browser/orm_fields.py","file_name":"orm_fields.py","file_ext":"py","file_size_in_byte":10401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"131839778","text":"from django.core.cache import cache\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\n\nfrom django.views.decorators.cache import cache_page\n\n# @cache_page(100,key_prefix='py2002')\nfrom userapp.models import User\n\n\ndef query(request):\n # 查询 model数据 回传到html文件\n users = User.objects.all()\n return render(request, 'redisapp/query.html', {'users': users})\n\n\ndef change_age(request):\n rst = request.GET.get('age')\n user = User.objects.get(pk=1)\n user.age = rst\n user.save()\n # 手动清除缓存\n # 方式一:\n # caches = cache.keys('*py2002*')\n # for c in caches:\n # cache.delete(c)\n # 方式二\n # caches = cache.keys('*py2002*')\n # cache.delete_many(caches)\n # 方式三:\n # cache.delete_pattern('*py2002*')\n # 方式四:\n # cache.clear()\n return HttpResponse('修改成功')\n\n\ndef set_session(request):\n request.session['python2002'] = True\n return HttpResponse('设置session')\n","sub_path":"redisapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"510050052","text":"URL_GOOGLE = 'https://www.google.com/search?q=погода+{}+{}.{}'\r\nURL_SINOPTIK = 'https://ua.sinoptik.ua/погода-{}/{}'\r\nURL_POGODA33 = 'https://pogoda33.ua/погода-{}/тиждень'\r\nURL_METEOTREND = 'https://ua.meteotrend.com/forecast/ua/{}/'\r\n\r\nDAYS = ('понеділок','вівторок','середа','четвер','пʼятниця','субота','неділя')\r\nMONTHS = ('січня','лютого','березня','квітня','травня','червня','липня','серпня','вересня','жовтня','листопада','грудня')\r\nHEADERS = {\r\n\t'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36',\r\n\t'accept':'*/*'\r\n}\r\n\r\nlocalization = {'й':'i','ц':'ts','у':'u','к':'k','е':'e','н':'n','г':'h',\r\n'ш':'sh','щ':'sch','з':'z','х':'kh','ї':'i','ф':'f',\r\n'і':'i','в':'v','а':'a','п':'p','р':'r','о':'o','л':'l','д':'d','ж':'zh','є':'ie',\r\n'ґ':'g','я':'ia','ч':'ch','с':'s','м':'m','и':'y','т':'t','ь':'','б':'b','ю':'iu'\r\n}\r\n","sub_path":"configure.py","file_name":"configure.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"156108818","text":"'''\r\nImport the pandas package with the alias pd.\r\nImport the file 'tweets.csv' using the pandas function read_csv().\r\nAssign the resulting DataFrame to df.\r\nComplete the for loop by iterating over col, the 'lang' column in the DataFrame df.\r\nComplete the bodies of the if-else statements in the for loop:\r\nif the key is in the dictionary langs_count, add 1 to its current value,\r\nelse add the key to langs_count and set its value to 1.\r\nUse the loop variable entry in your code.\r\n'''\r\n\r\n# Import pandas\r\nimport pandas as pd\r\n\r\n# Import Twitter data as DataFrame: df\r\ndf = pd.read_csv('tweets.csv')\r\n\r\n# Initialize an empty dictionary: langs_count\r\nlangs_count = {}\r\n\r\n# Extract column from DataFrame: col\r\ncol = df['lang']\r\n\r\n# Iterate over lang column in DataFrame\r\nfor entry in col:\r\n\r\n # If the language is in langs_count, add 1\r\n if entry in langs_count.keys():\r\n langs_count[entry] += 1\r\n # Else add the language to langs_count, set the value to 1\r\n else:\r\n langs_count[entry] = 1\r\n \r\n# Print the populated dictionary\r\nprint(langs_count)\r\n","sub_path":"03-python-data-science-toolbox-1/01-writing-your-own-functions/bringing-it-all-together.py","file_name":"bringing-it-all-together.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"649345009","text":"from ComplexLib import *\r\nimport sympy\r\nimport numpy\r\nimport math\r\nimport cmath\r\ndef act(matriz, vector):\r\n '''Función que calcula la acción de una matriz sobre un vector'''\r\n v = [(0, 0) for i in range(len(matriz))]\r\n for i in range(len(matriz)):\r\n for j in range(len(vector)):\r\n v[i] = sumaComplejos(v[i], productoComplejos(matriz[i][j], vector[j]))\r\n return v\r\n\r\ndef modsquare(a):\r\n '''Función que eleva al cuadrado las componentes de el número complejo'''\r\n return a[0] ** 2 + a[1] ** 2\r\n\r\n\r\ndef magVector(v1):\r\n '''Función que encuentra el la magnitud del vector'''\r\n suma = 0\r\n for i in range(len(v1)):\r\n suma += modsquare(v1[i])\r\n return math.sqrt(suma)\r\n\r\n\r\ndef innerP(v1, v2):\r\n '''Función que realiza el producto interno entre dos vectores'''\r\n suma = (0, 0)\r\n for i in range(len(v2)):\r\n v2[i] = conjugado(v2[i])\r\n for i in range(len(v1)):\r\n suma = sumaComplejos(suma, productoComplejos(v2[i], v1[i]))\r\n\r\n return suma\r\n\r\n\r\ndef probability(vector, position):\r\n '''Función que encuentra la probabilidad de observar una particula en una posicion luego de observarlo'''\r\n suma = 0\r\n for i in range(len(vector)):\r\n suma += modsquare(vector[i])\r\n a = modsquare(vector[position])\r\n return round((a / suma) * 100, 2)\r\n\r\n\r\ndef normalized(v1):\r\n '''Función que convierte los vectores en vectores unitarios'''\r\n a = magVector(v1)\r\n for i in range(len(v1)):\r\n v1[i] = divisionComplejos(v1[i], (a, 0))\r\n return v1\r\n\r\n\r\ndef amplitudTransicion(v1, v2):\r\n '''Encuentra la probabilidad de pasar de un estado a otro luego de ser observado'''\r\n return innerP(normalized(v1), normalized(v2))\r\n\r\n\r\ndef expectedValue(obs, state):\r\n '''Función que encuentra el valor esperado entre un observador y un estado inicial'''\r\n m1 = action2(obs, state)\r\n return (innerP(m1, state))[0]\r\n\r\n\r\ndef identity(n, val):\r\n '''Función que crea la matriz identidad y 
tiene como parametro el valor esperado'''\r\n matriz = [[(0,0) for j in range(n)] for i in range(n)]\r\n for i in range(n):\r\n matriz[i][i] = (val,0)\r\n return matriz\r\n\r\ndef varianza(obs, state):\r\n '''Función que encuentra la varianza entre un observador y un estado inicial'''\r\n s1 = list(state)\r\n m1 = restaMatrices(obs, identity(len(state), expectedValue(obs, state)))\r\n m2 = productoMatrices(m1,m1)\r\n m3 = innerP(action2(m2, s1), s1)\r\n return m3[0]\r\n\r\n\r\ndef med_var(observator, state):\r\n '''Función que encuentra la varianza y la media'''\r\n if matrizHermitiana(observator):\r\n print('MEDIA', expectedValue(observator, state))\r\n print('VARIANZA', varianza(observator, state))\r\n else:\r\n print('El observador no es una una matriz hermitiana')\r\n\r\n\r\ndef eigenValues(eValues):\r\n '''Función que encuentra los valores propios de una matriz'''\r\n lst = []\r\n min, max = -100, 100\r\n for i in range(min, max):\r\n if eValues.get(i) is not None:\r\n lst += [i]\r\n return lst\r\n\r\n\r\ndef eigenVectors(eVector):\r\n '''Función que halla los vectores propios de una matriz'''\r\n lst = []\r\n for i in range(len(eVector)):\r\n n = complex(eVector[i])\r\n x, y = int(n.real), int(n.imag)\r\n lst += [(x, y)]\r\n return lst\r\n\r\n\r\ndef convMatriz(m):\r\n '''Función que convierte una matriz de tuplas en una matriz de números complejos'''\r\n m1 = [[0 for j in range(len(m[0]))] for i in range(len(m))]\r\n for i in range(len(m)):\r\n for j in range(len(m[0])):\r\n m1[i][j] = complex(m[i][j][0], m[i][j][1])\r\n return m1\r\n\r\n\r\ndef prob(matriz, state,):\r\n '''Función que halla la probabilidad de que un estado llegue a un vector propio'''\r\n eValues, eVector = reviewObs(matriz, state)\r\n if expectedValue(matriz, state) in eValues:\r\n print('Probabilidad de llegar a un vector propio 100%')\r\n else:\r\n if type(eVector[0]) == list:\r\n for i in range(len(eVector)):\r\n return amplitudTransicion(state, eVector[i])\r\n else:\r\n return 
amplitudTransicion(state, eVector)\r\n\r\n\r\ndef reviewObs(m1, state):\r\n '''Función que revisa que la matriz sea hermitiana, y si lo es, calcula la media y la varianza del observable en el estado dado.'''\r\n a = convMatriz(m1)\r\n a = sympy.Matrix(a)\r\n eValues = a.eigenvals()\r\n eValues = eigenValues(eValues)\r\n x, v = a.eigenvects()[0][2][0], a.eigenvects()[0][0]\r\n eVectors = x*v\r\n eVector = eigenVectors(eVectors)\r\n return eValues, eVector\r\n\r\ndef dynamic(n, matriz, state):\r\n '''Función quhe calcula la posicion de una particula luego de que recorra una serie de matrices unitarias'''\r\n for i in range(n):\r\n state = act(matriz, state)\r\n return state\r\n","sub_path":"teoria_cuantica_basica/teoriaCuanticaBasica.py","file_name":"teoriaCuanticaBasica.py","file_ext":"py","file_size_in_byte":4782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"541173885","text":"# -*- coding: utf-8 -*-\n\"\"\"\nScript to query the updated water use and allocation results.\n\"\"\"\n\nfrom pandas import merge, read_csv, DataFrame\nfrom query_use_allo_v01 import w_query\n\n#################################\n### Parameters\n\nseries_csv = 'C:/ecan/base_data/usage/usage_takes_series_sw_up2_with_cav.csv'\nallo_csv = 'C:/ecan/base_data/usage/takes_results2.csv'\n\nallo_cols = ['crc', 'wap', 'take_type', 'catchment', 'irr_area', 'gw_zone', 'sw_zone', 'use_type', 'catchment_num', 'cwms_zone']\ncwms_zone = ['Ashburton']\nyears = [2015]\nuse_type = ['stockwater']\ngrp_by = ['dates']\nallo_col = ['ann_allo_m3', 'up_allo_m3']\n\nexport_path = 'C:/ecan/Projects/requests/cwms/set2/ash_results_up_with_cav.csv'\n\n\n#################################\n### Read in allocation and usage data and merge data\n\nseries = read_csv(series_csv)\nallo = read_csv(allo_csv)[allo_cols]\n\nallo_use1 = merge(series, allo, on=['crc', 'wap'])\n\n### Read in input data to be used in the query\n\n#################################\n### Query data\n\nq1 = w_query(allo_use1, grp_by=grp_by, allo_col=allo_col, use_type=use_type, years=years, cwms_zone=cwms_zone, export_path=export_path, debug=True)\n\n\n\n\n","sub_path":"python_scripts/usage/requests/WUS_ROS_query_ashburton.py","file_name":"WUS_ROS_query_ashburton.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"529761585","text":"#!/usr/bin/env python\n\nfrom BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer\nimport SocketServer\nimport json\nimport urlparse\nimport subprocess\nfrom ChatbotGet import ChatbotGet\n\nchatbotGet=ChatbotGet()\n\nclass S(BaseHTTPRequestHandler):\n\tdef _set_headers(self):\n\t\tself.send_response(200)\n\t\tself.send_header('Content-type', 'application/json')\n\t\tself.end_headers()\n\n\tdef do_GET(self):\n\t\tself._set_headers()\n\t\tparsed_path = urlparse.urlparse(self.path)\n\t\trequest_id = parsed_path.path[1:]\n\t\t# response = subprocess.check_output([\"python\", request_id+'.py'])\n\t\t# self.wfile.write(json.dumps(response))\n\t\tself.wfile.write(json.dumps(chatbotGet.return_message(request_id)))\n\n\tdef do_POST(self):\n\t\tself._set_headers()\n\t\tparsed_path = urlparse.urlparse(self.path)\n\t\trequest_id = parsed_path.path\n\t\tprint(\"request_id of post\")\n\t\tprint(request_id)\n\t\tresponse = subprocess.check_output([\"python\", request_id])\n\t\tself.wfile.write(json.dumps(response))\n\n\tdef do_HEAD(self):\n\t\tself._set_headers()\n\ndef run(server_class=HTTPServer, handler_class=S, port=8000):\n\tserver_address = ('', port)\n\thttpd = server_class(server_address, handler_class)\n\tprint('Starting httpd...')\n\thttpd.serve_forever()\n\nif __name__ == \"__main__\":\n\tfrom sys import argv\n\n\tif len(argv) == 2:\n\t\trun(port=int(argv[1]))\n\telse:\n\t\trun()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"284631479","text":"'''\nHand class\n'''\n\nclass Hand:\n\t'''\n\t@input cards: initial cards\n\t'''\n\tdef __init__(self):\n\t\tself.cards = []\n\n\t'''\n\t@input: tuple card (suit, rank)\n\t'''\n\tdef __str__(self):\n\t\tstr = \"\"\n\t\tfor card in self.cards:\n\t\t\tstr += f\"{card[1]}-of-{card[0]}\\n\"\n\t\treturn str\n\n\t'''\n\t@return score: check the score on current hand\n\t'''\n\tdef getscore(self):\n\t\tscore = 0\n\t\tnumbers_A = 0\n\t\tfor card in self.cards:\n\t\t\tif card[1] == \"King\" or card[1] == \"Queen\" or card[1] == \"Jack\" or card[1] == \"10\": \n\t\t\t\tscore += 10\n\t\t\telif card[1] == \"Ace\":\n\t\t\t\tscore += 11\n\t\t\t\tnumbers_A += 1\n\t\t\telse:\n\t\t\t\tscore += int(card[1])\n\n\t\tif score <= 21:\n\t\t\treturn score\n\t\telse:\n\t\t\tfor i in range(numbers_A):\n\t\t\t\tscore -= 10\n\t\t\t\tif score <= 21:\n\t\t\t\t\treturn score\n\t\t\treturn \"Bust\"\n","sub_path":"08-Milestone Project - 2/black_jack/module/hand.py","file_name":"hand.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"419306075","text":"import logging\nimport requests\nimport time\n\nfrom threading import Event\nfrom wrapt.decorators import synchronized\n\n\nclass ConnectionStorage:\n @property\n def connected(self) -> Event:\n return self.__connected\n\n @property\n def connection_timeout(self) -> int:\n return self.__connection_timeout\n\n @property\n @synchronized\n def session_id(self) -> str:\n if not self.__session_id:\n raise ConnectionError(\"Connection required.\")\n\n if self.is_timeout_expired():\n self.__connected.clear()\n raise TimeoutError(\"Connection has probably expired.\")\n\n return self.__session_id\n\n @session_id.setter\n @synchronized\n def session_id(self, session_id: str):\n if session_id:\n self.__session_id = session_id\n self.__connected.set()\n else:\n self.__session_id = session_id\n self.__connected.clear()\n\n def __init__(\n self,\n connection_timeout: int = 15,\n ):\n self.__connection_timeout = connection_timeout\n\n self.__connected = Event()\n self.__last_success = 0\n self.__logger = logging.getLogger(self.__module__)\n self.__session_id = \"\"\n\n @synchronized\n def is_timeout_expired(self):\n if not self.__last_success:\n return False\n\n return (time.monotonic() - self.__last_success) > self.__connection_timeout\n\n @synchronized\n def response_hook(self, response, *args, **kwargs):\n \"\"\"This hook will intercept all the \"requests.Response\".\"\"\"\n\n timestamp = time.monotonic()\n status_code = response.status_code\n\n if self.__last_success < timestamp and status_code == 200:\n self.__last_success = timestamp\n\n def setup_hooks(self, session: requests.Session):\n hooks = {\"response\": [self.response_hook]}\n session.hooks.update(hooks)\n","sub_path":"degiro_connector/quotecast/models/connection_storage.py","file_name":"connection_storage.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"310796351","text":"# USB VCP example.\n# This example shows how to use the USB VCP class to send an image to PC on demand.\n#\n# WARNING:\n# This script should NOT be run from the IDE or command line, it should be saved as main.py\n# Note the following commented script shows how to receive the image from the host side.\n#\n# #!/usr/bin/env python2.7\n# import sys, serial, struct\n# port = '/dev/ttyACM0'\n# sp = serial.Serial(port, baudrate=115200, bytesize=serial.EIGHTBITS, parity=serial.PARITY_NONE,\n# xonxoff=False, rtscts=False, stopbits=serial.STOPBITS_ONE, timeout=None, dsrdtr=True)\n# sp.setDTR(True) # dsrdtr is ignored on Windows.\n# sp.write(\"snap\")\n# sp.flush()\n# size = struct.unpack(' 1:\n cv2.putText(image,\"All good!\", \n bottomLeftCornerOfText, \n font, \n fontScale,\n goodFontColor,\n lineType)\n # print(\"All good!\")\n else:\n cv2.putText(image,\"I can't see your face\", \n bottomLeftCornerOfText, \n font, \n fontScale,\n goodFontColor,\n lineType)\n # print(\"I can't see your face\")\n\n \n cv2.line(image, tuple(point1), tuple(\n point2), color, line_width, cv2.LINE_AA)\n cv2.polylines(image, [point_2d], True, color, line_width, cv2.LINE_AA)\n cv2.line(image, tuple(point_2d[1]), tuple(\n point_2d[6]), color, line_width, cv2.LINE_AA)\n cv2.line(image, tuple(point_2d[2]), tuple(\n point_2d[7]), color, line_width, cv2.LINE_AA)\n cv2.line(image, tuple(point_2d[3]), tuple(\n point_2d[8]), color, line_width, cv2.LINE_AA)\n\n def get_pose_marks(self, marks):\n \"\"\"Get marks ready for pose estimation from 68 marks\"\"\"\n pose_marks = []\n pose_marks.append(marks[30]) # Nose tip\n pose_marks.append(marks[8]) # Chin\n pose_marks.append(marks[36]) # Left eye left corner\n pose_marks.append(marks[45]) # Right eye right corner\n pose_marks.append(marks[48]) # Left Mouth corner\n pose_marks.append(marks[54]) # Right mouth corner\n return 
np.array(pose_marks)\n","sub_path":"pose_estimator.py","file_name":"pose_estimator.py","file_ext":"py","file_size_in_byte":7833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"8075204","text":"import tornado.httpserver\nimport tornado.ioloop\nimport tornado.options\nimport tornado.web\nfrom tornado.options import define, options\ndefine(\"port\", default=8888, help=\"run on the given port\", type=int)\nclass IndexHandler(tornado.web.RequestHandler):\n def get(self,input):\n self.write(input[::-1])\n def write_error(self,status_code,**kwargs):\n swlf.write(\"Gosh Darnit % error\" %staticmethod)\nclass WrapHandler(tornado.web.RequestHandler):\n def post(self):\n text1 = self.get_argument('text')\n self.write(text1)\nif __name__ == \"__main__\":\n tornado.options.parse_command_line()\n app = tornado.web.Application(handlers = [(r\"/in/(\\w+)\",IndexHandler),\n (r\"/wrap\",WrapHandler)])\n httpserverp = tornado.httpserver.HTTPServer(app)\n httpserverp.listen(options.port)\n tornado.ioloop.IOLoop.instance().start()\n","sub_path":"hellowtornado.py","file_name":"hellowtornado.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"10059226","text":"\"\"\"\n哈夫曼二叉树树\n\n给定n个权值作为n个叶子结点,构造一棵二叉树,若带权路径长度达到最小,称这样的二叉树为最优二叉树,也称为哈夫曼树(Huffman Tree)。哈夫曼树是带权路径长度最短的树,权值较大的结点离根较近。\n\n构造哈夫曼树的算法\n\n哈夫曼提出了一个算法,它能从任意的时数集合构造出与之对应的哈夫曼树。这个构造算法描述如下:\n 1,算法的输入为实数集 W = {w0, w1,...w(m-1)}\n 2,在构造中维护一个包含k颗二叉树的集合F,开始时k=m,且F={T0,T1,...T(m-1)},其中每个T1是一颗只包含权为w(i)的跟结点的单点二叉树。\n 3,算法过程中重复执行下面两个步骤,直到集合F中剩下一棵树为止\n a:构造一颗新的二叉树,其左右子树是从集合F中选取的两颗权最小的二叉树,其根结点的权值设置为这两颗子树的根节点的权值之和\n b:将所选的两颗二叉树从F中删除,把新构造的二叉树加入F,这个步骤每做一次,F里的二叉树就减少了一颗,这就保证了本算法必定结束\n\n如何实现\n\n显然,构造算法执行中需要维护一组二叉树,而且要直到每棵树(其树根结点)的权值,可以考虑使用二叉树的结点类构造哈夫曼树,在树根结点记录树的权值。\n\n在算法执行过程中需要不断选出权值最小的两颗二叉树, 并基于他们构造出一颗新的二叉树,很容易想到,我们需要最佳的选择就是用优先队列存放这组二叉树,按照二叉树的跟结点的权值排列优先顺序,从小到大\n\n算法开始时建立起一组单结点的二叉树,以权值作为优先码存入优先队列,要求先取出队列里的最小元素,然后反复做下面的事情\n 1,从优先队列里面弹出两个权值最小的元素(两颗二叉树)\n 2,基于所取的二叉树构造一颗新的二叉树,其权值取两颗子树权值之和,并向构造的新二叉树压入优先队列\n\n需要解决的问题:\n 1.需要为二叉树定义一个序,权值小的二叉树在前,\n 2。需要检查优先队列中的元素个数,以便在剩一颗时结束,这些都可以通过扩充前面已经定义的类型实现\n\n应用:哈夫曼编码\n\"\"\"\n\nfrom binary_tree import BinTreeNode\nfrom priority_queue_list import PriorityQueue\n\n# 以二叉树结点类作为基类,定义一个专门为构造哈夫曼树用的结点类,其特点是怎加一个小于比较操作符\nclass HaffmanNode(BinTreeNode):\n def __lt__(self, othernode):\n return self.data < othernode.data\n\n# 定义一个专门为哈夫曼算法服务的优先队列类,增加了一个检查队列中元素个数的方法\nclass HuffmanPriorityQueue(PriorityQueue):\n def number(self):\n return len(self._elems)\n\n# 定义哈夫曼树\ndef HuffmanTree(weights):\n trees = HuffmanPriorityQueue() # 实例化优先队列\n\n for w in weights: # 将每��权重构造成单点二叉树,然后全部加入优先队列\n each_tree = HaffmanNode(i)\n trees.enqueue(each_tree)\n\n while trees.number() > 1: # 直到队列中只剩下一个树,就是哈夫曼树\n t1 = trees.dequeue()\n t2 = trees.dequeue() # 取出最小的两棵树\n x = t1.data + t2.data # 得到新树的权重\n new_tree = HaffmanNode(x, t1, t2) # 构造新树\n trees.enqueue(new_tree) # 将新树加入优先队列\n\n return trees.dequeue()\n","sub_path":"haffman_tree.py","file_name":"haffman_tree.py","file_ext":"py","file_size_in_byte":3560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"607778442","text":"def chop(lst):\n del lst[0]\n del lst[-1]\n \n\n\ndef middle(lst):\n new = lst[1:-1]\n \n return new\n\n\nmy_list = [1, 2, 3, 4]\nmy_list2 = [1, 2, 3, 4]\n\nchop_list = chop(my_list)\nprint(my_list) # Should be [2,3]\nprint(chop_list) # Should be None\n\nmiddle_list = middle(my_list2)\nprint(my_list2) # Should be unchanged\nprint(middle_list) \n","sub_path":"Python txt book exercise/8.1.py","file_name":"8.1.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"369459412","text":"import argparse\nimport numpy as np\n\nimport utils\nimport os\nimport time\nimport sys\nfrom concurrent.futures import (\n ProcessPoolExecutor,\n as_completed\n)\n\ndef get_arguments():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--inString',\n '-i',\n type=str,\n help=' factorization target')\n parser.add_argument('--K', '-K', type=int, help='Number of process')\n\n return parser.parse_args()\n\n\ndef factor_opt(M, search_range):\n\n for i in search_range:\n if M % i == 0:\n return str(i)\n return 'no'\n\n\n\ndef main(args):\n\n M = int(args.inString)\n M_prime = int(np.sqrt(M))\n search_range = np.array([i + 2 for i in range(M_prime - 2)])\n splitted = np.array_split(search_range, args.K)\n\n res = 'no'\n\n with ProcessPoolExecutor() as e:\n futures = set([e.submit(factor_opt, M, split_range)\n for split_range in splitted])\n\n\n for future in as_completed(futures):\n temp = future.result()\n if temp !='no':\n res = temp\n\n return res\n\n # すべてnoだった場合はこちらを返す\n return res\n\n\nif __name__ == '__main__':\n\n arg = get_arguments()\n res = main(arg)\n print(res)\n","sub_path":"problemsets/chapter8/factor_multiprocess.py","file_name":"factor_multiprocess.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"401876263","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 5 17:00:59 2021\n\n@author: 孔湘涵\n\"\"\"\nimport numpy as np\nimport time\nimport random\nimport matplotlib.pyplot as plt\n\n# =============================================================================\n# Spectral clustering/ Normalized cut functions\n# =============================================================================\n\ndef spectral_cluster_slow(data):\n '''\n The classic spetral clustering algorithm.\n arguments:\n - data: np.ndarray of shape [no_data, no_dimensions]\n input data points\n returns:\n - U: top k eigenvectors, np.ndarray of shape [no_data, k] \n '''\n time1 = time.time()\n number = data.shape[0]\n #Step 1: compute matrix W\n w1 = np.broadcast_to(data, (number,number)) #w1[:,0,:] is same\n w2 = w1.T\n w = np.float32(w1)-np.float32(w2)\n W = np.exp(-w**2)\n time2 = time.time()\n print('Time of compute matrix W is ',time2-time1) \n \n #Step2: compute matrix D\n d = W.sum(axis = 0)\n D = np.diag(d)\n time3 = time.time()\n print('Time of compute matrix D is ',time3-time2) \n \n #Step3: compute Graph Laplacian matrix\n L = np.linalg.inv(D)**0.5 @ (D - W) @ np.linalg.inv(D)**0.5\n time4 = time.time()\n print('Time of compute matrix L is ',time4-time3)\n \n #Step4: do eigenvalue decomposition of L\n values, vectors = np.linalg.eigh(L)\n time5 = time.time()\n print('Time of compute eigendecomposition is ',time5-time4)\n\n #Step5: find k smallest eigenvalues\n gap = np.zeros(len(values)-1)\n for i in range(gap.shape[0]):\n gap[i] = values[i+1] - values[i]\n k = np.argmax(gap)+1\n U = vectors[:,:k]\n print('Total time is ',time.time()-time1)\n \n return U\n\ndef spectral_cluster_nystrom(A, B, sample_indices, remain_indices):\n '''\n The fast spetral clustering algorithm using Nystrom method.\n arguments:\n - A: similarity sub-matrix shape [no_samples, no_samples]\n - B: similarity sub-matrix shape [no_samples, no_remaining_points]\n -sample_indices: np.ndarray of 
shape [no_samples]\n -remain_indices: np.ndarray of shape [no_remaining_points]\n \n returns:\n - V: top k eigenvectors, np.ndarray of shape [no_data, k] \n '''\n num_points = A.shape[1] + B.shape[1]\n num_samples = sample_indices.shape[0]\n \n #1. compute row sums of w which is d, and reset the samples location at front\n d1 = np.sum(A,axis=1) + np.sum(B,axis=1)\n d2 = np.sum(B,axis=0) + np.dot(B.T,np.dot(np.linalg.pinv(A),np.sum(B,axis=1)))\n dhat = np.reshape(np.sqrt(1/np.concatenate([d1,d2])),[num_points,1])\n \n #2. get new A & B\n A = A * np.dot(dhat[0:num_samples],dhat[0:num_samples].T)\n B = B * np.dot(dhat[0:num_samples],dhat[num_samples:].T)\n \n #3.compute s and diagonalize it\n Asi = np.linalg.pinv(A**0.5)\n BBT = np.dot(B,B.T)\n S = A + np.dot(Asi,np.dot(BBT,Asi))\n us,gammas,_ = np.linalg.svd(S)\n gammas = np.diag(gammas)\n\n #4, choose the first k singular vectors\n k = 8\n ABT = np.zeros((num_points,num_samples))\n ABT[sample_indices,:] = A\n ABT[remain_indices,:] = B.T\n V = ABT @ Asi @ us[:,1:k] @ np.linalg.pinv(gammas[1:k,1:k]**0.5) \n v = V / np.broadcast_to(np.linalg.norm(V,axis=1).reshape(-1,1), (V.shape)) #data normalization\n return v\n\n# =============================================================================\n# Similarity matrix construction functions\n# =============================================================================\n\ndef sample(row,col,num_sample):\n '''\n Produce some samples.\n arguments:\n - row,col: image size.\n - num_sample: no_samples, number of samples\n \n returns:\n - sample_indices: np.ndarray of shape [no_samples]\n - remain_indices: np.ndarray of shape [no_remaining_points]\n '''\n sample_indices = np.array(random.sample(range(row*col), num_sample))\n remain_indices = np.delete(range(row*col), sample_indices)\n return sample_indices, remain_indices\n\ndef similarity(data, sample_indices, method='fully'):\n '''\n Compute similarity sub-matrix A & B.\n arguments:\n - data: np.ndarray of shape [no_data, 
no_dimensions]\n - sample_indices np.ndarray of shape [no_samples]\n - method choose the type of similaritymatrix, default is 'fully connected graph',\n if want to use 'ε- neighborhood graph', set method as other value\n \n returns:\n - A similarity sub-matrix shape [no_samples, no_samples]\n - B similarity sub-matrix shape [no_samples, no_remaining_points]\n \n Hint: data needs normalized to 1, in case AB=0.\n '''\n data=np.float32(data/data.max())\n length = data.shape[0]\n AB = np.zeros((len(sample_indices),length)) \n samples = data[sample_indices,:]\n sigma=1 \n for i in range(len(sample_indices)):\n # use Gaussian kernel to define the similarity\n AB[i,:] = np.exp((-np.linalg.norm((samples[i,:] - data), axis = 1)**2)/sigma) #fully connected sigma=1 \n if method != 'fully':\n AB = AB[AB>np.exp(-0.8)**2].reshape((len(sample_indices),length)) #ε- neighborhood default is 0.8\n print('ε- neighborhood graph.')\n else:\n print('fully connected graph.')\n A = AB[:,sample_indices]\n B = AB[:,np.delete(range(length), sample_indices)]\n return A,B\n\n# =============================================================================\n# K-means functions\n# =============================================================================\n\ndef k_means_1d(X, centroids, n_iterations):\n '''\n standard k-means algorithm\n arguments:\n - X: np.ndarray of shape [no_data]\n input data points\n - centroids: np.ndarray of shape [k]\n centres of initial custers\n - n_iterations: integer, number of iterations to run k-means for\n returns:\n - which_component: np.ndarray of shape [no_data] and integer data\n type, contains values in [0, k-1] indicating which\n cluster each data point belongs to\n - centroids: np.ndarray of shape [k], centres of \n final custers, ordered in such way as indexed by\n `which_component`\n '''\n k = centroids.shape[0]\n for _ in range(n_iterations):\n # reassign data points to components\n distances = np.linalg.norm(np.expand_dims(X, axis=1) - centroids, 
axis=-1, ord=2)\n \n which_component = np.argmin(distances, axis=-1)\n # calcuate centroid for each component\n centroids = np.stack(list( X[which_component==i].mean(axis=0) for i in range(k) ), axis=0)\n\n return which_component, centroids\n\ndef k_means_pp_1d(X, k):\n '''\n Compute initial custer for k-means\n arguments:\n - X: np.ndarray of shape [no_data]\n input data points\n returns:\n - centroids: np.ndarray of shape [k]\n centres of initial custers\n '''\n channels = 1\n num_data = X.shape[0] \n\n #step1: get a random point as the first center\n index1 = int(np.random.random_sample()*num_data) \n centroids = np.zeros((k,channels))\n centroids[0] = X[index1] \n index = np.zeros(k) #the index of centers in dataset\n index[0] = index1\n for i in range(1,k):\n #step2: compute every point's distance to the nearest existing centroid\n distance = np.ones(num_data) \n for j in range(num_data): #for all data\n dis = np.ones(i+1)\n for m in range(0,i+1):\n #each distance between center m and every point\n dis[m] = np.linalg.norm(X[j] - centroids[m])\n #assign each point to the nearest center with minimum distance\n distance[j] = dis.min() \n if distance[j] == 0:\n distance[j] += 1e-5\n #step3: choose one point as the centre of a new cluster with probability proportional to distance**2\n index[i] = np.argmax(distance) \n centroids[i] = X[int(index[i])]\n return centroids\n\ndef k_means(X, centroids, n_iterations):\n '''\n standard k-means algorithm\n arguments:\n - X: np.ndarray of shape [no_data, no_dimensions]\n input data points\n - centroids: np.ndarray of shape [k, no_dimensions]\n centres of initial custers\n - n_iterations: integer, number of iterations to run k-means for\n returns:\n - which_component: np.ndarray of shape [no_data] and integer data\n type, contains values in [0, k-1] indicating which\n cluster each data point belongs to\n - centroids: np.ndarray of shape [k, no_dimensions], centres of \n final custers, ordered in such way as indexed by\n 
`which_component`\n '''\n k = centroids.shape[0]\n for _ in range(n_iterations):\n # reassign data points to components\n distances = np.linalg.norm(np.expand_dims(X, axis=1) - centroids, axis=-1, ord=2)\n which_component = np.argmin(distances, axis=-1)\n # calcuate centroid for each component\n centroids = np.stack(list( X[which_component==i].mean(axis=0) for i in range(k) ), axis=0)\n\n return which_component, centroids\ndef k_means_pp(X, k):\n '''\n Compute initial custer for k-means\n arguments:\n - X: np.ndarray of shape [no_data, no_dimensions]\n input data points\n returns:\n - centroids: np.ndarray of shape [k, no_dimensions]\n centres of initial custers\n '''\n num_data, channels = X.shape \n\n #step1: get a random point as the first center\n index1 = int(np.random.random_sample()*num_data) \n centroids = np.zeros((k,channels))\n centroids[0,:] = X[index1,:] \n index = np.zeros(k) #the index of centers in dataset\n index[0] = index1\n for i in range(1,k):\n #step2: compute every point's distance to the nearest existing centroid\n distance = np.ones(num_data) \n for j in range(num_data): #for all data\n dis = np.ones(i+1)\n for m in range(0,i+1):\n #each distance between center m and every point\n dis[m] = np.linalg.norm(X[j,:] - centroids[m,:])\n #assign each point to the nearest center with minimum distance\n distance[j] = dis.min() \n if distance[j] == 0:\n distance[j] += 1e-5\n #step3: choose one point as the centre of a new cluster with probability proportional to distance**2\n index[i] = np.argmax(distance) \n centroids[i,:] = X[int(index[i]),:]\n return centroids\n\n# =============================================================================\n# Display functions\n# =============================================================================\n\ndef display_clusters(img, which_component, k=-1):\n '''\n Display the cluster result as the row image color.\n \n Param:\n img color RGB image, row*col*channels\n which_component 1d size = row*col, each 
point represent which cluster belongs to.\n (k) set by default, number of clusters\n '''\n row,col = img.shape[:2]\n which_component = which_component.astype(np.int64)\n if k==-1:\n k=which_component.max()+1\n else:\n pass\n center_value = np.zeros((k,3))\n result = np.zeros(img.shape)\n #calculate the mean value of each clusters\n for n in range(k):\n mask = np.array([which_component==n]).reshape((row,col))\n number = mask.sum()\n center_value[n,0] = (mask*img[:,:,0]).sum()/number\n center_value[n,1] = (mask*img[:,:,1]).sum()/number\n center_value[n,2] = (mask*img[:,:,2]).sum()/number\n result[:,:,0] += mask*center_value[n,0]\n result[:,:,1] += mask*center_value[n,1]\n result[:,:,2] += mask*center_value[n,2]\n plt.figure()\n plt.imshow(np.uint8(result)) \n# plt.title('Clustering result (RGB & fully).')\n return np.uint8(result)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":12251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"587567591","text":"import time\nplayer_life=130\nplayer_attack=15\n\nenemy_life=150\nenemy_attack=10\n\nwhile player_life>0 and enemy_life>0:\n player_life-=enemy_attack\n enemy_life-=player_attack\n print('敌人发动攻击后,玩家的血量:'+str(player_life))\n print('玩家发动攻击后,敌人的学量:'+str(enemy_life))\n time.sleep(1.5)\nif player_life>0 and enemy_life<=0:\n print(\"玩家获胜\")\nelse:\n print(\"敌人获胜\")","sub_path":"first_step/a_game.py","file_name":"a_game.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"537253643","text":"# Copy List with Random Pointer\n\n# A linked list is given such that each node contains an additional random pointer which could point to any node in the list or null.\n\n# Return a deep copy of the list.\n\n# The Linked List is represented in the input/output as a list of n nodes. Each node is represented as a pair of [val, random_index] where:\n\n# val: an integer representing Node.val\n# random_index: the index of the node (range from 0 to n-1) where random pointer points to, or null if it does not point to any node.\n\n# Example 1:\n# Input: head = [[7,null],[13,0],[11,4],[10,2],[1,0]]\n# Output: [[7,null],[13,0],[11,4],[10,2],[1,0]]\n\n# Example 2:\n# Input: head = [[1,1],[2,1]]\n# Output: [[1,1],[2,1]]\n\n# Example 3:\n# Input: head = [[3,null],[3,0],[3,null]]\n# Output: [[3,null],[3,0],[3,null]]\n\n# Example 4:\n# Input: head = []\n# Output: []\n# Explanation: Given linked list is empty (null pointer), so return null.\n\n# Constraints:\n\n# -10000 <= Node.val <= 10000\n# Node.random is null or pointing to a node in the linked list.\n# Number of Nodes will not exceed 1000.\n\n\"\"\"\n# Definition for a Node.\nclass Node(object):\n def __init__(self, val, next, random):\n self.val = val\n self.next = next\n self.random = random\n\"\"\"\nclass Solution(object):\n def copyRandomList(self, head):\n \"\"\"\n :type head: Node\n :rtype: Node\n \"\"\"\n nodeDict = dict()\n dummy = Node(0, None, None)\n nodeDict[head] = dummy\n newHead, pointer = dummy, head\n while pointer:\n node = Node(pointer.val, pointer.next, None)\n nodeDict[pointer] = node\n newHead.next = node\n newHead, pointer = newHead.next, pointer.next\n pointer = head\n while pointer:\n if pointer.random:\n nodeDict[pointer].random = nodeDict[pointer.random]\n pointer = pointer.next\n return 
dummy.next\n","sub_path":"138.py","file_name":"138.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"375063585","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\nPE_0357\n\nPrime generating integers\n\nCreated on Thu Nov 3 11:31:19 2016\n@author: mbh\n\"\"\"\nimport numpy as np\nimport time\n\ndef p357(limit):\n t=time.clock()\n \n primes=np.ones(limit+1,dtype=bool) \n for i in range(2, int((limit+1)**0.5+1)):\n if primes[i]:\n primes[2*i::i]=False\n\n sf=np.ones(limit+1,dtype=bool) \n for i in range(2, int((limit+1)**0.5+1)):\n if sf[i]:\n sf[i**2::i**2]=False\n \n nsum=1 \n for n in range(2,limit,4):\n if primes[n+1] and primes[n//2+2] and sf[n] and all(primes[d+n//d] for d in range(3,int(n**.5)+1) if not n%d):\n nsum+=n\n\n print(nsum,time.clock()-t)\n \n\n","sub_path":"PE_0357/PE_0357.py","file_name":"PE_0357.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"449040498","text":"import os\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime, timedelta\nimport json\nimport locale\nimport sys\nimport random\n\nfrom flask_login import current_user\n\nfrom dash import Dash\nfrom dash.dependencies import Input, Output\nimport dash_core_components as dcc\nimport dash_bootstrap_components as dbc\nimport dash_html_components as html\nimport dash_table\nimport plotly.graph_objs as go\nfrom plotly.subplots import make_subplots\n\nfrom dashboards.dash_functions import apply_layout_with_auth, create_toolip\nfrom dashboards.dash_configs import layout, engine, colors_le, tooltip_text\n\nif sys.platform == 'win32':\n locale.setlocale(locale.LC_ALL, 'rus_rus')\nelse:\n locale.setlocale(locale.LC_ALL, 'ru_RU.UTF-8')\n\nurl_base = '/dash/dashfte/'\n\n\ndef input_to_list(input_value):\n if not isinstance(input_value, list):\n return [input_value]\n else:\n return input_value\n\n\ndates_list = sorted(list(\n pd.read_sql('SELECT DISTINCT month_start FROM hc_data_main',\n con=engine, parse_dates=['month_start'])['month_start'].dropna()))\ndates_marks = {i: \"\" for i in range(len(dates_list))}\ndates_slider = dcc.RangeSlider(\n id='dates_slider',\n value=[len(dates_list)-13, len(dates_list)-1],\n min=0,\n max=len(dates_list),\n className=\"dcc_control\"\n)\n\nn_function_show_slider = dcc.Slider(\n id='n_functions_show_slider',\n value=10,\n min=0,\n max=40,\n className='dcc_control'\n)\n\nle_list = list(pd.read_sql('''\nSELECT DISTINCT legal_entity_short\nFROM hc_data_main\n''', con=engine)['legal_entity_short'])\nle_options = [{'label': str(dept), 'value': str(dept)} for dept in le_list]\nle_checklist = dcc.Checklist(\n id='le_checklist',\n options=le_options,\n value='ГРС',\n className='dcc_control',\n labelStyle={'display': 'block'}\n )\n\nle_dict = pd.read_sql('SELECT DISTINCT legal_entity_short_eng, legal_entity_short FROM hc_data_main',\n 
con=engine).set_index('legal_entity_short_eng').to_dict()['legal_entity_short']\nle_options_dict = [{'label': str(value), 'value': str(key)} for key, value in le_dict.items()]\n\nle_dropdown = dcc.Dropdown(\n id='le_dropdown',\n options=le_options_dict,\n multi=True,\n value=['grs', 'medcorp', 'inrosmed', 'renprime', 'holdingrs', 'renconsult', 'renfinance'],\n className='dcc_control'\n)\n\nle_radio = dcc.RadioItems(\n id='le_radio',\n options=[\n {'label': 'Все', 'value': 'all'},\n {'label': 'Все активные', 'value': 'active'},\n {'label': 'Только ГРС', 'value': 'grs_only'},\n {'label': 'Только РЗ', 'value': 'rz_only'},\n ],\n value='all',\n labelStyle={'display': 'inline-block'},\n className='dcc_control'\n )\n\n\nfunctions_list = sorted(list(\n pd.read_sql('SELECT DISTINCT function FROM hc_data_main',\n con=engine)['function'].dropna()))\nfunctions_options = [{'label': str(item), 'value': str(item)} for item in functions_list]\nfunctions_dropdown = dcc.Dropdown(\n id='functions_dropdown',\n options=functions_options,\n multi=True,\n value=functions_list[0],\n className='dcc_control'\n)\n\nwf_type_radio = dcc.RadioItems(\n id='wf_type_radio',\n options=[\n {'label': 'Топ N изменений', 'value': 'top_n'},\n {'label': 'По выбранным функциям', 'value': 'selected_functions'},\n ],\n value='top_n',\n labelStyle={'display': 'inline-block'},\n className='dcc_control'\n)\n\ninclude_maternity_checkbox = dcc.Checklist(\n id='include_maternity_checkbox',\n options=[\n {'label': 'Включить декретниц', 'value': 'include_maternity'},\n ],\n value=[]\n)\n\ninclude_all_function_selected = dcc.Checklist(\n id='include_all_function_selected',\n options=[\n {'label': 'Суммировать выбранные функции', 'value': 'include_all_function_selected'},\n ],\n value=[]\n)\n\nlayout = html.Div([\n\n dcc.Tabs([\n dcc.Tab(label='Динамика по Юр. 
Лицам.',\n className='custom-tab',\n selected_className='custom-tab--selected',\n children=[\n html.Div([\n html.Div([\n create_toolip('fte_main_controls', tooltip_text['fte_main_controls']),\n html.Div(id='selected_dates_text', className=\"control_label\"),\n dates_slider,\n html.Div(\"Отобрать по Юр. лицам\", className=\"control_label\"),\n le_radio,\n le_dropdown,\n ],\n className='pretty_container six columns'),\n html.Div([\n create_toolip('current_fte_card', tooltip_text['current_fte_card']),\n html.H6(id='current_fte_value'),\n html.P(id='current_fte_text')],\n id='current_fte_container',\n className=\"mini_container three columns\",\n ),\n\n html.Div(\n [\n create_toolip('changes_fte_card', tooltip_text['changes_fte_card']),\n html.H6(id='change_fte_value'),\n html.P(id='change_fte_text')],\n id='change_fte_container',\n className=\"mini_container three columns\",\n )],\n id=\"info-container\",\n className=\"row flex_display\",\n ),\n html.Div([\n dcc.Graph(id='total_fte_graph'),\n create_toolip('main_fte_graph', tooltip_text['main_fte_graph']),\n ], className='pretty_container'),\n html.Div(\n id='fte_table_container',\n className='pretty_container'),\n html.Div([\n create_toolip('main_hc_graph', tooltip_text['main_hc_graph']),\n include_maternity_checkbox,\n dcc.Graph(id='total_hc_graph')\n ], className='pretty_container'),\n ]),\n dcc.Tab(label='Изменения по функциям',\n className='custom-tab',\n selected_className='custom-tab--selected',\n children=[\n html.Div([\n html.Div([\n wf_type_radio,\n html.Div(\n id='selected_n_functions_text',\n className=\"control_label\"),\n n_function_show_slider,\n functions_dropdown,\n create_toolip('fte_functions_controls', tooltip_text['fte_functions_controls']),\n ], className='pretty_container three columns'),\n html.Div([\n dcc.Graph(id='change_fte_wf'),\n create_toolip('fte_waterfall', tooltip_text['fte_waterfall']),\n ], className='pretty_container nine columns'),\n ], className=\"row flex_display\"),\n 
html.Div([\n html.Div([\n dcc.Graph(id='function_fte_graph'),\n create_toolip('fte_graph_function', tooltip_text['fte_graph_function'])\n ], className='pretty_container four columns'),\n html.Div([\n html.Div(id='detailed_function_table_container'),\n create_toolip('fte_table_detailed', tooltip_text['fte_table_detailed'])\n ], className='pretty_container eight columns'),\n ], className=\"row flex_display\"),\n html.Div(\n id='detailed_people_table_container',\n className='pretty_container'),\n ]),\n ])\n])\n\n\ndef register_dash(server):\n external_stylesheets = [\n {\n 'href': '../../static/build/css/dash_styles.css',\n 'rel': 'stylesheet'\n },\n {\n 'href': '../../static/vendors/font-awesome/css/font-awesome.min.css',\n 'rel': 'stylesheet'\n },\n ]\n app = Dash(server=server, url_base_pathname=url_base, external_stylesheets=external_stylesheets)\n apply_layout_with_auth(app, layout)\n\n @app.callback(\n Output(\"le_dropdown\", \"value\"),\n [Input(\"le_radio\", \"value\")]\n )\n def selection_drop(selected_radio):\n if selected_radio == \"all\":\n return list(le_dict.keys())\n elif selected_radio == 'active':\n return [x for x in le_dict.keys() if x not in ['bos', 'intouch', 'welbi']]\n elif selected_radio == 'grs_only':\n return ['grs']\n elif selected_radio == 'rz_only':\n return ['rz']\n else:\n return list(le_dict.keys())\n\n # Панелька с датаслайдером\n @app.callback(Output('selected_dates_text', 'children'),\n [Input('dates_slider', 'value')])\n def get_selected_dates_text(dates_range):\n start_date = dates_list[dates_range[0] - 1]\n end_date = dates_list[dates_range[1] - 1]\n text_string = 'Данные за период: {}'.format(\n str(datetime.strftime(start_date, '%b %Y')) +\n \" - \" +\n str(datetime.strftime(end_date, '%b %Y')))\n return text_string\n\n # Панель с текущей численностью\n @app.callback(Output('current_fte_value', 'children'),\n [Input('dates_slider', 'value'),\n Input('le_dropdown', 'value')])\n def get_current_fte_value(dates_range, 
selected_le):\n current_date = dates_list[dates_range[1] - 1]\n period = datetime.strftime(current_date, '%Y_%m')\n df = pd.read_sql('''\n SELECT legal_entity_short_eng, fte\n FROM hc_data_main\n WHERE period = \"{}\"\n '''.format(period), con=engine)\n dff = df[df['legal_entity_short_eng'].isin(input_to_list(selected_le))]\n current_fte = round(dff['fte'].sum(), 1)\n return current_fte\n\n # Текст в панельке с текущей численностью (какой месяц)\n @app.callback(Output('current_fte_text', 'children'),\n [Input('dates_slider', 'value')])\n def get_current_fte_text(dates_range):\n current_date = dates_list[dates_range[1] - 1]\n current_period = datetime.strftime(current_date, '%Y_%m')\n date_for_card = pd.read_sql('SELECT DISTINCT month_end FROM hc_data_main WHERE period = \"{}\"'.format(\n current_period), con=engine\n )['month_end'][0]\n period_text = \"Численность на \" + datetime.strftime(date_for_card, '%d.%m.%Y')\n return period_text\n\n # Панель с изменением численности в процентах (значение)\n @app.callback(Output('change_fte_value', 'children'),\n [Input('dates_slider', 'value'),\n Input('le_dropdown', 'value')])\n def get_change_fte_value(dates_range, selected_le):\n start_date = dates_list[dates_range[0] - 1]\n start_period = datetime.strftime(start_date, '%Y_%m')\n end_date = dates_list[dates_range[1] - 1]\n end_period = datetime.strftime(end_date, '%Y_%m')\n df = pd.read_sql('''\n SELECT period, legal_entity_short_eng, SUM(fte) AS fte\n FROM hc_data_main\n WHERE period = \"{}\" OR period = \"{}\"\n GROUP BY period, legal_entity_short_eng\n '''.format(start_period, end_period), con=engine)\n df = df[df['legal_entity_short_eng'].isin(input_to_list(selected_le))]\n start_fte = df[df['period'] == start_period]['fte'].sum()\n end_fte = df[df['period'] == end_period]['fte'].sum()\n fte_change_percent = \"{0:+.1%}\".format(end_fte / start_fte - 1)\n fte_change_absolute = \"{0:+}\".format(round(end_fte - start_fte, 1))\n fte_change = \"{} ({} 
FTE)\".format(fte_change_percent, fte_change_absolute)\n return fte_change\n\n @app.callback(Output('change_fte_text', 'children'),\n [Input('dates_slider', 'value')])\n def get_change_fte_text(dates_range):\n start_date = dates_list[dates_range[0] - 1]\n end_date = dates_list[dates_range[1] - 1]\n text_string = 'Изменение FTE с {}'.format(\n str(datetime.strftime(start_date, '%B %Y')) +\n \" по \" +\n str(datetime.strftime(end_date, '%B %Y')))\n return text_string\n\n @app.callback(\n Output('total_fte_graph', 'figure'),\n [Input('dates_slider', 'value'),\n Input('le_dropdown', 'value')])\n def get_total_fte_graph(dates_range, selected_le):\n df = pd.read_sql('''\n SELECT month_start, legal_entity_short_eng, legal_entity_group, SUM(fte) AS fte\n FROM hc_data_main\n GROUP BY month_start, legal_entity_group, legal_entity_short_eng\n ''', con=engine, parse_dates=['month_start'])\n df = df[df['legal_entity_short_eng'].isin(input_to_list(selected_le))]\n df = df.groupby(['month_start', 'legal_entity_group']).agg(\n fte=('fte', 'sum')\n ).reset_index()\n\n start_date = dates_list[dates_range[0] - 1]\n end_date = dates_list[dates_range[1] - 1]\n traces = []\n if 'ГРС' in df['legal_entity_group'].unique():\n le_for_graph = ['ГРС'] + [le for le in df['legal_entity_group'].unique() if le != 'ГРС']\n else:\n le_for_graph = df['legal_entity_group'].unique()\n for le in le_for_graph:\n colors = []\n for clr in dates_list:\n if start_date <= clr <= end_date:\n colors.append(colors_le[le][0])\n else:\n colors.append(colors_le[le][1])\n df_le = df[df['legal_entity_group'] == le]\n trace = go.Bar(\n x=df_le['month_start'],\n y=df_le['fte'],\n name=le,\n marker={\n 'color': colors,\n },\n hovertemplate='' + le + ': %{y:,.0f} ',\n )\n traces.append(trace)\n df_total = df.groupby('month_start').agg(fte=('fte', 'sum')).reset_index()\n totals_trace = go.Scatter(\n x=df_total['month_start'],\n y=df_total['fte'].round(0),\n name='Всего',\n text=df_total['fte'].round(0),\n 
textposition='top center',\n mode='text',\n hoverinfo='skip',\n hovertemplate='Всего: %{y:,.0f} '\n )\n traces.append(totals_trace)\n xaxis_range = [dates_list[-36] + timedelta(days=15),\n dates_list[-1] + timedelta(days=15)]\n fte_graph_layout = go.Layout(\n title_text=\"Динамика FTE группы компаний\",\n autosize=True,\n margin=dict(l=30, r=30, b=20, t=40),\n plot_bgcolor=\"#EDEDED\",\n paper_bgcolor=\"#EDEDED\",\n hovermode='x',\n barmode='stack',\n legend=dict(font=dict(size=10), orientation=\"h\"),\n xaxis=dict(range=xaxis_range, tickformat='%m.%Y', nticks=12),\n\n )\n figure = {'data': traces, 'layout': fte_graph_layout}\n return figure\n\n @app.callback(\n Output('fte_table_container', 'children'),\n [Input('le_dropdown', 'value')])\n def get_fte_table(le_selected):\n # dates_selected = dates_list[dates_range[0]:dates_range[1]]\n le_selected = input_to_list(le_selected)\n print(le_selected)\n df = pd.read_sql('''\n SELECT legal_entity_short_eng, month, year, SUM(fte) AS fte\n FROM hc_data_main\n GROUP BY year, month, legal_entity_short_eng\n ''', con=engine)\n df = df[df['legal_entity_short_eng'].isin(le_selected)]\n df = df.groupby(['year', 'month']).agg(fte=('fte', 'sum')).reset_index()\n df['rounded_fte'] = df['fte'].round(1)\n df['month'] = df['month'].astype('int')\n dff = pd.pivot_table(\n df,\n index='year',\n columns='month',\n values='rounded_fte',\n aggfunc=np.sum,\n fill_value='-'\n ).reset_index()\n dff_t = dff.round(1)\n data = dff.to_dict('records')\n result_table = dash_table.DataTable(\n data=data,\n id='total_fte_table',\n style_as_list_view=True,\n columns=[\n {'name': 'Год', 'id': 'year'},\n {'name': 'Янв', 'id': '1'},\n {'name': 'Фев', 'id': '2'},\n {'name': 'Мар', 'id': '3'},\n {'name': 'Апр', 'id': '4'},\n {'name': 'Май', 'id': '5'},\n {'name': 'Июн', 'id': '6'},\n {'name': 'Июл', 'id': '7'},\n {'name': 'Авг', 'id': '8'},\n {'name': 'Сен', 'id': '9'},\n {'name': 'Окт', 'id': '10'},\n {'name': 'Ноя', 'id': '11'},\n {'name': 'Дек', 'id': 
'12'},\n ],\n style_cell={\n 'backgroundColor': '#EDEDED',\n 'textOverflow': 'ellipsis',\n }\n )\n\n return result_table\n\n @app.callback(\n Output('total_hc_graph', 'figure'),\n [Input('dates_slider', 'value'),\n Input('le_dropdown', 'value'),\n Input('include_maternity_checkbox', 'value')])\n def get_total_hc_graph(dates_range, selected_le, include_maternity):\n df = pd.read_sql('''\n SELECT\n month_start,\n legal_entity_short_eng,\n legal_entity_group,\n state_maternity_month_end,\n SUM(main_employee_entry) as headcount\n FROM\n hc_data_main\n WHERE\n headcount_month_end_raw = 1\n GROUP BY \n month_start,\n legal_entity_group,\n legal_entity_short_eng,\n state_maternity_month_end\n ''', con=engine, parse_dates=['month_start'])\n df = df[df['legal_entity_short_eng'].isin(input_to_list(selected_le))]\n if 'include_maternity' not in include_maternity:\n df = df[df['state_maternity_month_end'] != 1]\n df = df.groupby(['month_start', 'legal_entity_group']).agg(\n headcount=('headcount', 'sum')).reset_index()\n start_date = dates_list[dates_range[0] - 1]\n end_date = dates_list[dates_range[1] - 1]\n traces = []\n if 'ГРС' in df['legal_entity_group'].unique():\n le_for_graph = ['ГРС'] + [le for le in df['legal_entity_group'].unique() if le != 'ГРС']\n else:\n le_for_graph = df['legal_entity_group'].unique()\n for le in le_for_graph:\n colors = []\n for clr in dates_list:\n if start_date <= clr <= end_date:\n colors.append(colors_le[le][0])\n else:\n colors.append(colors_le[le][1])\n df_le = df[df['legal_entity_group'] == le]\n trace = go.Bar(\n x=df_le['month_start'],\n y=df_le['headcount'],\n name=le,\n marker={\n 'color': colors,\n },\n hovertemplate='' + le + ': %{y:,.0f} ',\n )\n traces.append(trace)\n df_total = df.groupby('month_start').agg(headcount=('headcount', 'sum')).reset_index()\n totals_trace = go.Scatter(\n x=df_total['month_start'],\n y=df_total['headcount'].round(0),\n name='Всего',\n text=df_total['headcount'].round(0),\n textposition='top center',\n 
mode='text',\n hoverinfo='skip',\n hovertemplate='Всего: %{y:,.0f} '\n )\n traces.append(totals_trace)\n xaxis_range = [dates_list[-36] + timedelta(days=15),\n dates_list[-1] + timedelta(days=15)]\n fte_graph_layout = go.Layout(\n title_text=\"Динамика численности группы компаний\",\n autosize=True,\n margin=dict(l=30, r=30, b=20, t=40),\n plot_bgcolor=\"#EDEDED\",\n paper_bgcolor=\"#EDEDED\",\n hovermode='x',\n barmode='stack',\n legend=dict(font=dict(size=10), orientation=\"h\"),\n xaxis=dict(range=xaxis_range, tickformat='%m.%Y', nticks=12),\n )\n figure = {'data': traces, 'layout': fte_graph_layout}\n return figure\n\n @app.callback(\n Output('selected_n_functions_text', 'children'),\n [Input('n_functions_show_slider', 'value')])\n def get_selected_n_functions_text(n_functions_selected):\n text_string = 'Показать детализацию по {} функциям'.format(n_functions_selected)\n return text_string\n\n @app.callback(\n Output('change_fte_wf', 'figure'),\n [Input('dates_slider', 'value'),\n Input('wf_type_radio', 'value'),\n Input('n_functions_show_slider', 'value'),\n Input('functions_dropdown', 'value'),\n Input('le_dropdown', 'value')])\n def get_change_fte_wf(dates_range, wf_type, n_functions_selected, functions_selected, le_selected):\n if not isinstance(le_selected, list):\n le_selected = [le_selected]\n if not isinstance(functions_selected, list):\n functions_selected = [functions_selected]\n start_date = dates_list[dates_range[0] - 1]\n start_period = datetime.strftime(start_date, '%Y_%m')\n end_date = dates_list[dates_range[1] - 1]\n end_period = datetime.strftime(end_date, '%Y_%m')\n n_cases = n_functions_selected\n df_periods_total = pd.read_sql('''\n SELECT period, legal_entity_short_eng, function, SUM(fte) as fte \n FROM hc_data_main\n WHERE period = \"{}\" OR period = \"{}\" \n GROUP BY month_start, legal_entity_short_eng, function\n '''.format(start_period, end_period), con=engine)\n df_start = 
df_periods_total[df_periods_total['legal_entity_short_eng'].isin(le_selected) &\n (df_periods_total['period'] == start_period)]\n df_start = df_start.groupby('period').agg(fte=('fte', 'sum')).reset_index()\n df_start['title'] = 'FTE на ' + datetime.strftime(start_date, '%B %Y')\n df_start['measure'] = 'absolute'\n df_end = df_periods_total[df_periods_total['legal_entity_short_eng'].isin(le_selected) &\n (df_periods_total['period'] == end_period)]\n df_end = df_end.groupby('period').agg(fte=('fte', 'sum')).reset_index()\n df_end['title'] = 'FTE на ' + datetime.strftime(end_date, '%B %Y')\n df_end['measure'] = 'absolute'\n\n df_change = df_periods_total[df_periods_total['legal_entity_short_eng'].isin(le_selected)].copy()\n df_change['function'].fillna('Не опознаны', inplace=True)\n dff = pd.pivot_table(\n df_change,\n index='function',\n columns='period',\n values='fte',\n aggfunc='sum',\n fill_value=0\n ).reset_index()\n\n dff.rename(columns={start_period: 'start_fte', end_period: 'end_fte'},\n inplace=True)\n dff['change'] = dff['end_fte'] - dff['start_fte']\n dff['rank'] = dff['change'].abs().rank(method='first', ascending=False)\n if wf_type == 'top_n':\n dff.loc[dff['rank'] > n_cases, 'function'] = 'Другие'\n else:\n dff.loc[~dff['function'].isin(functions_selected), 'function'] = 'Другие'\n\n dff = dff.groupby(['function']).agg(change=('change', 'sum')).reset_index()\n dff['measure'] = 'relative'\n dff.sort_values(by='change', ascending=False, inplace=True)\n dff.loc[dff['function'] == 'Другие', 'sorter'] = 1\n dff.loc[dff['function'] != 'Другие', 'sorter'] = 0\n dff.sort_values(by='sorter', ascending=True, inplace=True)\n dff.rename(columns={'function': 'title', 'change': 'fte'}, inplace=True)\n df_result = pd.concat([\n df_start[['title', 'fte', 'measure']],\n dff[['title', 'fte', 'measure']],\n df_end[['title', 'fte', 'measure']],\n ]).round({'fte': 1})\n fig = go.Figure(\n go.Waterfall(\n orientation=\"v\",\n measure=df_result['measure'],\n 
x=df_result['title'],\n textposition=\"outside\",\n text=df_result['fte'],\n y=df_result['fte'],\n hovertemplate='%{x}: %{text} ',\n decreasing={\"marker\": {\"color\": 'rgba(211, 94, 96, 1)'}},\n increasing={\"marker\": {\"color\": 'rgba(135, 186, 91, 1)'}},\n totals={\"marker\": {\"color\": 'rgba(114, 147, 203, 1)'}}\n ))\n title_string = 'Изменения по функциям в рамках общего изменения чис��енности с {}'.format(\n str(datetime.strftime(start_date, '%b %Y')) +\n \" по \" +\n str(datetime.strftime(end_date, '%b %Y')))\n fig.update_layout(\n title_text=title_string,\n autosize=True,\n margin=dict(l=30, r=30, b=100, t=60),\n plot_bgcolor=\"#EDEDED\",\n paper_bgcolor=\"#EDEDED\",\n hovermode='x',\n legend=dict(font=dict(size=10), orientation=\"h\")\n )\n max_value = df_result['fte'].max()\n fig.update_yaxes(range=[0, max_value * 1.3])\n\n return fig\n\n @app.callback(\n Output('function_fte_graph', 'figure'),\n [Input('change_fte_wf', 'clickData'),\n Input('dates_slider', 'value'),\n Input('functions_dropdown', 'value')])\n def get_functions_fte(clickData, dates_range, selected_functions):\n functions = list(pd.read_sql('''\n SELECT DISTINCT function\n FROM hc_data_main''', con=engine)['function'].dropna())\n try:\n function = clickData['points'][0]['x']\n except TypeError:\n function = None\n if function is None or function not in functions:\n function = input_to_list(selected_functions)[0]\n start_date = dates_list[dates_range[0] - 1]\n start_period = datetime.strftime(start_date, '%Y_%m')\n end_date = dates_list[dates_range[1] - 1]\n end_period = datetime.strftime(end_date, '%Y_%m')\n df = pd.read_sql('''\n SELECT month_start, legal_entity_group, function, SUM(fte) AS fte\n FROM hc_data_main\n WHERE function = \"{}\"\n GROUP BY month_start, legal_entity_group\n '''.format(function), con=engine, parse_dates=['month_start'])\n df = df[(df['month_start'] >= start_date) & (df['month_start'] <= end_date)]\n traces = []\n for le in df['legal_entity_group'].unique():\n 
df_le = df[df['legal_entity_group'] == le]\n trace = go.Bar(\n x=df_le['month_start'],\n y=df_le['fte'],\n name=le,\n marker={\n 'color': colors_le[le][0],\n }\n )\n traces.append(trace)\n\n fte_graph_layout = go.Layout(\n title_text=\"Динамика FTE \" + function,\n autosize=True,\n margin=dict(l=30, r=30, b=20, t=40),\n plot_bgcolor=\"#EDEDED\",\n paper_bgcolor=\"#EDEDED\",\n hovermode='x',\n barmode='stack',\n legend=dict(font=dict(size=10), orientation=\"h\"),\n xaxis=dict(tickformat='%m.%Y', nticks=5),\n )\n figure = {'data': traces, 'layout': fte_graph_layout}\n return figure\n\n @app.callback(\n Output('detailed_function_table_container', 'children'),\n [Input('change_fte_wf', 'clickData'),\n Input('dates_slider', 'value'),\n Input('functions_dropdown', 'value')])\n def get_detailed_functions_table(clickData, dates_range, selected_functions):\n functions = list(pd.read_sql('''\n SELECT DISTINCT function\n FROM hc_data_main''', con=engine)['function'].dropna())\n try:\n function = clickData['points'][0]['x']\n except TypeError:\n function = None\n if function is None or function not in functions:\n function = input_to_list(selected_functions)[0]\n\n start_date = dates_list[dates_range[0] - 1]\n start_period = datetime.strftime(start_date, '%Y_%m')\n end_date = dates_list[dates_range[1] - 1]\n end_period = datetime.strftime(end_date, '%Y_%m')\n df = pd.read_sql('''\n SELECT\n hc.period,\n hc.cost_center,\n SUM(hc.fte) AS fte,\n ccf.function_detailed\n FROM hc_data_main hc\n LEFT JOIN ref_cost_center_functions ccf\n ON hc.cost_center = ccf.cost_center\n WHERE \n hc.function = \"{}\" AND\n (hc.period = \"{}\" OR hc.period = \"{}\")\n GROUP BY hc.period, hc.cost_center, ccf.function_detailed\n '''.format(function, start_period, end_period), con=engine)\n df['fte'] = df['fte'].round(1)\n dff = pd.pivot_table(\n df,\n index='function_detailed',\n columns='period',\n values='fte',\n aggfunc='sum',\n fill_value=0\n ).reset_index()\n dff.rename(columns={start_period: 
'start_period', end_period: 'end_period'},\n inplace=True)\n dff['change'] = (dff['end_period'] - dff['start_period']).round(1)\n dff.sort_values(by=['change'], ascending=False, inplace=True)\n data = dff.to_dict('records')\n result_table = dash_table.DataTable(\n data=data,\n id='detailed_function_table',\n style_as_list_view=True,\n columns=[\n {'name': 'Функция', 'id': 'function_detailed'},\n {'name': 'Было', 'id': 'start_period'},\n {'name': 'Стало', 'id': 'end_period'},\n {'name': 'Изменение', 'id': 'change'},\n ],\n fixed_rows={'headers': True, 'data': 0},\n style_cell_conditional=[\n {'if': {'column_id': 'start_period'},\n 'width': '50px',\n 'textAlign': 'left'},\n {'if': {'column_id': 'end_period'},\n 'width': '50px',\n 'textAlign': 'left'},\n {'if': {'column_id': 'change'},\n 'width': '50px',\n 'textAlign': 'left'},\n {'if': {'column_id': 'function_detailed'},\n 'width': '35%',\n 'textAlign': 'left'}\n ],\n style_cell={\n 'backgroundColor': '#EDEDED',\n 'textOverflow': 'ellipsis',\n 'font-size': '0.8rem',\n },\n style_table={\n 'width': '98%',\n 'maxHeight': '450px'\n },\n sort_action=\"native\",\n sort_mode=\"multi\",\n )\n\n return result_table\n\n\n @app.callback(\n Output('detailed_people_table_container', 'children'),\n [Input('change_fte_wf', 'clickData'),\n Input('dates_slider', 'value'),\n Input('functions_dropdown', 'value')])\n def get_detailed_people_table(clickData, dates_range, selected_functions):\n functions = list(pd.read_sql('''\n SELECT DISTINCT function\n FROM hc_data_main''', con=engine)['function'].dropna())\n try:\n function = clickData['points'][0]['x']\n except TypeError:\n function = None\n if function is None or function not in functions:\n function = input_to_list(selected_functions)[0]\n\n start_date = dates_list[dates_range[0] - 1]\n start_period = datetime.strftime(start_date, '%Y_%m')\n end_date = dates_list[dates_range[1] - 1]\n end_period = datetime.strftime(end_date, '%Y_%m')\n df = pd.read_sql('''\n SELECT\n hc.period,\n 
hc.cost_center,\n hc.city,\n hc.employee_id,\n hc.employee_name,\n hc.position,\n hc.hire_date,\n hc.exit_date,\n hc.fte,\n ccf.function_detailed\n FROM hc_data_main hc\n LEFT JOIN ref_cost_center_functions ccf\n ON hc.cost_center = ccf.cost_center\n WHERE \n hc.function = \"{}\" AND\n (hc.period = \"{}\" OR hc.period = \"{}\")\n '''.format(function, start_period, end_period), con=engine)\n df['fte'] = df['fte'].round(1)\n\n if current_user.role_id not in [1, 2, 3]:\n accesses = current_user.accesses\n test_list = [f'{x}' for x in accesses]\n df_ac = pd.DataFrame({\n 'access': accesses\n })\n df_ac['access'] = df_ac['access'].astype('str')\n df_ac = df_ac['access'].str.split(': ', expand=True)\n df_ac.columns = ['user_id', 'cost_center', 'city']\n available_records = list(df_ac['cost_center'] + '_' + df_ac['city'])\n df['check'] = df['cost_center'] + '_' + df['city']\n df = df[df['check'].isin(available_records)]\n df.drop(columns=['check'])\n if df.shape[0] == 0:\n return ''\n\n fill_date = pd.Timestamp(2099, 1, 1)\n df['exit_date'].fillna(fill_date, inplace=True)\n\n dff = pd.pivot_table(\n df,\n index=['function_detailed',\n 'employee_id',\n 'employee_name',\n 'position',\n 'hire_date',\n 'exit_date',\n ],\n columns='period',\n values='fte',\n aggfunc='sum',\n fill_value=0\n ).reset_index()\n dff.rename(columns={start_period: 'start_period', end_period: 'end_period'},\n inplace=True)\n dff['change'] = (dff['end_period'] - dff['start_period']).round(1)\n hire_check_date = start_date + pd.DateOffset(month=1)\n exit_check_date = end_date + pd.offsets.MonthEnd(0)\n dff.loc[dff['change'] == 0, 'type'] = 'Без изменений'\n dff.loc[(dff['change'] > 0) &\n (dff['hire_date'] >= hire_check_date) &\n (dff['hire_date'] <= exit_check_date),\n 'type'] = 'Прием'\n dff.loc[(dff['change'] > 0) &\n (dff['exit_date'] >= (hire_check_date - pd.Timedelta(days=1))) &\n (dff['exit_date'] < exit_check_date),\n 'type'] = 'Увольнение'\n dff['type'].fillna('Перевод/Декрет', inplace=True) # 
todo check with olga if other cases\n dff['hire_date'] = dff['hire_date'].dt.strftime('%d.%m.%Y')\n dff['exit_date'] = dff['exit_date'].dt.strftime('%d.%m.%Y')\n dff['exit_date'] = dff['exit_date'].replace({'01.01.2099': '-'})\n dff.drop(columns=['employee_id'], inplace=True)\n data = dff.to_dict('records')\n result_table = dash_table.DataTable(\n data=data,\n id='detailed_people_table',\n style_as_list_view=True,\n columns=[\n {'name': 'Функция', 'id': 'function_detailed'},\n {'name': 'Сотрудник', 'id': 'employee_name'},\n {'name': 'Должность', 'id': 'position'},\n {'name': 'Дата приема', 'id': 'hire_date'},\n {'name': 'Дата увольнения', 'id': 'exit_date'},\n {'name': 'Было', 'id': 'start_period'},\n {'name': 'Стало', 'id': 'end_period'},\n {'name': 'Изменение', 'id': 'change'},\n {'name': 'Причина', 'id': 'type'},\n ],\n fixed_rows={'headers': True, 'data': 0},\n style_cell_conditional=[\n {'if': {'column_id': 'start_period'}, 'width': '80px',\n 'textAlign': 'left'},\n {'if': {'column_id': 'end_period'}, 'width': '80px',\n 'textAlign': 'left'},\n {'if': {'column_id': 'change'}, 'width': '80px',\n 'textAlign': 'left'},\n {'if': {'column_id': 'hire_date'}, 'width': '100px',\n 'textAlign': 'left'},\n {'if': {'column_id': 'exit_date'}, 'width': '100px',\n 'textAlign': 'left'},\n {'if': {'column_id': 'type'},\n 'width': '150px',\n 'textAlign': 'left'},\n {'if': {'column_id': 'function_detailed'},\n 'width': '15%',\n 'textAlign': 'left'},\n {'if': {'column_id': 'employee_name'},\n 'width': '16%',\n 'textAlign': 'left'},\n {'if': {'column_id': 'position'},\n 'width': '7%',\n 'textAlign': 'left'}\n ],\n style_cell={\n 'backgroundColor': '#EDEDED',\n 'textOverflow': 'ellipsis',\n },\n style_table={\n 'width': '98%',\n 'maxHeight': '450px',\n 'font-size': '0.8rem',\n 'text-align': 'left'\n },\n filter_action=\"native\",\n sort_action=\"native\",\n sort_mode=\"multi\",\n )\n return result_table\n\n return 
app.server\n","sub_path":"dashboards/dash_fte.py","file_name":"dash_fte.py","file_ext":"py","file_size_in_byte":38583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"401756935","text":"#!/usr/bin/env python\n\n# File bloodPressure.py\n\nimport datetime\nimport json\nimport sys\nsys.path.append('./healthFact')\nimport measurements\nimport datastoreDAO\n\nif __name__ == '__main__':\n# bp = measurements.BloodPressure(120,80)\n\n datastoreDAO.makeBPEntry(datetime.datetime.today(), 120, 80, 60)\n datastoreDAO.makeWeightEntry(datetime.datetime.today(), 172, 'lbm', 14.2)\n\n w = measurements.Weight(172.7, 'lbm')\n\n try:\n w1=measurements.Weight(172.7, 'g')\n except AttributeError as ae:\n print (ae.message)\n \n try:\n print(w.convert(1.0, 'kg', 'lbm'))\n print(w.convert(1.0, 'lbm', 'kg'))\n print(w.convert(1.0, 'g', 'kg'))\n except KeyError as ke:\n print(ke.message)\n \n print(json.dumps(w.toEntity()))\n\n b = measurements.BodyFat(15.5)\n print(json.dumps(b.toEntity()))\n\n (lbm, bmr) = measurements.calcLBM(w,b)\n print (lbm, bmr)\n \n","sub_path":"measuresTest.py","file_name":"measuresTest.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"68685299","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nfrom django.utils.timezone import utc\nimport datetime\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('pollsnew', '0008_auto_20160124_1500'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='choice',\n name='addedBy',\n field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL),\n ),\n migrations.AlterField(\n model_name='category',\n name='date_created',\n field=models.DateTimeField(default=datetime.datetime(2016, 1, 24, 10, 7, 44, 275000, tzinfo=utc), verbose_name=b'date created'),\n ),\n migrations.AlterField(\n model_name='question',\n name='pub_date',\n field=models.DateTimeField(default=datetime.datetime(2016, 1, 24, 10, 7, 44, 275000, tzinfo=utc), verbose_name=b'date published'),\n ),\n ]\n","sub_path":"mysite-project/pollsnew/migrations/0009_auto_20160124_1537.py","file_name":"0009_auto_20160124_1537.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"568932735","text":"\"\"\"\r\nplot confusion_matrix of PublicTest and PrivateTest\r\n\"\"\"\r\nimport itertools\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport os\r\nimport argparse\r\n#from utils import dataloader\r\nfrom utils.dataloader import DataLoader\r\nfrom torch.autograd import Variable\r\nimport torchvision\r\nimport transforms as transforms\r\nfrom sklearn.metrics import confusion_matrix\r\nfrom models import *\r\nfrom models.resnet_cut import *\r\nfrom models.peleenet import *\r\n\r\nparser = argparse.ArgumentParser(description='plot_rgb_confusion_matrix')\r\nparser.add_argument('--model_path', help='input model path', type=str)\r\nparser.add_argument('--input_shape', help='data type', default=128, type=int)\r\nparser.add_argument('--split', type=str, default='PrivateTest', help='split')\r\nopt = parser.parse_args()\r\n\r\ninput_shape = opt.input_shape\r\ntransform_test = transforms.Compose([\r\n transforms.CenterCrop(input_shape),\r\n transforms.ToTensor(),\r\n #normalize,\r\n])\r\ndef plot_confusion_matrix(cm, classes,\r\n normalize=False,\r\n title='Confusion matrix',\r\n cmap=plt.cm.Blues):\r\n \"\"\"\r\n This function prints and plots the confusion matrix.\r\n Normalization can be applied by setting `normalize=True`.\r\n \"\"\"\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n\r\n print(cm)\r\n\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title, fontsize=16)\r\n plt.colorbar()\r\n tick_marks = np.arange(len(classes))\r\n plt.xticks(tick_marks, classes, rotation=45)\r\n plt.yticks(tick_marks, classes)\r\n\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, format(cm[i, j], 
fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n\r\n\r\n plt.ylabel('True label', fontsize=18)\r\n plt.xlabel('Predicted label', fontsize=18)\r\n plt.tight_layout()\r\n\r\nclass_names = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']\r\n\r\n# Model\r\ndevice_ids=[0]\r\n#net = ResNet18_cut()\r\nnet = Peleenet()\r\nnet = torch.nn.DataParallel(net, device_ids=device_ids)\r\ncheckpoint = torch.load(opt.model_path)\r\nnet.load_state_dict(checkpoint['net'])\r\nnet.cuda()\r\nnet.eval()\r\nPrivateTestset = DataLoader(split = opt.split, transform=transform_test)\r\nTestloader = torch.utils.data.DataLoader(PrivateTestset, batch_size=16, shuffle=False)\r\ncorrect = 0\r\ntotal = 0\r\nall_target = []\r\nfor batch_idx, (inputs, targets) in enumerate(Testloader):\r\n use_cuda = torch.cuda.is_available()\r\n if use_cuda:\r\n inputs, targets = inputs.cuda(device=device_ids[0]), targets.cuda(device=device_ids[0])\r\n inputs, targets = Variable(inputs), Variable(targets)\r\n outputs = net(inputs)\r\n score = F.softmax(outputs)\r\n #print(score)\r\n _, predicted = torch.max(outputs, 1)\r\n total += targets.size(0)\r\n correct += predicted.eq(targets.data).cpu().sum()\r\n # inputs, targets = Variable(inputs), Variable(targets)\r\n # outputs = net(inputs)\r\n # _, predicted = torch.max(outputs.data, 1)\r\n\r\n # total += targets.size(0)\r\n # correct += predicted.eq(targets.data).cpu().sum()\r\n if batch_idx == 0:\r\n all_predicted = predicted\r\n all_targets = targets\r\n else:\r\n all_predicted = torch.cat((all_predicted, predicted),0)\r\n all_targets = torch.cat((all_targets, targets),0)\r\n\r\nacc = 100. 
* float(correct) / total\r\nprint(\"accuracy: %0.3f\" % acc)\r\n\r\n# Compute confusion matrix\r\nmatrix = confusion_matrix(all_targets.data.cpu().numpy(), all_predicted.cpu().numpy())\r\nnp.set_printoptions(precision=2)\r\n\r\n# Plot normalized confusion matrix\r\nplt.figure(figsize=(10, 8))\r\nplot_confusion_matrix(matrix, classes=class_names, normalize=True,\r\n title= opt.split+' Confusion Matrix (Accuracy: %0.3f%%)' %acc)\r\nplt.savefig(os.path.join('./output_results/', opt.split + '_cm.png'))\r\nplt.close()","sub_path":"plot_rgb_confusion_matrix.py","file_name":"plot_rgb_confusion_matrix.py","file_ext":"py","file_size_in_byte":4209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"93690215","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n'''\nHow to call this function:\n\nIn the header of your program, type:\n\nfrom speccounts import *\n\nusing the code, type into your program:\n\nspecin_countsout(x_array, y_array)\n\nNOTE: the inputs given to w_f_in do not literally have to be 'wavez' and 'fluxz'.\nThey are just place holder variables for the code below.\n\nMachinery of this code is similar to spectrophot_v2, except without the prompt asking you for a supernova.\nNeed the all the filter curve .txt in the same directory to run successfully\n\nThe variables within the function only exist within the defined function. They can not be called outside of the\nfunction. What this function does is the desired spectrophotometry and spits out the calculated magnitudes\nin order w2,m2,w1,u,b,v. So I just recommend running the function with inputs and copy and paste the result into\na new array if you plan to do anything else with the magnitudes.\n\n'''\n\n\n\n#Vega for reference#\n\nvega_wave,vega_flux = np.loadtxt('../spectra/vega.dat',dtype=float,usecols=(0,1),unpack=True)\n\n# input vega_wave and vega_flux into w_f_in to test #\n\n#####################\n\ndef specin_countsout(wavez,fluxz):\n\n h = 6.6260755e-27\n c = 2.99792458e18\n hc = h*c\n\n\n files = ['filters/UVW2_2010.txt','filters/UVM2_2010.txt','filters/UVW1_2010.txt','filters/U_UVOT.txt','filters/B_UVOT.txt', 'filters/V_UVOT.txt']\n\n filter_WL = []\n filter_A = []\n\n for item in files:\n #Necessary to have \"../\" when running in /python/ directory\n f = open(\"../\" + item,'r')\n\n#\tprint(item)\n\n filter_lambda = []\n filter_area = []\n for line in f:\n \tline = line.rstrip()\n column = line.split()\n#\t\tprint(column)\n wavelen = column[0]\n area = column[1]\n filter_lambda.append(float(wavelen))\n filter_area.append(float(area))\n\n filter_lambda = np.asarray(filter_lambda,dtype=float)\n filter_area = np.asarray(filter_area,dtype=float)\n\n nonzero = 
np.where(filter_area > 0.0)\n\n filter_lambda = filter_lambda[nonzero]\n filter_area = filter_area[nonzero]\n\n filter_WL.append(filter_lambda)\n filter_A.append(filter_area)\n\n f.close()\n\n\n\n ##########################################\n\n\n filtercurves = ['UVW2_2010','UVM2_2010','UVW1_2010','U_UVOT','B_UVOT','V_UVOT'] ### STRING LIST\n\n zeropoints = [17.38, 16.85, 17.44, 18.34, 19.11, 17.89] ### PHOTOMETRIC ZEROPOINTS BASED ON VEGA\n\n\n filtereffwavelength=[2030,2231,2634,3501,4329,5402] ### EFFECTIVE VEGA WAVELENGTH FOR EACH FILTER (IN SAME ORDER)\n\n mag_array = np.zeros(len(filtercurves))\n\n counts_array = np.zeros(len(filtercurves))\n\n\n filter_array = np.array([filter_A[0],filter_A[1],filter_A[2],filter_A[3],filter_A[4],filter_A[5]])\n\n filter_wave = np.array([filter_WL[0],filter_WL[1],filter_WL[2],filter_WL[3],filter_WL[4],filter_WL[5]])\n\n\n\n for x in range(len(filtercurves)):\n\n sp_ea = np.interp(wavez,filter_wave[x],filter_array[x]) ### spectrum effective area\n\n counts_array[x] = np.trapz(sp_ea*fluxz*wavez/hc,wavez) ### Integrating under the curve using numpy\n\n mag_array[x] = -2.5*np.log10(counts_array[x])+zeropoints[x] ### Calculated magnitudes\n\n return counts_array, mag_array\n\n\n\n'''\nNOTE on mag_array: mag_array has 6 components, one for each filter used. This means that the first\ncomponent is the calculated w2 magnitude, the second component is the m2 calculated magnitude, all\nthe way to v band calculated magnitude. The order of the magnitude reflects the order of filtercurves.\n'''\n\n","sub_path":"python/speccounts.py","file_name":"speccounts.py","file_ext":"py","file_size_in_byte":3637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"429658139","text":"#!/usr/bin/python3\n\nfrom time import sleep\nimport sys, os\nfrom ev3dev.ev3 import *\n\n#connect motors\nrightMotor = LargeMotor(OUTPUT_A)\nassert rightMotor.connected\nleftMotor = LargeMotor(OUTPUT_D)\nassert leftMotor.connected\n\nprint(\"Motors connected\")\n\n#connect gyro\ngs = GyroSensor()\nassert gs.connected\nprint(\"Gyro connected\")\ngs.mode = 'GYRO-RATE'\t# Changing the mode resets the gyro\ngs.mode = 'GYRO-ANG'\n\n#connect servo\nservo = Motor(OUTPUT_C)\nassert servo.connected\nservo.reset()\nservo.stop()\n\nprint(\"Servo connected\")\n\n#connect ultrasonic\nus = UltrasonicSensor()\nassert us.connected\n\nprint(\"Ultrasonic Connected\")\n\n#all connected\nSound.speak('Get Ready... Go!').wait()\nprint(\"Everything connected\")\n\n#DEFINE GLOBAL VARIABLES\n\n\n#FUNCTION DECLARATIONS\n\ndef stop():\n # Brake the motors of the robot.\n leftMotor.stop(stop_action='brake')\n rightMotor.stop(stop_action='brake')\n\ndef drive_square():\n #drive only a certain time\n rightMotor.run_timed(time_sp=3000, speed_sp=100)\n leftMotor.run_timed(time_sp=3000, speed_sp=100)\n print(\"moving forward 1 square\")\n\n#def turn(clockwise):\n\ndef scan(destination):\n servo.run_to_abs_pos(position_sp=destination, speed_sp=75, ramp_down_sp=90)\n #print(\"destination angle \", destination)\n\n'''\n the main loop of this program will\n move forward a certain distance\n scan in 3 directions\n store each direction results\n print out scan results\n'''\n\ndef print_array(input):\n detection_distance = 60\n\n output_string = \"\"\n\n #left\n if input[0] <= detection_distance:\n output_string += \"left clear\"\n else:\n output_string += \"left blocked\"\n output_string += str(input[0])\n\n #center\n if input[1] <= detection_distance:\n output_string += \" center clear\"\n else:\n output_string += \" center blocked\"\n output_string += str(input[1])\n\n #right\n if input[2] <= detection_distance:\n output_string += \" right clear\"\n else:\n 
output_string += \" right blocked\"\n output_string += str(input[2])\n\n print(output_string)\n\n\n\ndef main():\n # Left Center Right Array\n LCR = [0,0,0]\n\n while True:\n drive_square()\n # It will return to the main area while the robot moves\n scan(0)\n sleep(5)\n #set array front value\n LCR[1] = us.value()\n\n\n scan(90)\n sleep(5)\n # set array left value\n LCR[0] = us.value()\n\n scan(-90)\n sleep(5)\n # set array front value\n LCR[2] = us.value()\n\n print_array(LCR)\n #reset array\n LCR = [0,0,0]\n\n print(\"It has slept for 5 seconds\")\n\n\n\nmain()\n","sub_path":"Lyall-s_Files/basic_maze_nav.py","file_name":"basic_maze_nav.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"218411425","text":"from threading import Thread\nfrom queue import Queue\nimport re\nimport json\nimport lxml.html as lxhtml\nimport time\nfrom components import Attr, Template, Getter\n\n\nclass ParseWorker(Thread):\n '''\n A generic parser that executes the functions specified in the\n self.css variable. For use without parent Thread supply keyword\n arguments:\n name = str,\n domain = str,\n next_q = queue.Queue(),\n store_q = queue.Queue(),\n\n The ParseWorker expects the following tuple to be present in the queue:\n (url_meta[dict], html[str], url[str])\n '''\n def __init__(self, parent=None, objects=dict, raw_html=dict,\n next_q=Queue(), **kwargs):\n super(ParseWorker, self).__init__()\n if parent or kwargs and next_q:\n self.parent = parent\n self.name = parent.name\n self.domain = parent.domain\n # self.templates = templates\n self.raw_html = raw_html\n self.get_q = Queue()\n self.next_q = parent.get_q\n self.output_q = parent.output_q\n self.seen = set()\n self.forward = set()\n self.average = []\n self.parsed = 0\n\n else:\n raise Exception('Not enough specified, please read the docstring')\n\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n def run(self):\n while True:\n item = self.get_q.get()\n if item is None:\n break\n\n getter = item\n self.seen.add(getter.url)\n\n html = lxhtml.fromstring(getter.got)\n html.make_links_absolute(self.domain)\n\n start = time.time()\n for template in self.templates:\n to_store = template.store\n selected = self._get_selected(html, template)\n\n if template.store:\n to_store.objects = self.make_objects(template,\n selected, getter)\n\n if not to_store.objects and template.required:\n print('nothing found')\n self._handle_empty()\n self.output_q.put(to_store)\n else:\n self.make_objects(template, selected, getter)\n took = time.time() - start\n self.average.append(took)\n self.get_q.task_done()\n\n def _get_selected(self, html, template):\n if not template.js_regex:\n selected = 
template.selector(html) if template.selector else [html]\n else:\n regex = re.compile(template.js_regex)\n selected = []\n # Find all the scripts that match the regex.\n scripts = (regex.findall(s.text_content())[0] for s in\n html.cssselect('script')\n if regex.search(s.text_content()))\n\n # Set selected to the scripts\n for script in scripts:\n selected.extend(json.loads(script))\n return selected\n\n def make_objects(self, template, selected, getter):\n objects = []\n # print('aantal links', len(selected))\n for sel in selected:\n objct = Template(name=template.name)\n objct.url = getter.url\n\n # Set predefined attributes from the getter.\n #print('aantal attrs', len(getter.attrs))\n for attr in getter.attrs:\n objct.attrs.append(attr.duplicate())\n\n # Set the attribute values\n for temp_attr in template.attrs:\n parsed = temp_attr.func(sel, temp_attr.selector,\n **temp_attr.kws)\n attr = Attr(name=temp_attr.name, value=parsed)\n objct.attrs.append(attr)\n\n # Create a request from the attribute if desirable\n if temp_attr.getter and parsed:\n if type(parsed) != list:\n parsed = [parsed]\n\n for value in parsed:\n new_getter = Getter(**temp_attr.getter)\n new_getter.url = value\n self._handle_getter(new_getter)\n\n if template.getter:\n self._handle_object_getter(objct)\n objects.append(objct)\n return objects\n\n def _handle_object_getter(self, objct):\n getter = objct.getter\n url_params = {attr.name: attr.value for attr in objct.attrs}\n\n if getter.method == 'post':\n getter.data = url_params\n else:\n getter.params = url_params\n self._handle_getter(objct.getter, url_params)\n\n def _handle_getter(self, getter):\n if getter.url and getter.url not in self.seen:\n if getter.active:\n self.next_q.put(getter)\n else:\n self.forward.add(getter)\n\n self.seen.add(getter.url)\n\n def _handle_empty(self):\n '''\n Gracefull shutdown if no more objects are found.\n with self.next_q.mutex:\n print('clearing')\n self.next_q.queue.clear()\n 
self.get_q.queue.clear()\n\n for _ in self.parent.get_workers:\n self.next_q.put(None)\n self.get_q.put(None)\n '''\n\n while not self.next_q.empty():\n try:\n self.next_q.get(False)\n except Empty:\n continue\n self.next_q.task_done()\n","sub_path":"modelscraper/workers/parse_worker.py","file_name":"parse_worker.py","file_ext":"py","file_size_in_byte":5509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"457040424","text":"class LightState(object):\n on = False\n brightness = 0\n hue = 0\n saturation = 0\n xy = []\n ct = 0\n alert = 'none'\n effect = 'none'\n colormode = 'hs' # hs,xy,ct\n reachable = True # currently always true\n\n def __init__(self, data):\n if data == None: return\n self.bulkset(data)\n\n def set(self, stateKey, stateValue):\n if stateKey in ['bri', 'hue', 'sat', 'ct']:\n stateValue = int(stateValue)\n\n setattr(self, stateKey, stateValue)\n\n def bulkset(self, data):\n if 'on' in data:\n self.on = data['on']\n\n if 'bri' in data:\n self.brightness = int(data['bri'])\n\n if 'hue' in data:\n self.hue = int(data['hue'])\n\n if 'sat' in data:\n self.saturation = int(data['sat'])\n\n if 'xy' in data:\n self.xy = data['xy']\n\n if 'ct' in data:\n self.ct = int(data['ct'])\n\n if 'alert' in data:\n self.alert = data['alert']\n\n if 'effect' in data:\n self.effect = data['effect']\n\n if 'colormode' in data:\n self.colormode = data['colormode']\n\n if 'reachable' in data:\n self.reachable = data['reachable']","sub_path":"pyhueapi/light_state.py","file_name":"light_state.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"534013172","text":"from django.contrib import admin\r\n\r\nfrom titles.models import Title\r\n\r\n\r\nclass TitleAdmin(admin.ModelAdmin):\r\n list_display = (\"id\", \"name\", 'rating')\r\n search_fields = (\"text\",)\r\n empty_value_display = '-пусто-'\r\n\r\n\r\nadmin.site.register(Title, TitleAdmin)\r\n","sub_path":"titles/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"301166716","text":"\"\"\"\nnpy2categories_utils.py: link the obtained categories to the already existing npy_sentece files\n\nnpy2sentences_utils.py path_to_npy_file path_to_sentence_file path_to_target_folder\n\npath_to_npy_file: set path to the .npy file containing all the train, val or test data\npath_to_sentence_file: set path to transformed (cleaned, processed) .txt-file containing all sentences\ne.g. how2sign.train.id_transformed.txt\n - e.g. a line in the file: ad4_GWc5XRo_10 one two three\npath_to_target_folder where the new file should be saved to\n\nDIFFERENCE to npy2sentences:\n vid_speaker = kp[:11]\n\n Just compare the first 11 characters instead of adding the part to it\n\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport sys\nfrom pathlib import Path\n\n\nclass CategoriesToNpy:\n\n def __init__(self, path_to_numpy_file, path_to_csv, path_to_target):\n self.path_to_numpy_file = Path(path_to_numpy_file)\n self.path_to_csv = Path(path_to_csv)\n self.path_to_target = Path(path_to_target)\n old = np.load\n np.load = lambda *a, **k: old(*a, **k, allow_pickle=True)\n\n def main(self):\n self.categories2sentence()\n\n def categories2sentence(self):\n \"\"\" load from .npy file \"\"\"\n kp_files = np.load(self.path_to_numpy_file).item()\n df_kp = pd.DataFrame(kp_files.keys(), columns=[\"keypoints\"])\n kp2sentence = []\n\n d = {'keypoints': [], 'text': []}\n with open(self.path_to_csv) as f:\n for line in f:\n d['keypoints'].append(line.split(\" \")[0])\n d['text'].append(\" \".join(line.split()[1:]))\n df_text = pd.DataFrame(d)\n\n speaker = []\n counter = 0\n for kp in df_kp[\"keypoints\"]:\n vid_speaker = kp[:11]\n speaker.append(vid_speaker)\n for idx in range(len(df_text['keypoints'])):\n if vid_speaker in df_text['keypoints'][idx]:\n kp2sentence.append([kp, df_text['text'][idx]])\n break\n\n if counter % 250 == 0:\n print(\"Folder %d of %d\" % (counter, len(df_kp[\"keypoints\"])))\n counter += 1\n df_kp_text_train = 
pd.DataFrame(kp2sentence, columns=[\"keypoints\", \"text\"])\n df_kp_text_train.to_csv(self.path_to_target / str(str(self.path_to_csv.name) + \"_2npy.txt\"), index=False)\n\n\nif __name__ == '__main__':\n # file with sentences\n if len(sys.argv) > 1:\n path_to_numpy_file = sys.argv[1]\n else:\n print(\"Set path to npy file\")\n sys.exit()\n\n # sentences file\n if len(sys.argv) > 2:\n path_to_csv = sys.argv[2]\n else:\n print(\"Set path to transformed file containing categories\")\n sys.exit()\n\n # target folder\n if len(sys.argv) > 3:\n path_to_target = sys.argv[3]\n else:\n print(\"Set path to target folder\")\n sys.exit()\n\n npy = CategoriesToNpy(path_to_numpy_file, path_to_csv, path_to_target)\n npy.main()\n","sub_path":"utils/npy2categories_utils.py","file_name":"npy2categories_utils.py","file_ext":"py","file_size_in_byte":2968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"281319463","text":"import sys\nsys.path.insert(0, '/tensorflowvgg')\nimport os\nimport pickle\nfrom os.path import isfile, isdir\nimport numpy as np\nimport tensorflow as tf\nfrom keras.preprocessing.image import load_img, img_to_array\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.model_selection import train_test_split, cross_val_score, ShuffleSplit, cross_val_predict, RepeatedKFold\nfrom sklearn.utils import shuffle\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.gaussian_process.kernels import RBF\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.decomposition import PCA\nfrom sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier\nfrom sklearn.manifold import TSNE\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.metrics import accuracy_score, confusion_matrix, auc, roc_curve, mean_absolute_error\nimport argparse\nimport tensorflowvgg.vgg19 as vgg19\nimport utility_functions\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport scipy\nimport json\n\ncodes_path = './codes'\nlabels_path = './labels'\nnames_path = './names'\nradio_input_classify, radio_input_confidence = utility_functions.loadRadiologistData(\"../RadiologistData/radiologistInput.csv\", 1, 0)\n\n\nimages_normal_train, labels_normal_train, names_normal_train = utility_functions.loadImagesFromDir((\"../Images/CherryPickedWithRadiologistInputAllWhite/NormalTrain\",), (0,))\nimages_normal_test, labels_normal_test, names_normal_test = utility_functions.loadImagesFromDir((\"../Images/CherryPickedWithRadiologistInputAllWhite/NormalTest\",), (0,))\nimages_abnormal_train, labels_abnormal_train, names_abnormal_train = 
utility_functions.loadImagesFromDir((\"../Images/CherryPickedWithRadiologistInputAllWhite/AbnormalTrain\",), (1,))\nimages_abnormal_test, labels_abnormal_test, names_abnormal_test = utility_functions.loadImagesFromDir((\"../Images/CherryPickedWithRadiologistInputAllWhite/AbnormalTest\",), (1,))\nimages_contralateral_test, labels_contralateral_test, names_contralateral_test = utility_functions.loadImagesFromDir((\"../Images/CherryPickedWithRadiologistInputAllWhite/ContralateralTest\",), (0,))\nnames_all = np.append(np.append(np.append(names_normal_train, names_normal_test, axis=0), names_abnormal_train, axis=0), names_abnormal_test, axis=0)\nlabels_all = np.append(np.append(np.append(labels_normal_train, labels_normal_test, axis=0), labels_abnormal_train, axis=0), labels_abnormal_test, axis=0)\n\nsess = tf.Session()\nprint(\"Session start\")\n\nvgg = vgg19.Vgg19()\ninput_ = tf.placeholder(tf.float32, [None, 224, 224, 3])\nwith tf.name_scope(\"content_vgg\"):\n vgg.build(input_)\n# Get the values from the relu6 layer of the VGG network\nfeed_dict_normal_train = {input_: images_normal_train}\nfeed_dict_normal_test = {input_: images_normal_test}\nfeed_dict_cancer_train = {input_: images_abnormal_train}\nfeed_dict_cancer_test = {input_: images_abnormal_test}\nfeed_dict_contralateral = {input_: images_contralateral_test}\n\ncodes_normal_train = sess.run(vgg.relu6, feed_dict=feed_dict_normal_train)\ncodes_normal_test = sess.run(vgg.relu6, feed_dict=feed_dict_normal_test)\ncodes_cancer_train = sess.run(vgg.relu6, feed_dict=feed_dict_cancer_train)\ncodes_cancer_test = sess.run(vgg.relu6, feed_dict=feed_dict_cancer_test)\ncodes_contralateral = sess.run(vgg.relu6, feed_dict=feed_dict_contralateral)\nsess.close()\n\n\"\"\" next block is for TSNE plot \"\"\"\n\ncodes_all = np.append(np.append(np.append(codes_normal_train, codes_normal_test, axis=0), codes_cancer_train, axis=0), codes_cancer_test, axis=0)\n#codes_all = 
PCA(n_components=50).fit_transform(codes_all)\ntsne_embedding = TSNE(n_components=2, perplexity=5).fit_transform(codes_all)\njson_dict = {}\ni=0\nfor name in names_all:\n json_dict[name] = {}\n json_dict[name][\"position\"] = tsne_embedding[i].tolist()\n json_dict[name][\"label\"] = str(labels_all[i])\n i = i + 1\n\"\"\"\nfig = plt.figure()\nax = fig.add_subplot(1, 1, 1)\nax.scatter(tsne_embedding[0:len(codes_normal_train)+len(codes_normal_test),0], tsne_embedding[0:len(codes_normal_train)+len(codes_normal_test),1], edgecolors='none', c=\"blue\", label=\"normal\")\nax.scatter(tsne_embedding[len(codes_normal_train)+len(codes_normal_test):,0], tsne_embedding[len(codes_normal_train)+len(codes_normal_test):,1], edgecolors='none', c=\"red\", label=\"cancer\")\nplt.legend(loc='lower right', fontsize='x-large')\nplt.title(\"t-sne embedding\")\nplt.xlim([min(tsne_embedding[:,0]-1), max(tsne_embedding[:,0]+1)])\nplt.ylim([min(tsne_embedding[:,1]-1), max(tsne_embedding[:,1]+1)])\nplt.show()\n\"\"\"\n\nclf = LinearSVC(C=0.0001)\n\nX_train = np.append(codes_normal_train, codes_cancer_train, axis=0)\nX_test = np.append(codes_normal_test, codes_cancer_test, axis=0)\n\ny_train = np.append(labels_normal_train, labels_abnormal_train, axis=0)\ny_test = np.append(labels_normal_test, labels_abnormal_test, axis=0)\n\nnames_train = np.append(names_normal_train, names_abnormal_train, axis=0)\nnames_test = np.append(names_normal_test, names_abnormal_test, axis=0)\n\"\"\"\nC_values = [1000000, 500000, 100000, 50000, 10000, 5000, 1000, 500, 100, 50, 10, 5, 1, 0.5, 0.1, 0.05, 0.01, 0.005, 0.001, 0.0005, 0.0001, 0.00005, 0.00001, 0.000005, 0.000001]\nC_value_scores = []\nfor spot in range(len(C_values)):\n clf = LinearSVC(C=C_values[spot])\n kFolds = 5\n iterations = 50\n random_state = 4597834\n i = 0\n averageScore = 0\n rollingAverage = 0\n rkf = RepeatedKFold(n_splits=kFolds, n_repeats=iterations, random_state=random_state)\n\n for train_index, test_index in rkf.split(X_train):\n 
X_train_CV, X_test_CV = X_train[train_index], X_train[test_index]\n y_train_CV, y_test_CV = y_train[train_index], y_train[test_index]\n clf.fit(X_train_CV, y_train_CV)\n score = clf.score(X_test_CV, y_test_CV)\n averageScore = averageScore + score\n rollingAverage = rollingAverage + score\n i = i + 1\n if i % kFolds == 0:\n print(\"Average for \" + str(kFolds) + \"-split \" + str(i / kFolds) + \": \" + str (rollingAverage / kFolds))\n rollingAverage = 0\n\n averageScore = averageScore / i\n\n print(\"Average score: \" + str(averageScore))\n C_value_scores.append(averageScore)\nprint(C_value_scores)\n\"\"\"\nclf.fit(X_train, y_train)\nscore = clf.score(X_test, y_test)\nfpr, tpr, thresholds = roc_curve(y_test, clf.decision_function(X_test))\nroc_auc = auc(fpr, tpr)\nprint(\"AUC for model: \" + str(roc_auc))\nprint(\"Stdev for AUC: \" + str())\nprint(\"Final accuracy for model: \" + str(score))\n#print(\"Overall score: \" + str(clf.score(np.append(X_train, X_test, axis=0), np.append(y_train, y_test, axis=0))))\n\n\nmodel_confidence = {}\nmodel_classification = {}\nmodel_classification_contralateral = {}\nmodel_confidence_contralateral = {}\n\nconfidence_values = clf.decision_function(X_test)\nscaler = MinMaxScaler(feature_range=(-1, 1))\nconfidence_values = scaler.fit_transform(np.array(confidence_values).reshape(-1, 1)).reshape(-1)\ni = 0\nfor item in confidence_values:\n model_confidence[names_test[i]] = abs(item)\n i = i + 1\n\npredictions = clf.predict(X_test)\ni = 0\nfor item in predictions:\n model_classification[names_test[i]] = item\n i = i + 1\n\npredictions_contralateral = clf.predict(codes_contralateral)\ni = 0\nfor item in predictions_contralateral:\n model_classification_contralateral[names_contralateral_test[i]] = item\n i = i + 1\n\nconfidence_values_contralateral = clf.decision_function(codes_contralateral)\ni = 0\nfor item in confidence_values_contralateral:\n model_confidence_contralateral[names_contralateral_test[i]] = abs(item)\n i = i + 
1\n\nmodel_confidence_all = {}\nmodel_classification_all = {}\n\nconfidence_values_all = clf.decision_function(np.append(X_train, X_test, axis=0))\ni = 0\nfor item in confidence_values_all:\n model_confidence_all[names_all[i]] = abs(item)\n i = i + 1\n\npredictions_all = clf.predict(np.append(X_train, X_test, axis=0))\ni = 0\nfor item in predictions_all:\n model_classification_all[names_all[i]] = item\n i = i + 1\n\nfor name in names_all:\n if name in model_confidence_all.keys():\n json_dict[name][\"model_confidence\"] = str(model_confidence_all[name])\n if name in model_classification_all.keys():\n json_dict[name][\"model_classification\"] = str(model_classification_all[name])\n if name in radio_input_classify.keys():\n json_dict[name][\"radiologist_classification\"] = str(radio_input_classify[name])\n else:\n json_dict[name][\"radiologist_classification\"] = \"N/A\"\n if name in radio_input_confidence.keys():\n json_dict[name][\"radiologist_confidence\"] = str(radio_input_confidence[name])\n else:\n json_dict[name][\"radiologist_confidence\"] = \"N/A\"\n i = i + 1\nwith open('js/VisualizationInformation.txt', 'w') as json_file:\n json.dump(json_dict, json_file)\n\n#utility_functions.printListInOrder(y_test)\n#print(\"break\")\n#utility_functions.printDictionaryInOrder(names_test, radio_input_classify)\n#print(\"break\")\n#utility_functions.printDictionaryInOrder(names_test, radio_input_confidence)\n\n\nradio_confidence = []\nfor name in names_test:\n radio_confidence.append(radio_input_classify[name])\nfpr, tpr, thresholds = roc_curve(y_test, radio_confidence)\nroc_auc = auc(fpr, tpr)\nprint(\"AUC for radiologists: \" + str(roc_auc))\n\nconfidence_values_model = []\nconfidence_values_radiologist = []\nfor i in range(len(names_test)):\n if names_test[i] in radio_input_confidence.keys():\n if model_classification[names_test[i]] == 1:\n confidence_values_model.append(-model_confidence[names_test[i]])\n else: \n 
confidence_values_model.append(model_confidence[names_test[i]])\n if radio_input_confidence[names_test[i]] == 1:\n confidence_values_radiologist.append(-radio_input_confidence[names_test[i]])\n else:\n confidence_values_radiologist.append(radio_input_confidence[names_test[i]])\nscaler = MinMaxScaler(feature_range=(0, 1))\nconfidence_values_model = scaler.fit_transform(np.array(confidence_values_model).reshape(-1, 1)).reshape(-1)\nr, p = scipy.stats.pearsonr(confidence_values_model, confidence_values_radiologist)\n#print(\"Pearson r: \" + str(r) + \", p-value: \" + str(p))\n\nplt.plot(fpr, tpr, 'darkorange',\n label='AUC = %0.2f'% roc_auc)\nplt.legend(loc='lower right', fontsize='x-large')\nplt.title(\"ROC Curve - Linear SVM\")\nplt.plot([0, 1], [0, 1], color='#67809f', linestyle='--')\nplt.xlim([-0.1, 1.0])\nplt.ylim([-0.1, 1.0])\nplt.ylabel('True Positive Rate', fontsize=14)\nplt.xlabel('False Positive Rate', fontsize=14)\nplt.show()\n\n\n\"\"\"\n\nThe following code is to add a voting system in hopes to increase accuracy\n\n\"\"\"\nfor i in range(len(names_test)):\n name = names_test[i]\n radio_score = radio_input_confidence[name]\n model_score = model_confidence[name]\n if radio_score > model_score:\n predictions[i] = radio_input_classify[name]\n confidence_values[i] = radio_score\n else:\n predictions[i] = model_classification[name]\n confidence_values[i] = model_score\n if predictions[i] == 0:\n confidence_values[i] = -confidence_values[i]\n\nnumCorrect = 0\nfor i in range(len(names_test)):\n if predictions[i] == y_test[i]:\n numCorrect = numCorrect + 1\nnewAccuracy = float(numCorrect) / len(names_test)\nprint(\"Voting system accuracy: \" + str(newAccuracy))\nfpr, tpr, thresholds = roc_curve(y_test, confidence_values)\nroc_auc = auc(fpr, tpr)\nprint(\"Voting system AUC: \"+str(roc_auc))\nplt.plot(fpr, tpr, 'darkorange',\n label='AUC = %0.2f'% roc_auc)\nplt.legend(loc='lower right', fontsize='x-large')\nplt.title(\"ROC Curve - Voting System\")\nplt.plot([0, 1], 
[0, 1], color='#67809f', linestyle='--')\nplt.xlim([-0.1, 1.0])\nplt.ylim([-0.1, 1.0])\nplt.ylabel('True Positive Rate', fontsize=14)\nplt.xlabel('False Positive Rate', fontsize=14)\nplt.show()\n\n\nutility_functions.printListInOrder(predictions)\nprint(\"break\")\nutility_functions.printDictionaryInOrder(names_test, model_confidence)\nprint(\"break\")\nutility_functions.printDictionaryInOrder(names_test, model_classification)\n","sub_path":"Solution3/Code/CherryPickedClassifier.py","file_name":"CherryPickedClassifier.py","file_ext":"py","file_size_in_byte":12150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"408542523","text":"import unittest\nfrom sqlalchemy import create_engine, Column, Integer, String, ForeignKey, Table\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_testing import TestCase\nfrom sqlalchemy.orm import sessionmaker, relationship\nfrom flask import Flask\nfrom openpatch_core.database.elastic_query import ElasticQuery\nfrom openpatch_core.models import Base\nfrom openpatch_core.database import db\nfrom sqlalchemy.dialects import mysql\n\n\nclass City(Base):\n __tablename__ = \"city\"\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n\n users = relationship(\"User\", back_populates=\"city\")\n\n def __repr__(self):\n return str(self.id)\n\n\nclass UserColor(Base):\n __tablename__ = \"user_color\"\n\n user_id = Column(Integer, ForeignKey(\"users.id\"), primary_key=True)\n color_id = Column(Integer, ForeignKey(\"colors.id\"), primary_key=True)\n\n user = relationship(\"User\", back_populates=\"user_colors\")\n color = relationship(\"Color\", back_populates=\"user_colors\")\n\n\nclass User(Base):\n __tablename__ = \"users\"\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n lastname = Column(String)\n uid = Column(Integer)\n city_id = Column(Integer, ForeignKey(City.id))\n city = relationship(City, back_populates=\"users\")\n\n user_colors = relationship(\"UserColor\", back_populates=\"user\")\n\n def __repr__(self):\n return str(self.id)\n\n\nassociation_art_color = Table(\n \"art_color\",\n Base.metadata,\n db.Column(\"art_id\", Integer, ForeignKey(\"arts.id\")),\n db.Column(\"color_id\", Integer, ForeignKey(\"colors.id\")),\n)\n\n\nclass Color(Base):\n __tablename__ = \"colors\"\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n\n user_colors = relationship(\"UserColor\", back_populates=\"color\")\n arts = relationship(\"Art\", secondary=association_art_color, back_populates=\"colors\")\n\n\nclass Art(Base):\n __tablename__ = 
\"arts\"\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n\n colors = relationship(\n \"Color\", secondary=association_art_color, back_populates=\"arts\"\n )\n\n\nclass ElasticQueryTest(TestCase):\n def create_app(self):\n app = Flask(__name__)\n app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///:memory:\"\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n db.init_app(app)\n return app\n\n def setUp(self):\n db.create_all()\n db.session.add_all(\n [\n User(name=\"Jhon\", lastname=\"Galt\", uid=\"19571957\", city_id=1),\n User(name=\"Steve\", lastname=\"Jobs\", uid=\"20092009\", city_id=2),\n User(name=\"Iron\", lastname=\"Man\", uid=\"19571957\", city_id=1),\n City(name=\"Cordoba\"),\n City(name=\"New York\"),\n Color(name=\"red\"),\n Color(name=\"blue\"),\n UserColor(user_id=1, color_id=1),\n UserColor(user_id=1, color_id=2),\n UserColor(user_id=2, color_id=2),\n Art(name=\"Starry Night\"),\n Art(name=\"Mona Lisa\"),\n ]\n )\n db.session.commit()\n\n art1 = Art.query.get(1)\n art1.colors.append(Color.query.get(1))\n art2 = Art.query.get(2)\n art2.colors.append(Color.query.get(2))\n\n db.session.commit()\n\n def test_setup_is_ok(self):\n \"\"\" Demo test \"\"\"\n assert User.query.count() == 3\n\n def test_simple_query(self):\n \"\"\" test simple query \"\"\"\n query_string = '{\"filter\" : {\"uid\" : {\"like\" : \"%1957%\"} } }'\n query, count, page = User.elastic_query(query_string)\n assert query.count() == 2\n assert count == 2\n query_string = (\n '{\"filter\" : {\"name\" : {\"like\" : \"%Jho%\"}, \"lastname\" : \"Galt\" } }'\n )\n query, count, page = User.elastic_query(query_string)\n assert query.count() == 1\n assert count == 1\n\n def test_limit_operator(self):\n query_string = '{\"limit\": 2}'\n query, count, page = User.elastic_query(query_string)\n query = query = page()\n assert query.count() == 2\n assert count == User.query.count()\n\n def test_offset_operator(self):\n query_string = '{\"offset\": 2}'\n query, count, 
page = User.elastic_query(query_string)\n query = page()\n assert query.count() == 1\n assert count == User.query.count()\n\n query_string = '{\"limit\": 2, \"offset\": 2}'\n query, count, page = User.elastic_query(query_string)\n query = page()\n assert query.count() == 1\n assert count == User.query.count()\n\n def test_and_operator(self):\n \"\"\" test and operator \"\"\"\n query_string = '{\"filter\" : {\"and\" : {\"name\" : {\"like\" : \"%Jho%\"}, \"lastname\" : \"Galt\", \"uid\" : {\"like\" : \"%1957%\"} } } }'\n query, count, page = User.elastic_query(query_string)\n assert query.count() == 1\n assert count == 1\n\n def test_or_operator(self):\n \"\"\" test or operator \"\"\"\n query_string = '{\"filter\" : {\"or\" : { \"name\" : \"Jobs\", \"lastname\" : \"Man\", \"uid\" : \"19571957\" } } }'\n query, count, page = User.elastic_query(query_string)\n assert query.count() == 2\n assert count == 2\n\n def test_or_and_operator(self):\n \"\"\" test or and operator \"\"\"\n query_string = '{\"filter\" : {\"or\" : { \"name\" : \"Jhon\", \"lastname\" : \"Galt\" }, \"and\" : { \"uid\" : \"19571957\" } } }'\n query, count, page = User.elastic_query(query_string)\n assert query.count() == 1\n assert count == 1\n\n def test_sorting(self):\n \"\"\" test operator levels \"\"\"\n query_string = '{\"filter\" : {\"or\" : { \"name\" : \"Jhon\", \"lastname\" : \"Man\" } }, \"sort\": { \"name\" : \"asc\" } }'\n results = User.elastic_query(query_string)[0].all()\n assert results[0].name == \"Iron\"\n\n def test_in_operator(self):\n \"\"\" test operator in \"\"\"\n query_string = '{\"filter\" : {\"name\" : {\"in\" : [\"Jhon\", \"Peter\", \"Iron\"] } } }'\n assert User.elastic_query(query_string)[0].count() == 2\n\n query_string = '{\"filter\" : {\"name\" : {\"in\" :[\"Jhon\", \"Peter\", \"Iron\"]}, \"lastname\" : \"Galt\" } }'\n assert User.elastic_query(query_string)[0].count() == 1\n\n def test_allow_fields_option(self):\n \"\"\" test allow_fields option \"\"\"\n query_string 
= '{\"filter\" : {\"or\" : { \"name\" : \"Jhon\", \"lastname\" : \"Man\" } }, \"sort\": { \"name\" : \"asc\" } }'\n enabled_fields = [\"name\"]\n results = User.elastic_query(query_string, enabled_fields=enabled_fields)[\n 0\n ].all()\n assert results[0].name == \"Jhon\"\n\n def test_search_for_levels(self):\n \"\"\" test search for levels \"\"\"\n query_string = '{\"filter\" : {\"or\" : { \"city.name\" : \"New York\", \"lastname\" : \"Man\" } }, \"sort\": { \"name\" : \"asc\" } }'\n results = User.elastic_query(query_string)[0].all()\n assert results[0].name == \"Iron\"\n\n query_string = '{\"filter\" : { \"city.name\" : \"New York\" } }'\n results = User.elastic_query(query_string)[0].all()\n assert results[0].name == \"Steve\"\n\n query_string = '{\"filter\" : { \"city.name\" : {\"like\" : \"%New%\"} } }'\n query = User.elastic_query(query_string)\n results = query[0].all()\n assert results[0].name == \"Steve\"\n\n def test_many_to_many_relationship(self):\n query_string = '{\"filter\" : { \"user_colors.color.name\" : \"red\" } }'\n query = User.elastic_query(query_string)\n results = query[0].all()\n\n assert len(results) == 1\n\n query_string = '{\"filter\" : { \"user_colors.color.name\" : \"blue\" } }'\n query = User.elastic_query(query_string)\n results = query[0].all()\n\n assert len(results) == 2\n\n query_string = '{\"filter\": { \"colors.name\": \"blue\" }}'\n query = Art.elastic_query(query_string)\n\n results = query[0].all()\n assert len(results) == 1\n\n def test_many_to_many_relationship_deep(self):\n query_string = (\n '{\"filter\" : { \"user_colors.color.arts.name\" : \"Starry Night\" } }'\n )\n query = User.elastic_query(query_string)\n results = query[0].all()\n\n assert len(results) == 1\n assert results[0].name == \"Jhon\"\n\n def test_one_to_many_relationship(self):\n query_string = '{\"filter\" : { \"users.name\" : \"Jhon\" } }'\n query = City.elastic_query(query_string)\n results = query[0].all()\n\n assert len(results) == 1\n\n\ndef 
print_query(query):\n print(\n query[0].statement.compile(\n dialect=mysql.dialect(), compile_kwargs={\"literal_binds\": True}\n )\n )\n","sub_path":"tests/database/test_elastic_query.py","file_name":"test_elastic_query.py","file_ext":"py","file_size_in_byte":8633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"230265906","text":"import ffmpeg\nimport cv2\nimport subprocess\n\nppath = \"C:\\\\Users\\\\CrymeAriver\\\\PycharmProjects\\\\tomwaitforitmy_v_scr\\\\\"\nfile_name = 'qwe.flv'\n\nprobe = ffmpeg.probe(ppath+file_name)\nvideo_info = next(s for s in probe['streams'] if s['codec_type'] == 'video')\nwidth = int(video_info['width']/20)\nheight = int(video_info['height']/8)\n\ncap = cv2.VideoCapture(ppath+file_name)\n\ndef cutf(path, name, startframe, fps, endframe, outname):\n command = [\"ffmpeg\", '-i', path+name, '-ss', str(startframe/fps), '-t', str((endframe-startframe)/fps),\n '-c:v', 'libx264',\n '-c:a', 'aac',\n path+outname]\n return command\n\nind = 0\nchanged = False\ncounter = 0\narr = []\ns_counter = 0\n\nwhile(cap.isOpened()):\n start_pos = 0\n ret, frame = cap.read()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n # hist = cv2.calcHist([gray], [0], mask, [256], [0, 256])\n # print(len(hist))\n video = gray[0:height]\n tt = []\n for i in video:\n tt.append(i[0:width])\n val = 0\n for i in tt:\n for o in i:\n val += o[2]\n x = val / (width * height)\n\n if x < 15 and not changed:\n print(x, ind)\n changed = True\n arr.append(ind)\n print(arr)\n if len(arr) == 2:\n a = cutf(ppath, file_name, arr[0], 30, arr[1], 'out' + str(ind) + '.flv')\n arr = []\n process = subprocess.Popen(a, stdout=subprocess.PIPE)\n output, error = process.communicate()\n arr = []\n\n if x > 72 and changed:\n print(x, ind)\n arr.append(ind)\n print(arr)\n changed = False\n\n ind += 1\n # cv2.imshow('frame', frame)\n # print(gray)\n # break\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"lfs_cutter.py","file_name":"lfs_cutter.py","file_ext":"py","file_size_in_byte":1785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"94508094","text":"from tkinter import *\nmaster = Tk()\nmaster.geometry(\"700x600\")\n#==================Label\nl_show= Label(master, text=\"三酷猫:\")\nphoto=PhotoImage(file=\"kwsupicon1.gif\")\nl_show1= Label(master,image=photo)\nl_show.pack(side=\"left\")\nl_show1.pack(side=\"left\")\n#===================Entry\ne_show=Entry(master,width=10)\ne_show.pack(side=\"left\")\n#===================Text\nt_show=Text(master,width=10,height=4)\nt_show.pack(side=\"bottom\")\n#===================Checkbutton\nvar = StringVar()\nc_show=Checkbutton(master,text=\"蓝猫\", variable=var,\n onvalue=\"RGB\", offvalue=\"L\",fg=\"blue\")\nc_show.pack(side=\"top\")\n#=====================Radiobutton\nv = IntVar()\nr_show=Radiobutton(master,text=\"One\",variable=v,value=1)\nr_show.pack(anchor=W)\n#=====================Frame\nf_show=Frame(master,height=200,width=200,bd=1,bg='white',relief=SUNKEN)\nf_show.pack(anchor=\"center\")\n#======================LabelFrame\nlf_show=LabelFrame(master, text=\"Group\",padx=5, pady=5)\nlf_show.pack(padx=10, pady=10,expand=\"yes\")\ne1=Entry(lf_show,width=10)\ne1.pack()\ne2=Entry(lf_show,width=10)\ne2.pack()\n#======================Listbox\nlb_show=Listbox(master,bg=\"yellow\",height=5,width=20)\nlb_show.pack(side=\"top\")\nfor item in [\"one\",\"two\",\"three\",\"four\"]:\n lb_show.insert(END, item)\n#=======================Scrollbar\ns_show=Scrollbar(master)\ns_show.pack(side=RIGHT, fill=Y)\nlb_show1=Listbox(master,fg=\"red\",height=5,width=20)\nlb_show1['yscrollcommand']=s_show.set\nlb_show1.pack(side=\"right\")\nfor item in [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\"]:\n lb_show1.insert(END, item)\ns_show.config(command=lb_show.yview)\n#========================Scale\nsc_show= Scale(master,from_=0,to=100)\nsc_show.pack(side=\"right\")\n#========================Message及Button\ndef showMessage(event):\n m1=Message(master,text=\"非常好!\",width=60)\n m1.pack()\nb_show=Button(master,text=\"确认\",fg=\"black\")\nb_show.bind(\"\",showMessage) 
\nb_show.pack(side=\"left\")\n#========================Spinbox\nsb_show=Spinbox(master,from_=0,to=10)\nsb_show.pack(side=\"left\")\n#========================Toplevel\ntL_show=Toplevel(master)\ntL_show.wm_attributes(\"-topmost\",1)\ntL_show.title(\"OK!\")\nt1_show=Text(tL_show,width=10,height=4)\nt2_show=Text(tL_show,width=10,height=4)\nt1_show.pack()\nt2_show.pack()\n#========================PanedWindow\npw=PanedWindow(orient=VERTICAL,bg=\"green\")\npw.pack(fill=BOTH,expand=1)\nfor w in [Label,Button,Checkbutton,Radiobutton]:\n pw.add(w(pw,text = 'hello'))\nmainloop()\n\n","sub_path":"第11章/base_easy.py","file_name":"base_easy.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"312497899","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-x86_64/egg/pypeerassets/pavoteproto_pb2.py\n# Compiled at: 2018-10-13 10:32:19\n# Size of source mod 2**32: 5042 bytes\nimport sys\n_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))\nfrom google.protobuf import descriptor as _descriptor\nfrom google.protobuf import message as _message\nfrom google.protobuf import reflection as _reflection\nfrom google.protobuf import symbol_database as _symbol_database\nfrom google.protobuf import descriptor_pb2\n_sym_db = _symbol_database.Default()\nDESCRIPTOR = _descriptor.FileDescriptor(name='0005-on-chain-voting-transaction-specification.proto',\n package='',\n syntax='proto3',\n serialized_pb=(_b('\\n40005-on-chain-voting-transaction-specification.proto\"Ý\\x01\\n\\x04Vote\\x12\\x0f\\n\\x07version\\x18\\x01 \\x01(\\r\\x12\\x13\\n\\x0bdescription\\x18\\x02 \\x01(\\t\\x12\\x13\\n\\x0bstart_block\\x18\\x03 \\x01(\\r\\x12\\x11\\n\\tend_block\\x18\\x04 \\x01(\\r\\x12\\x12\\n\\ncount_mode\\x18\\x05 \\x01(\\r\\x12\\x0f\\n\\x07choices\\x18\\x06 \\x03(\\t\\x12\\x15\\n\\rvote_metainfo\\x18\\x07 \\x01(\\x0c\"K\\n\\x04MODE\\x12\\x08\\n\\x04NONE\\x10\\x00\\x12\\n\\n\\x06SIMPLE\\x10\\x01\\x12\\x17\\n\\x13WEIGHT_CARD_BALANCE\\x10\\x03\\x12\\x14\\n\\x10WEIGHT_CARD_DAYS\\x10\\x07b\\x06proto3')))\n_VOTE_MODE = _descriptor.EnumDescriptor(name='MODE',\n full_name='Vote.MODE',\n filename=None,\n file=DESCRIPTOR,\n values=[\n _descriptor.EnumValueDescriptor(name='NONE',\n index=0,\n number=0,\n options=None,\n type=None),\n _descriptor.EnumValueDescriptor(name='SIMPLE',\n index=1,\n number=1,\n options=None,\n type=None),\n _descriptor.EnumValueDescriptor(name='WEIGHT_CARD_BALANCE',\n index=2,\n number=3,\n options=None,\n type=None),\n 
_descriptor.EnumValueDescriptor(name='WEIGHT_CARD_DAYS',\n index=3,\n number=7,\n options=None,\n type=None)],\n containing_type=None,\n options=None,\n serialized_start=203,\n serialized_end=278)\n_sym_db.RegisterEnumDescriptor(_VOTE_MODE)\n_VOTE = _descriptor.Descriptor(name='Vote',\n full_name='Vote',\n filename=None,\n file=DESCRIPTOR,\n containing_type=None,\n fields=[\n _descriptor.FieldDescriptor(name='version',\n full_name='Vote.version',\n index=0,\n number=1,\n type=13,\n cpp_type=3,\n label=1,\n has_default_value=False,\n default_value=0,\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n options=None),\n _descriptor.FieldDescriptor(name='description',\n full_name='Vote.description',\n index=1,\n number=2,\n type=9,\n cpp_type=9,\n label=1,\n has_default_value=False,\n default_value=(_b('').decode('utf-8')),\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n options=None),\n _descriptor.FieldDescriptor(name='start_block',\n full_name='Vote.start_block',\n index=2,\n number=3,\n type=13,\n cpp_type=3,\n label=1,\n has_default_value=False,\n default_value=0,\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n options=None),\n _descriptor.FieldDescriptor(name='end_block',\n full_name='Vote.end_block',\n index=3,\n number=4,\n type=13,\n cpp_type=3,\n label=1,\n has_default_value=False,\n default_value=0,\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n options=None),\n _descriptor.FieldDescriptor(name='count_mode',\n full_name='Vote.count_mode',\n index=4,\n number=5,\n type=13,\n cpp_type=3,\n label=1,\n has_default_value=False,\n default_value=0,\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n options=None),\n _descriptor.FieldDescriptor(name='choices',\n 
full_name='Vote.choices',\n index=5,\n number=6,\n type=9,\n cpp_type=9,\n label=3,\n has_default_value=False,\n default_value=[],\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n options=None),\n _descriptor.FieldDescriptor(name='vote_metainfo',\n full_name='Vote.vote_metainfo',\n index=6,\n number=7,\n type=12,\n cpp_type=9,\n label=1,\n has_default_value=False,\n default_value=(_b('')),\n message_type=None,\n enum_type=None,\n containing_type=None,\n is_extension=False,\n extension_scope=None,\n options=None)],\n extensions=[],\n nested_types=[],\n enum_types=[\n _VOTE_MODE],\n options=None,\n is_extendable=False,\n syntax='proto3',\n extension_ranges=[],\n oneofs=[],\n serialized_start=57,\n serialized_end=278)\n_VOTE_MODE.containing_type = _VOTE\nDESCRIPTOR.message_types_by_name['Vote'] = _VOTE\n_sym_db.RegisterFileDescriptor(DESCRIPTOR)\nVote = _reflection.GeneratedProtocolMessageType('Vote', (_message.Message,), dict(DESCRIPTOR=_VOTE,\n __module__='0005_on_chain_voting_transaction_specification_pb2'))\n_sym_db.RegisterMessage(Vote)","sub_path":"pycfiles/pypeflow-0.0.1-py3-none-any/pavoteproto_pb2.cpython-36.py","file_name":"pavoteproto_pb2.cpython-36.py","file_ext":"py","file_size_in_byte":5023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"393881411","text":"# -*- encoding: utf-8 -*-\nfrom PyQt4 import QtGui\n\n\nVERSION_NUMBER = 5\n\nMRAY_URI = \"http://mRay.org\"\nMRAY_VERSION_FILE = MRAY_URI + \"/currentVersion.txt\"\nMRAY_ONLINE_HELP = MRAY_URI + \"/documentation/index.html\"\nMRAY_WEBSITE = MRAY_URI + \"/index.html\"\nGITHUB_SITE = \"https://github.com/hmcontroller/microRay\"\n\n\nAVAILABLE_FRAMEWORKS = [\n # {\"macroName\": \"MBED_2_UDP\", \"displayName\": u\"Mbed 2 UDP\", \"interface\": \"UDP\", \"template\": \"mbed_2_udp.c\"},\n # {\"macroName\": \"MBED_2_SERIAL\", \"displayName\": u\"Mbed 2 Serial\", \"interface\": \"SERIAL\", \"template\": \"mbed_2_serial.c\"},\n {\"macroName\": \"MBED_OS_UDP\", \"displayName\": u\"Mbed OS UDP\", \"interface\": \"UDP\", \"template\": \"mbed_os_udp.c\"},\n {\"macroName\": \"MBED_OS_SERIAL\", \"displayName\": u\"Mbed OS Serial\", \"interface\": \"SERIAL\", \"template\": \"mbed_os_serial.c\"},\n {\"macroName\": \"ARDUINO_UDP\", \"displayName\": u\"Arduino UDP\", \"interface\": \"UDP\", \"template\": \"arduino_udp.c\"},\n {\"macroName\": \"ARDUINO_SERIAL\", \"displayName\": u\"Arduino Serial\", \"interface\": \"SERIAL\", \"template\": \"arduino_serial.c\"},\n {\"macroName\": \"CUBE_IDE_UDP\", \"displayName\": u\"Cube IDE UDP\", \"interface\": \"UDP\", \"template\": \"cube_ide_udp.c\"},\n {\"macroName\": \"CUBE_IDE_SERIAL\", \"displayName\": u\"Cube IDE Serial\", \"interface\": \"SERIAL\", \"template\": \"cube_ide_serial.c\"}\n]\n\nRELATIVE_PATH_TO_APPLICATION_SETTINGS = \"applicationSettings.json\"\n\n\nCHECK_BOX_FONT = QtGui.QFont()\nCHECK_BOX_FONT.setPointSize(8)\n\nUSER_INPUT_WARNING_COLOR = QtGui.QColor(255, 165, 0)\nCONFIRMATION_TIMEOUT_WARNING_COLOR = QtGui.QColor(210, 0, 0)\n# NEGATIVE_CONFIRMATION_WARNING_COLOR = QtGui.QColor(50, 200, 50)\nNEGATIVE_CONFIRMATION_WARNING_COLOR = QtGui.QColor(210, 30, 0)\n\nHOVER_COLOR = QtGui.QColor(200, 200, 200)\nMOUSE_DOWN_COLOR = QtGui.QColor(150, 150, 150)\n\nPENDING_VALUE_COLOR = QtGui.QColor(210, 0, 
0)\n\nCABLE_PEN = QtGui.QPen()\nCABLE_PEN.setColor(QtGui.QColor(0, 0, 0))\nCABLE_PEN.setCosmetic(True)\nCABLE_PEN.setWidth(2)\n\n","sub_path":"gui/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"373671889","text":"from __future__ import division\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.nn.utils.rnn import pad_packed_sequence as unpack\nfrom torch.nn.utils.rnn import pack_padded_sequence as pack\nimport torch.nn.init as weight_init\nimport torch.nn.functional as F\nimport numpy as np\nfrom cuda_IndRNN_onlyrecurrent import IndRNN_onlyrecurrent as IndRNN\n#if no cuda, then use the following line\n#from IndRNN_onlyrecurrent import IndRNN_onlyrecurrent as IndRNN \n\n\nfrom __main__ import parser,args,U_bound\nMAG=args.MAG\n#U_bound=np.power(10,(np.log10(MAG)/args.seq_len))\nU_lowbound=np.power(10,(np.log10(1.0/MAG)/args.seq_len)) \nfrom utils import Batch_norm_overtime,Linear_overtime_module,Dropout_overtime\nBN=Batch_norm_overtime\nLinear_overtime=Linear_overtime_module\ndropout_overtime=Dropout_overtime.apply\n\n\nclass IndRNNwithBN(nn.Sequential):\n def __init__(self, hidden_size, seq_len,bn_location='bn_before'):\n super(IndRNNwithBN, self).__init__()\n # print(bn_location)\n # if bn_location==\"bn_before\":\n # self.add_module('norm1', BN(hidden_size, args.seq_len))\n # self.add_module('indrnn1', IndRNN(hidden_size))\n # if bn_location==\"bn_after\":\n # self.add_module('norm1', BN(hidden_size, args.seq_len))\n # if (bn_location!='bn_before') and (bn_location!='bn_after'):\n # print('Please select a batch normalization mode.')\n # assert 2==3\n self.add_module('norm1', BN(hidden_size, args.seq_len))\n\nclass stackedIndRNN_encoder(nn.Module):\n def __init__(self, input_size, outputclass):\n super(stackedIndRNN_encoder, self).__init__() \n hidden_size=args.hidden_size\n\n self.DIs=nn.ModuleList()\n denseinput=Linear_overtime(input_size, hidden_size)\n self.DIs.append(denseinput)\n for x in range(args.num_layers - 1):\n denseinput = Linear_overtime(hidden_size, hidden_size)\n self.DIs.append(denseinput)\n\n self.RNNs = nn.ModuleList()\n for x in range(args.num_layers):\n rnn = 
IndRNNwithBN(hidden_size=hidden_size, seq_len=args.seq_len,bn_location=args.bn_location) #IndRNN\n self.RNNs.append(rnn) \n \n self.classifier = nn.Linear(hidden_size, outputclass, bias=True)\n self.init_weights()\n\n def init_weights(self):\n for name, param in self.named_parameters():\n if 'weight_hh' in name:\n param.data.uniform_(0,U_bound) \n if args.u_lastlayer_ini and 'RNNs.'+str(args.num_layers-1)+'.weight_hh' in name:\n param.data.uniform_(U_lowbound,U_bound) \n if ('fc' in name) and 'weight' in name:#'denselayer' in name and \n nn.init.kaiming_uniform_(param, a=8, mode='fan_in')#\n if 'classifier' in name and 'weight' in name:\n nn.init.kaiming_normal_(param.data)\n if ('norm' in name or 'Norm' in name) and 'weight' in name:\n param.data.fill_(1)\n if 'bias' in name:\n param.data.fill_(0.0)\n\n\n def forward(self, input):\n rnnoutputs={} \n rnnoutputs['outlayer-1']=input\n for x in range(len(self.RNNs)):\n rnnoutputs['dilayer%d'%x]=self.DIs[x](rnnoutputs['outlayer%d'%(x-1)])\n rnnoutputs['outlayer%d'%x]= self.RNNs[x](rnnoutputs['dilayer%d'%x]) \n if args.dropout>0:\n rnnoutputs['outlayer%d'%x]= dropout_overtime(rnnoutputs['outlayer%d'%x],args.dropout,self.training)\n temp=rnnoutputs['outlayer%d'%(len(self.RNNs)-1)][-1]\n output = self.classifier(temp)\n # output=F.softmax(output,dim=1)\n return output \n \n \n","sub_path":"Indrnn_plainnet.py","file_name":"Indrnn_plainnet.py","file_ext":"py","file_size_in_byte":3689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"402613799","text":"# rospy for the subscriber\nimport rospy\nimport threading\nfrom thread import start_new_thread\nimport json\nimport urllib2\n# ROS Image message\nfrom sensor_msgs.msg import Image\n# ROS Image message -> OpenCV2 image converter\nfrom cv_bridge import CvBridge, CvBridgeError\n# OpenCV2 for saving an image\nimport cv2\nimport base64\nimport requests\n# Instantiate CvBridge\n# from config import cpp_server\nbridge = CvBridge()\n\nlock = threading.Lock()\ncv = threading.Condition(lock)\nbuffer = {}\n\nfrom ws4py.client.threadedclient import WebSocketClient\nclass CameraSocket(WebSocketClient):\n def opened(self):\n print(\"Connection opened\")\n\n# soc = CameraSocket('ws://localhost:9980')\n\n\ndef image_callback(msg):\n #print(\"Received an image!\")\n \n image = bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')\n image = cv2.resize(image, (50, 50)) \n _, image = cv2.imencode('.png', image)\n lock.acquire()\n buffer['image'] = base64.b64encode(image)\n cv.notifyAll()\n lock.release()\n\n\ndef depth_callback(msg):\n #print(\"Received an image!\")\n # Convert your ROS Image message to OpenCV2\n depth = bridge.imgmsg_to_cv2(msg, desired_encoding='passthrough')\n depth = cv2.resize(depth, (50, 50)) \n depth = cv2.imencode('.png', depth)[1]\n\n lock.acquire()\n buffer['depth'] = base64.b64encode(depth)\n cv.notifyAll()\n lock.release()\n\ndef socket_writer():\n print('bonjour!')\n\n while True:\n cv.acquire()\n while not len(buffer.keys()) == 2:\n cv.wait()\n\n message = json.dumps(buffer)\n kek = json.loads(message)\n\n print('depth len', len(buffer['depth']))\n print('image len', len(buffer['image']))\n print('len of depth', len(kek['depth']))\n print('len of image', len(kek['image']))\n\n r = requests.post('http://psi:9980', data=message)\n \n buffer.clear()\n cv.release()\n break\n\ndef main():\n rospy.init_node('image_listener')\n # Define your image topic\n image_topic = \"/camera/color/image_raw\"\n depth_topic = 
\"/camera/depth/image_raw\"\n\n lock = threading.Lock()\n thr1 = start_new_thread(socket_writer, ())\n\n # Set up your subscriber and define its callback\n rospy.Subscriber(image_topic, Image, image_callback, queue_size=1)\n rospy.Subscriber(depth_topic, Image, depth_callback, queue_size=1)\n\n # Spin until ctrl + c\n rospy.spin()\n thr1.join()\n\nif __name__ == '__main__':\n main()","sub_path":"frontend.py","file_name":"frontend.py","file_ext":"py","file_size_in_byte":2432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"452156509","text":"import hashlib, time, json, requests, node, threading, os\n\nHASHALGO = hashlib.blake2s\nENCTYPE = 'utf-8'\n\nFORCE_MINING_THRESHOLD = 10\n\ndef validateChain(chain):\n\tif isinstance(chain, Blockchain):\n\t\tchain = chain.chain\n\telif isinstance(chain, dict):\n\t\tchain = chain['chain']\n\telif isinstance(chain, list):\n\t\tpass\n\telse:\n\t\traise TypeError('Expected type of chain: either instance of Blockchain or a Blockchain object\\'s dict')\n\tfor i in range(1, len(chain)):\n\t\tif chain[i]['prev hash'] != hash(chain[i - 1]):\n\t\t\treturn False\n\t\tif not validateProof(chain[i]['proof'], chain[i - 1]['proof'], hash(chain[i - 1])):\n\t\t\treturn False\n\treturn True\n\t\ndef hash(block):\n\tblockString = json.dumps(block, sort_keys = True).encode(ENCTYPE)\n\treturn HASHALGO(blockString).hexdigest()\n\t\ndef validateProof(proof, lastProof, prevHash):\n\tguessString = '{}{}{}'.format(prevHash, lastProof, proof).encode(ENCTYPE)\n\tguessHash = HASHALGO(guessString).hexdigest()\n\treturn guessHash[:4] == '0000'\n\t\ndef proofOfWork(lastProof, prevHash):\n\tproof = 0\n\twhile not validateProof(proof, lastProof, prevHash):\n\t\tproof += 1\n\treturn proof\n\t\nclass Blockchain:\n\tdef __init__(self, dictionary = None):\n\t\tif dictionary:\n\t\t\tif isinstance(dictionary, dict):\n\t\t\t\tself.__dict__ = dictionary\n\t\t\telse:\n\t\t\t\traise TypeError('dictionary is required as a arg, given: {}'.format(type(dictionary)))\n\t\telse:\n\t\t\tself.chain = [\n\t\t\t\t{\n\t\t\t\t\t'index': 1,\n\t\t\t\t\t'timestamp': time.time(),\n\t\t\t\t\t'proof': 1,\n\t\t\t\t\t'transactions': {'msg': 'Genesis'},\n\t\t\t\t\t'total transactions': 1,\n\t\t\t\t\t'prev hash': '1',\n\t\t\t\t}\n\t\t\t]\n\t\t\tself.pendingTransactions = {}\n\t\t\tself.mineableTransactions = {}\n\t\t\tself.totalTransactionsInChain = 1\n\t\tself.mineableTransactionsMutex = threading.BoundedSemaphore()\n\t\tself.pendingTransactionsMutex = threading.BoundedSemaphore()\n\t\n\tdef 
__len__(self):\n\t\treturn len(self.chain)\n\t\t\n\tdef isMineable(self):\n\t\tif len(self.pendingTransactions) > 0 or len(self.mineableTransactions) > 0:\n\t\t\tself.pendingTransactionsMutex.acquire()\n\t\t\tfor k, v in self.pendingTransactions.items():\n\t\t\t\tif k not in self.mineableTransactions:\n\t\t\t\t\tself.mineableTransactionsMutex.acquire()\n\t\t\t\t\tself.mineableTransactions[k] = v\n\t\t\t\t\tself.mineableTransactionsMutex.release()\n\t\t\tself.pendingTransactions = {}\n\t\t\tself.pendingTransactionsMutex.release()\n\t\t\treturn True\n\t\treturn False\n\t\n\tdef gottaMine(self):\n\t\tself.mineableTransactionsMutex.acquire()\n\t\tl = len(self.mineableTransactions) >= FORCE_MINING_THRESHOLD\n\t\tself.mineableTransactionsMutex.release()\n\t\treturn l\n\t\t\n\tdef __repr__(self):\n\t\tA = '[\\n'\n\t\tfor i in self.chain:\n\t\t\tA += '\\t{\\n'\n\t\t\tfor k, v in sorted(i.items()):\n\t\t\t\tA += '\\t\\t{}: {}\\n'.format(k, v)\n\t\t\tA += '\\t}\\n'\n\t\tA += ']\\n'\n\t\treturn A\n\t\n\tdef printChain(self):\n\t\tprint('[')\n\t\tfor i in self.chain:\n\t\t\tprint('\\t{')\n\t\t\tfor k, v in sorted(i.items()):\n\t\t\t\tprint('\\t\\t{}: {}'.format(k, v))\n\t\t\tprint('\\t}')\n\t\tprint(']')\n\t\t\n\t@property\n\tdef lastBlock(self):\n\t\tif len(self) != 0:\n\t\t\treturn self.chain[-1]\n\t\t\t\n\tdef validateTransaction(self, key, transaction):\n\t\tself.pendingTransactionsMutex.acquire()\n\t\tif key not in self.pendingTransactions:\n\t\t\tself.pendingTransactionsMutex.release()\n\t\t\tself.mineableTransactionsMutex.acquire()\n\t\t\tif key not in self.mineableTransactions:\n\t\t\t\tself.mineableTransactionsMutex.release()\n\t\t\t\tfor block in self.chain:\n\t\t\t\t\tif key in block['transactions']:\n\t\t\t\t\t\treturn False\n\t\t\t\treturn True\n\t\t\tself.mineableTransactionsMutex.release()\n\t\t\treturn False\n\t\tself.pendingTransactionsMutex.release()\n\t\treturn False\n\t\t\n\tdef addTransaction(self, key, transaction):\n\t\tif self.validateTransaction(key, 
transaction):\n\t\t\tself.pendingTransactionsMutex.acquire()\n\t\t\tself.pendingTransactions[key] = transaction\n\t\t\tself.pendingTransactionsMutex.release()\n\t\t\treturn True\n\t\treturn False\n\t\n\tdef validateBlock(self, block):\n\t\tfor k, v in block['transactions'].items():\n\t\t\tfor blk in self.chain:\n\t\t\t\tif k in blk['transactions']:\n\t\t\t\t\treturn False\n\t\treturn validateProof(block['proof'], self.lastBlock['proof'], hash(self.lastBlock))\n\t\t\t\n\tdef addBlock(self, block):\n\t\tif self.validateBlock(block):\n\t\t\tself.chain.append(block)\n\t\t\tself.totalTransactionsInChain += block['total transactions']\n\t\t\treturn True\n\t\treturn False\n\t\t\n\tdef writeChain(self):\n\t\tf = open('chain.json', 'w')\n\t\tf.write(json.dumps(self.chain, sort_keys = True))\n\t\tf.close()\n\t\t\n\tdef delChain(self):\n\t\tos.remove('chain.json')\n","sub_path":"agent/blockchain.py","file_name":"blockchain.py","file_ext":"py","file_size_in_byte":4320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"398293661","text":"import matplotlib.pyplot as plt\n\nimport streamlit as st\n\ndef show_image_ui(image, **imshow_arg):\n \"\"\"Show image in UI.\n \n It is instead of cv2.imshow to wait for user to check a image.\n Usually it blocked, but this doesn't block and just draw in streamtlit \n when call in streamlit thread.\n \"\"\"\n is_streamlit_thread = st._is_running_with_streamlit\n fig = plt.figure(frameon=False)\n ax = plt.Axes(fig, [0., 0., 1., 1.])\n ax.set_axis_off()\n fig.add_axes(ax)\n ax.imshow(image, aspect='auto', **imshow_arg)\n\n if is_streamlit_thread:\n st.pyplot(fig)\n else:\n plt.show()","sub_path":"remimi/utils/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"609320936","text":"import datetime\nfrom discord_components import Button, ButtonStyle, Select, SelectOption\nimport validators\nfrom src import db\nfrom src import utils\nfrom src import office_hours\nfrom src import cal\n\n\nasync def get_times(ctx, bot, event_type):\n \"\"\"\n Function:\n get_times\n Description:\n Helper function for acquiring the times an instructor wants an event to be held during\n Inputs:\n - ctx: context of the message\n - bot: discord bot object\n - event_type: type of the event\n Output:\n The begin & end times of the event\n \"\"\"\n\n def check(m):\n return m.content is not None and m.channel == ctx.channel and m.author == ctx.author\n\n # Looping until a valid time is entered.\n while True:\n await ctx.send(\n 'Enter in format `-`, and times should be in 24-hour format.\\n'\n f'For example, setting {event_type} from 9:30am to 1pm can be done as 9:30-13\\n'\n + \"Type 'NA' if none. Type 'quit' to abort.\"\n )\n\n msg = await bot.wait_for('message', check=check)\n user_input = msg.content\n\n # Checking whether user entered 'quit' or 'NA'.\n if await check_quit(ctx, user_input):\n return\n elif user_input == 'NA':\n return False\n\n times = msg.content.strip().split('-')\n if len(times) != 2:\n await ctx.send(\"Incorrect input. Please enter the time in the expected format.\\n\")\n continue\n\n new_times = []\n new_time = None\n for t in times:\n parts = t.split(':')\n if len(parts) == 1:\n new_time = (int(parts[0]), 0)\n elif len(parts) == 2:\n new_time = (int(parts[0]), int(parts[1]))\n new_times.append(new_time)\n\n if len(new_times) != 2:\n await ctx.send(\"Incorrect input. 
Please enter the time in the expected format.\\n\")\n continue\n return new_times\n\n\nasync def get_due_time(ctx, bot):\n \"\"\"\n Function:\n get_due_time\n Description:\n Helper function for acquiring the due time of an event\n Inputs:\n - ctx: context of the message\n - bot: discord bot object\n Output:\n The begin & end times of the event\n \"\"\"\n\n def check(m):\n return m.content is not None and m.channel == ctx.channel and m.author == ctx.author\n\n # Looping until a valid time is entered.\n while True:\n await ctx.send(\"Enter in 24-hour format. e.g. an assignment due at 11:59pm \"\n \"can be inputted as 23:59. Type 'NA' if none. Type 'quit to abort.\")\n msg = await bot.wait_for(\"message\", check=check)\n time = msg.content.strip()\n\n # Aborting if user entered 'quit'.\n if await check_quit(ctx, time):\n return\n elif time == 'NA':\n return False\n\n # Checking whether the format is valid. If invalid, continue the loop.\n try:\n time = datetime.datetime.strptime(time, '%H:%M')\n except ValueError:\n try:\n time = datetime.datetime.strptime(time, '%H')\n except ValueError:\n await ctx.send(\"Incorrect input. Please enter the time in the expected format.\\n\")\n continue\n return time\n\n\nasync def check_quit(ctx, value):\n \"\"\"\n Function:\n check_quit\n Description:\n Helper function for checking whether user entered 'quit'.\n Input:\n - ctx: context of the message\n - value: parameter that holds user input\n Output:\n True if user input is 'quit', False otherwise.\n \"\"\"\n if value == 'quit':\n await ctx.send(\"Aborting event creation. 
Type '!create' to restart.\")\n return True\n return False\n\n\nasync def get_date(ctx, bot):\n \"\"\"\n Function:\n get_date\n Description:\n Helper function for acquiring the date or due date of an event\n Input:\n - ctx: context of the message\n - bot: discord bot object\n Output:\n The date or the due date of the event.\n \"\"\"\n def check(m):\n return m.content is not None and m.channel == ctx.channel and m.author == ctx.author\n\n # Looping until a valid date is entered.\n while True:\n await ctx.send(\"Enter in format `MM-DD-YYYY`. Type NA if none. Type 'quit' to abort\")\n msg = await bot.wait_for(\"message\", check=check)\n date = msg.content.strip()\n\n # Aborting if user entered 'quit'.\n if await check_quit(ctx, date):\n return\n elif date == 'NA':\n return False\n\n # Checking whether the format is valid. If invalid, continue the loop.\n try:\n datetime.datetime.strptime(date, '%m-%d-%Y')\n except ValueError:\n await ctx.send(\"Invalid date. Please enter the date in the expected format.\\n\")\n continue\n return date\n\n\nasync def get_url(ctx, bot):\n \"\"\"\n Function:\n get_url\n Description:\n Helper function for acquiring the associated url of an event\n Input:\n - ctx: context of the message\n - bot: discord bot object\n Output:\n The url associated with the event, or False if user enters 'NA'.\n \"\"\"\n\n def check(m):\n return m.content is not None and m.channel == ctx.channel and m.author == ctx.author\n\n # Looping until a valid URL is entered (or 'quit'/'NA' is entered).\n while True:\n await ctx.send(\"Enter the URL. Type NA if none. Type 'quit' to abort.\")\n msg = await bot.wait_for(\"message\", check=check)\n link = msg.content.strip()\n\n if await check_quit(ctx, link):\n return\n elif link == 'NA':\n return False\n elif link and not validators.url(link):\n await ctx.send(\"Invalid URL. 
Please enter a valid URL.\\n\")\n else:\n return link\n\n\nasync def create_event(ctx, bot, testing_mode):\n \"\"\"\n Function:\n create_event\n Description:\n Event creation subroutine\n Input:\n - ctx: context of the message\n - bot: discord bot object\n - testing_mode: flag indicating whether this event is being created during a system test\n Output:\n A new event is created in the database and calendar is updated with the new event.\n \"\"\"\n # creating buttons for event types\n if ctx.channel.name == 'instructor-commands':\n await ctx.send(\n 'Which type of event would you like to create?',\n components=[\n Button(style=ButtonStyle.blue, label='Assignment', custom_id='assignment'),\n Button(style=ButtonStyle.green, label='Exam', custom_id='exam'),\n Button(style=ButtonStyle.red, label='Office Hour', custom_id='office-hour'),\n Button(style=ButtonStyle.gray, label='Custom Event', custom_id='custom-event')\n ],\n )\n # Getting the ID of the clicked button\n button_clicked = ((await utils.wait_for_msg(bot, ctx.channel)).content\n if testing_mode else (await bot.wait_for('button_click')).custom_id)\n\n # If 'Assignment' is clicked, this will run\n if button_clicked == 'assignment':\n def check(m):\n return m.content is not None and m.channel == ctx.channel and m.author == ctx.author\n\n await ctx.send(\"What would you like the assignment to be called? \"\n \"(Type 'quit' to abort)\")\n msg = await bot.wait_for(\"message\", check=check)\n title = msg.content.strip()\n\n # Aborting if user entered 'quit'.\n if await check_quit(ctx, title):\n return\n\n # Getting associated url of the event.\n await ctx.send(\"Is there a link associated with this assignment?\\n \")\n link = await get_url(ctx, bot)\n if link is None:\n return\n\n await ctx.send(\"Extra description for assignment? Type NA if none. 
\"\n \"Type 'quit' to abort\")\n msg = await bot.wait_for(\"message\", check=check)\n description = msg.content.strip()\n\n # Aborting if user entered 'quit'.\n if await check_quit(ctx, description):\n return\n\n # Getting the due date.\n await ctx.send(\"What is the due date of this assignment?\\n \")\n date = await get_date(ctx, bot)\n if date is None:\n return\n\n # Getting the due time.\n await ctx.send(\"What time is this assignment due?\\n \")\n time = await get_due_time(ctx, bot)\n if time is None:\n return\n # If due time is entered as 'NA', this part will run\n elif not time:\n db.mutation_query(\n 'INSERT INTO assignments VALUES (?, ?, ?, ?, ?, ?, ?)',\n [ctx.guild.id, title, link, description, date, 0, 0]\n )\n await ctx.send('Assignment successfully created!')\n await cal.display_events(None)\n return\n\n # If there's a valid due time, this will execute\n db.mutation_query(\n 'INSERT INTO assignments VALUES (?, ?, ?, ?, ?, ?, ?)',\n [ctx.guild.id, title, link, description, date, time.hour, time.minute]\n )\n\n await ctx.send('Assignment successfully created!')\n await cal.display_events(None)\n return\n\n # If 'exam' is clicked, this will run\n elif button_clicked == 'exam':\n def check(m):\n return m.content is not None and m.channel == ctx.channel and m.author == ctx.author\n\n await ctx.send(\"What is the title of this exam? (Type 'quit' to abort)\")\n msg = await bot.wait_for(\"message\", check=check)\n title = msg.content.strip()\n\n # Aborting if user entered 'quit'.\n if await check_quit(ctx, title):\n return\n\n await ctx.send(\"What content is this exam covering? 
(Type 'quit' to abort)\")\n msg = await bot.wait_for('message', check=check)\n description = msg.content.strip()\n\n # Aborting if user entered 'quit'.\n if await check_quit(ctx, description):\n return\n\n # Getting the date.\n await ctx.send(\"What is the date of this exam?\\n \")\n date = await get_date(ctx, bot)\n if date is None:\n return\n\n # Getting the exam start/end times.\n await ctx.send(\"Type the start & end times of the exam\\n\")\n times = await get_times(ctx, bot, 'exam')\n if times is None:\n return\n # This part will run if user entered 'NA'.\n elif not times:\n db.mutation_query(\n 'INSERT INTO exams VALUES (?, ?, ?, ?, ?, ?, ?, ?)',\n [ctx.guild.id, title, description, date,\n 0, 0, 0, 0]\n )\n await ctx.send('Exam successfully created!')\n await cal.display_events(ctx)\n return\n\n ((begin_hour, begin_minute), (end_hour, end_minute)) = times\n db.mutation_query(\n 'INSERT INTO exams VALUES (?, ?, ?, ?, ?, ?, ?, ?)',\n [ctx.guild.id, title, description, date,\n begin_hour, begin_minute, end_hour, end_minute]\n )\n\n await ctx.send('Exam successfully created!')\n await cal.display_events(ctx)\n return\n\n # If 'Office Hour' is clicked, this will run\n elif button_clicked == 'office-hour':\n # Adding instructors in the server to a list\n all_instructors = []\n for mem in ctx.guild.members:\n is_instructor = next((role.name == 'Instructor'\n for role in mem.roles), None) is not None\n if is_instructor:\n all_instructors.append(mem)\n\n if len(all_instructors) < 1:\n await ctx.send('There are no instructors in the server. 
Aborting event creation.')\n return\n\n options = [SelectOption(label=instr.name, value=instr.name)\n for instr in all_instructors]\n\n await ctx.send(\n 'Which instructor will this office hour be for?',\n components=[\n Select(\n placeholder='Select an instructor',\n options=options\n )\n ]\n )\n\n instructor = ((await utils.wait_for_msg(bot, ctx.channel)).content\n if testing_mode else (await bot.wait_for('select_option')).values[0])\n\n await ctx.send(\n 'Which day would you like the office hour to be on?',\n components=[\n Select(\n placeholder='Select a day',\n options=[\n SelectOption(label='Monday', value='Mon'),\n SelectOption(label='Tuesday', value='Tue'),\n SelectOption(label='Wednesday', value='Wed'),\n SelectOption(label='Thursday', value='Thu'),\n SelectOption(label='Friday', value='Fri'),\n SelectOption(label='Saturday', value='Sat'),\n SelectOption(label='Sunday', value='Sun')\n ]\n )\n ]\n )\n\n day = (\n (await utils.wait_for_msg(bot, ctx.channel)).content\n if testing_mode else\n (await bot.wait_for('select_option', check=lambda x: x.values[0] in ('Mon', 'Tue', 'Wed', 'Thu', 'Fri',\n 'Sat', 'Sun'))).values[0]\n )\n\n day_num = ('Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun').index(day)\n\n # Looping until a valid time range is entered (or 'quit' is entered).\n await ctx.send(\"Type the start & end times of your office hours.\\n\")\n while True:\n times = await get_times(ctx, bot, 'office hour')\n if times is None:\n return\n if not times:\n await ctx.send(\"You must enter a time range for office hours\\n\")\n continue\n break\n ((begin_hour, begin_minute), (end_hour, end_minute)) = times\n\n office_hours.add_office_hour(\n ctx.guild,\n office_hours.TaOfficeHour(\n instructor,\n day_num,\n (datetime.time(hour=begin_hour, minute=begin_minute),\n datetime.time(hour=end_hour, minute=end_minute))\n )\n )\n\n db.mutation_query(\n 'INSERT INTO ta_office_hours VALUES (?, ?, ?, ?, ?, ?, ?)',\n [ctx.guild.id, instructor, day_num, begin_hour, begin_minute, 
end_hour, end_minute]\n )\n\n await ctx.send('Office hour successfully created!')\n\n # If 'Custom Event' is clicked, this will run\n elif button_clicked == 'custom-event':\n def check(m):\n return m.content is not None and m.channel == ctx.channel and m.author == ctx.author\n\n await ctx.send(\"What would you like this event to be called? \"\n \"(Type 'quit' to abort)\")\n msg = await bot.wait_for(\"message\", check=check)\n title = msg.content.strip()\n\n # Aborting if user entered 'quit'.\n if await check_quit(ctx, title):\n return\n\n await ctx.send(\"Extra description for the event? Type 'NA' if none. \"\n \"Type 'quit' to abort\")\n msg = await bot.wait_for(\"message\", check=check)\n description = msg.content.strip()\n\n # Aborting if user entered 'quit'.\n if await check_quit(ctx, description):\n return\n\n # Getting associated url of the event.\n await ctx.send(\"Is there an associated link for this event?\")\n link = await get_url(ctx, bot)\n if link is None:\n return\n\n # Getting the associated date.\n await ctx.send(\"Is there a date or a due date for this event?\\n\")\n date = await get_date(ctx, bot)\n if date is None:\n return\n\n # send this message if there's an associated date.\n if date:\n await ctx.send(\"Is there a due time for this event?\\n\")\n time = await get_due_time(ctx, bot)\n if time is None:\n return\n elif time:\n db.mutation_query(\n 'INSERT INTO custom_events VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',\n [ctx.guild.id, title, link, description, date, time.hour, time.minute, 0, 0, 0, 0]\n )\n await ctx.send('Event successfully created!')\n await cal.display_events(None)\n return\n\n await ctx.send(\"What are the start & end times of this event?\\n\")\n times = await get_times(ctx, bot, 'event')\n if times is None:\n return\n elif not times:\n db.mutation_query(\n 'INSERT INTO custom_events VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',\n [ctx.guild.id, title, link, description, date, 0, 0, 0, 0, 0, 0]\n )\n await ctx.send('Event 
successfully created!')\n await cal.display_events(None)\n return\n\n ((begin_hour, begin_minute), (end_hour, end_minute)) = times\n db.mutation_query(\n 'INSERT INTO custom_events VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',\n [ctx.guild.id, title, link, description, date, 0, 0, begin_hour, begin_minute, end_hour, end_minute]\n )\n\n await ctx.send('Assignment successfully created!')\n await cal.display_events(None)\n return\n\n else:\n await ctx.author.send('`!create` can only be used in the `instructor-commands` channel')\n await ctx.message.delete()\n return\n","sub_path":"src/event_creation.py","file_name":"event_creation.py","file_ext":"py","file_size_in_byte":18623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"379337252","text":"def agrupa_por_idade(x):\n fet = {'crianca': [], 'adolescente': [], 'adulto': [], 'idoso': []}\n for k, v in x.items():\n if v <= 11:\n fet['crianca'].append(k)\n if v <= 17:\n fet['adolescente'].append(k)\n if v <= 59:\n fet['adulto'].append(k)\n else:\n fet['idoso'].append(k)\n print(fet)\n","sub_path":"backup/user_079/ch153_2020_04_13_20_27_38_934479.py","file_name":"ch153_2020_04_13_20_27_38_934479.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"452825518","text":"import sys\r\nimport os\r\nfrom sqlalchemy.orm import sessionmaker\r\nfrom sqlalchemy import create_engine\r\nfrom db import Base, Pylypets, Drahobrat, Podobovets, Slavsko, Krasiya\r\nfrom psr import Weather_Data\r\nimport datetime\r\nimport tkinter as tk\r\nimport tkinter.messagebox as box\r\n\r\nURL_DRAG = 'http://ru.snow-forecast.com/resorts/Drahobrat/6day/mid'\r\nURL_PYL = 'https://ru.snow-forecast.com/resorts/Pylypets/6day/mid'\r\nURL_POD = 'https://ru.snow-forecast.com/resorts/Podobovets/6day/mid'\r\nURL_SLV = 'https://ru.snow-forecast.com/resorts/Slavsko/6day/mid'\r\nURL_KRS = 'https://ru.snow-forecast.com/resorts/Krasiya/6day/mid'\r\n\r\n\r\ndef err():\r\n root = tk.Tk()\r\n root.overrideredirect(1)\r\n root.withdraw()\r\n box.showerror('Error!', 'Check internet connection')\r\n root.quit()\r\n sys.exit()\r\n\r\n\r\ndef msg():\r\n root = tk.Tk()\r\n root.overrideredirect(1)\r\n root.withdraw()\r\n box.showinfo('Snow_Alert', '{}: snow coming!!!'.format(resort))\r\n root.quit()\r\n\r\n\r\ndef alert(resort):\r\n total_snow = 0\r\n for snow in resort.snow_list:\r\n try:\r\n total_snow += int(snow)\r\n except ValueError:\r\n pass\r\n if total_snow >= 50:\r\n msg()\r\n\r\n\r\ndef update_db(resort, db):\r\n day = 0\r\n for i in range(0, 18, 3):\r\n upd = session.merge(db(date=datetime.date.today() + datetime.timedelta(days=day),\r\n t_max_m=resort.temp_max[i],\r\n t_max_d=resort.temp_max[i + 1],\r\n t_max_n=resort.temp_max[i + 2],\r\n t_min_m=resort.temp_min[i],\r\n t_min_d=resort.temp_min[i + 1],\r\n t_min_n=resort.temp_min[i + 2],\r\n snow_m=resort.snow_list[i],\r\n snow_d=resort.snow_list[i + 1],\r\n snow_n=resort.snow_list[i + 2],\r\n wind_speed_m=resort.wind_list[i][0],\r\n wind_speed_d=resort.wind_list[i + 1][0],\r\n wind_speed_n=resort.wind_list[i + 2][0],\r\n wind_dir_m=resort.wind_list[i][1],\r\n wind_dir_d=resort.wind_list[i + 1][1],\r\n wind_dir_n=resort.wind_list[i + 2][1],\r\n weather_m=resort.weather_list[i][1],\r\n 
weather_d=resort.weather_list[i + 1][1],\r\n weather_n=resort.weather_list[i + 2][1]))\r\n day += 1\r\n session.add(upd)\r\n session.commit()\r\n\r\n\r\ntry:\r\n pyl = Weather_Data(URL_PYL)\r\n drag = Weather_Data(URL_DRAG)\r\n pod = Weather_Data(URL_POD)\r\n slv = Weather_Data(URL_SLV)\r\n krs = Weather_Data(URL_KRS)\r\nexcept AttributeError:\r\n err()\r\n\r\nengine = create_engine('sqlite:///weather_stats.db')\r\nBase.metadata.bind = engine\r\nDBSession = sessionmaker(bind=engine)\r\nsession = DBSession()\r\n\r\nfor resort, db in zip((drag, pyl, pod, slv, krs), \\\r\n (Drahobrat, Pylypets, Podobovets, Slavsko, Krasiya)):\r\n update_db(resort, db)\r\n\r\nfor resort in (drag, pyl, pod, slv, krs):\r\n alert(resort)\r\n\r\n# time check before update!!!\r\n","sub_path":"db_upd.py","file_name":"db_upd.py","file_ext":"py","file_size_in_byte":3240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"54388138","text":"\n# Find Eulerian Tour\n#\n# Write a function that takes in a graph\n# represented as a list of tuples\n# and return a list of nodes that\n# you would follow on an Eulerian Tour\n#\n# For example, if the input graph was\n# [(1, 2), (2, 3), (3, 1)]\n# A possible Eulerian tour would be [1, 2, 3, 1]\n\ngraph = [(1, 2), (2, 3), (3, 1)]\ngraph = [(1, 2), (2, 3), (3, 1), (3, 4)]\ngraph = [(1, 2), (2, 3), (3, 1), (3, 4), (4,2)]\ngraph = [(0, 1), (1, 5), (1, 7), (4, 5),(4, 8), (1, 6), (3, 7), (5, 9),(2, 4), (0, 4), (2, 5), (3, 6), (8, 9)]\n\n\ndef get_degrees(graph):\n degrees = {}\n for e in graph:\n for i in e:\n if i not in degrees:\n degrees[i] = 1\n else:\n degrees[i] += 1\n return degrees\n\n#print graph\n#print get_degrees(graph)\n\ndef get_nodes(graph):\n nodes = []\n for e in graph:\n if e[0] not in nodes:\n nodes.append(e[0])\n if e[1] not in nodes:\n nodes.append(e[1])\n return nodes\n\n#print get_nodes(graph)\n\n\ndef find_edges_including(graph, node):\n edges = []\n for e in graph:\n if e[0]==node or e[1]==node:\n edges.append(e)\n return edges\n\n#print find_edges_including(graph, 2)\n\n\ndef is_eulerian(graph, starting_node):\n degrees = get_degrees(graph)\n num_odd = 0\n for d in degrees:\n if degrees[d]%2 != 0:\n num_odd += 1\n if num_odd>2:\n return False\n if num_odd>0:\n if degrees[starting_node]%2 == 0:\n return False #starting node must be odd if there are odd nodes\n if len(degrees)>0 and starting_node not in degrees:\n return False #must be able to move from starting node to remaining edges, if there are any\n return True\n\n#print is_eulerian(graph,1)\n\ndef subtract_edge(graph, edge):\n new_graph = []\n for e in graph:\n if e != edge:\n new_graph.append(e)\n return new_graph\n\n#print subtract_edge(graph, (1,2))\n\ndef find_eulerian_tour(graph):\n # your code here\n tour = []\n tour_nodes = []\n\n degrees = get_degrees(graph)\n nodes = get_nodes(graph)\n\n for n in nodes:\n if not is_eulerian(graph, n):\n continue\n 
cur_node = n\n tour_nodes.append(cur_node)\n next_edges = find_edges_including(graph, cur_node)\n while len(next_edges)>0:\n e = next_edges.pop()\n if e[0] == cur_node:\n next_node = e[1]\n else:\n next_node = e[0]\n next_graph = subtract_edge(graph, e)\n if is_eulerian(next_graph, next_node):\n tour_nodes.append(next_node)\n graph = next_graph\n cur_node = next_node\n next_edges = find_edges_including(graph, cur_node)\n break\n if tour_nodes == []:\n return None\n return tour_nodes\n\n#print find_eulerian_tour(graph)\n\n","sub_path":"Find_eulerian_tour.py","file_name":"Find_eulerian_tour.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"446539183","text":"\r\n'''\r\n*****************************************************************************************\r\n*\r\n* \t\t===============================================\r\n* \t\tRapid Rescuer (RR) Theme (eYRC 2019-20)\r\n* \t\t===============================================\r\n*\r\n* This script is to implement Task 1A of Rapid Rescuer (RR) Theme (eYRC 2019-20).\r\n*\r\n* This software is made available on an \"AS IS WHERE IS BASIS\".\r\n* Licensee/end user indemnifies and will keep e-Yantra indemnified from\r\n* any and all claim(s) that emanate from the use of the Software or\r\n* breach of the terms of this agreement.\r\n*\r\n* e-Yantra - An MHRD project under National Mission on Education using ICT (NMEICT)\r\n*\r\n*****************************************************************************************\r\n'''\r\n\r\n\r\n# Team ID:\t\t\t[ 5151 ]\r\n# Author List:\t\t[ Rohan Mehta, Arnav Saha ]\r\n# Filename:\t\t\ttask_1a.py\r\n# Functions:\t\treadImage, solveMaze, buildGraph, findNeighbours, isSafe, findPath,constructPath\r\n# \t\t\t\t\t[ Comma separated list of functions in this file ]\r\n# Global variables:\tCELL_SIZE\r\n# \t\t\t\t\t[ List of global variables defined in this file ]\r\n\r\n\r\n# Import necessary modules\r\n# Do not import any other modules\r\nimport cv2\r\nimport numpy as np\r\nimport os\r\n\r\n\r\n# To enhance the maze image\r\nimport image_enhancer\r\n\r\n\r\n# Maze images in task_1a_images folder have cell size of 20 pixels\r\nCELL_SIZE = 20\r\n\r\n\r\ndef readImage(img_file_path):\r\n \"\"\"\r\n Purpose:\r\n ---\r\n the function takes file path of original image as argument and returns it's binary form\r\n\r\n Input Arguments:\r\n ---\r\n `img_file_path` :\t\t[ str ]\r\n file path of image\r\n\r\n Returns:\r\n ---\r\n `original_binary_img` :\t[ numpy array ]\r\n binary form of the original image at img_file_path\r\n\r\n Example call:\r\n ---\r\n original_binary_img = readImage(img_file_path)\r\n\r\n \"\"\"\r\n\r\n 
binary_img = None\r\n\r\n #############\tAdd your Code here\t###############\r\n # read image from given path\r\n binary_img = cv2.imread(img_file_path)\r\n # convert image to grayscale\r\n binary_img = cv2.cvtColor(binary_img, cv2.COLOR_BGR2GRAY)\r\n ###################################################\r\n\r\n return binary_img\r\n\r\n\r\ndef solveMaze(original_binary_img, initial_point, final_point, no_cells_height, no_cells_width):\r\n \"\"\"\r\n Purpose:\r\n ---\r\n the function takes binary form of original image, start and end point coordinates and solves the maze\r\n to return the list of coordinates of shortest path from initial_point to final_point\r\n\r\n Input Arguments:\r\n ---\r\n `original_binary_img` :\t[ numpy array ]\r\n binary form of the original image at img_file_path\r\n `initial_point` :\t\t[ tuple ]\r\n start point coordinates\r\n `final_point` :\t\t\t[ tuple ]\r\n end point coordinates\r\n `no_cells_height` :\t\t[ int ]\r\n number of cells in height of maze image\r\n `no_cells_width` :\t\t[ int ]\r\n number of cells in width of maze image\r\n\r\n Returns:\r\n ---\r\n `shortestPath` :\t\t[ list ]\r\n list of coordinates of shortest path from initial_point to final_point\r\n\r\n Example call:\r\n ---\r\n shortestPath = solveMaze(\r\n original_binary_img, initial_point, final_point, no_cells_height, no_cells_width)\r\n\r\n \"\"\"\r\n\r\n shortestPath = []\r\n\r\n #############\tAdd your Code here\t###############\r\n lengthOfPath = 0\r\n # initialise visited cell array with 0\r\n visited = np.zeros([no_cells_width, no_cells_height], dtype=int)\r\n # build all neighbours graph for each cell\r\n graph = buildGraph(original_binary_img, no_cells_width, no_cells_height)\r\n # build a child:parent relationship to recontruct the shortest path\r\n parentDict = findPath(graph, visited,\r\n initial_point[0], initial_point[1], final_point[0], final_point[1], lengthOfPath)\r\n # reconstruct the shortest path\r\n shortestPath = constructPath(parentDict, 
initial_point, final_point)\r\n ###################################################\r\n\r\n return shortestPath\r\n\r\n\r\n#############\tYou can add other helper functions here\t\t#############\r\ndef buildGraph(original_binary_img, no_cells_width, no_cells_height):\r\n graph = {}\r\n for i in range(0, no_cells_width):\r\n for j in range(0, no_cells_height):\r\n graph[(i, j)] = findNeighbours(original_binary_img, i, j)\r\n return graph\r\n\r\n\r\ndef findNeighbours(original_binary_img, row, column):\r\n x_start = column * CELL_SIZE\r\n y_start = row * CELL_SIZE\r\n neighbours = []\r\n top = True\r\n bottom = True\r\n left = True\r\n right = True\r\n for x in range(x_start, x_start + CELL_SIZE):\r\n if(original_binary_img[y_start, x] == 255 and top):\r\n # when no black bars are found on top of any cell\r\n neighbours.append((row-1, column))\r\n top = False\r\n if(original_binary_img[y_start + CELL_SIZE - 1, x] == 255 and bottom):\r\n # when no black bars are found on bottom of any cell\r\n neighbours.append((row+1, column))\r\n bottom = False\r\n\r\n for y in range(y_start, y_start + CELL_SIZE):\r\n if(original_binary_img[y, x_start] == 255 and left):\r\n # when no black bars are found on left of any cell\r\n neighbours.append((row, column-1))\r\n left = False\r\n if(original_binary_img[y, x_start + CELL_SIZE - 1] == 255 and right):\r\n # when no black bars are found on right of any cell\r\n neighbours.append((row, column+1))\r\n right = False\r\n\r\n return neighbours\r\n\r\n\r\ndef isSafe(graph, visited, i, j, x, y):\r\n # a cell is safe to travel only if it is a neighbour and\r\n # it has not been visited\r\n if((x, y) in graph[(i, j)] and visited[x][y] == 0):\r\n return True\r\n\r\n return False\r\n\r\n\r\ndef findPath(graph, visited, i, j, x, y, dist):\r\n # Implementing BFS using queue as list\r\n queue = []\r\n queue.insert(0, (i, j, dist))\r\n visited[i][j] = 1\r\n parentDict = {}\r\n\r\n while(len(queue)):\r\n node = queue[-1] # node = (i,j,dist)\r\n 
queue.pop()\r\n\r\n if(node[0] == x and node[1] == y):\r\n return parentDict\r\n\r\n # up\r\n if(isSafe(graph, visited, node[0], node[1], node[0]-1, node[1])):\r\n queue.insert(0, ((node[0] - 1, node[1], node[2]+1)))\r\n parentDict[(node[0]-1, node[1])] = (node[0], node[1])\r\n visited[node[0]][node[1]] = 1\r\n\r\n # left\r\n if(isSafe(graph, visited, node[0], node[1], node[0], node[1]-1)):\r\n parentDict[(node[0], node[1]-1)] = (node[0], node[1])\r\n queue.insert(0, ((node[0], node[1]-1, node[2]+1)))\r\n visited[node[0]][node[1]] = 1\r\n\r\n # right\r\n if(isSafe(graph, visited, node[0], node[1], node[0], node[1] + 1)):\r\n parentDict[(node[0], node[1]+1)] = (node[0], node[1])\r\n queue.insert(0, ((node[0], node[1] + 1, node[2]+1)))\r\n visited[node[0]][node[1]] = 1\r\n\r\n # down\r\n if(isSafe(graph, visited, node[0], node[1], node[0]+1, node[1])):\r\n parentDict[(node[0]+1, node[1])] = (node[0], node[1])\r\n queue.insert(0, ((node[0] + 1, node[1], node[2]+1)))\r\n visited[node[0]][node[1]] = 1\r\n\r\n\r\ndef constructPath(parent, initial_point, final_point):\r\n children = []\r\n shortest = []\r\n\r\n for key in parent.keys():\r\n children.insert(0, key)\r\n while(1):\r\n shortest.insert(0, final_point)\r\n if(parent[final_point] == initial_point):\r\n shortest.insert(0, initial_point)\r\n break\r\n final_point = parent[final_point]\r\n return shortest\r\n#########################################################################\r\n\r\n\r\n# NOTE:\tYOU ARE NOT ALLOWED TO MAKE ANY CHANGE TO THIS FUNCTION\r\n#\r\n# Function Name:\tmain\r\n# Inputs:\t\t\tNone\r\n# Outputs: \t\t\tNone\r\n# Purpose: \t\t\tthe function first takes 'maze00.jpg' as input and solves the maze by calling readImage\r\n# \t\t\t\t\tand solveMaze functions, it then asks the user whether to repeat the same on all maze images\r\n# \t\t\t\t\tpresent in 'task_1a_images' folder or not\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n curr_dir_path = os.getcwd()\r\n # path to directory of 
'task_1a_images'\r\n img_dir_path = curr_dir_path + '/../task_1a_images/'\r\n\r\n file_num = 0\r\n img_file_path = img_dir_path + 'maze0' + \\\r\n str(file_num) + '.jpg'\t\t# path to 'maze00.jpg' image file\r\n\r\n print('\\n============================================')\r\n\r\n print('\\nFor maze0' + str(file_num) + '.jpg')\r\n\r\n try:\r\n\r\n original_binary_img = readImage(img_file_path)\r\n height, width = original_binary_img.shape\r\n\r\n except AttributeError as attr_error:\r\n\r\n print('\\n[ERROR] readImage function is not returning binary form of original image in expected format !\\n')\r\n exit()\r\n\r\n # number of cells in height of maze image\r\n no_cells_height = int(height/CELL_SIZE)\r\n # number of cells in width of maze image\r\n no_cells_width = int(width/CELL_SIZE)\r\n initial_point = (0, 0)\t\t\t\t\t\t\t\t\t\t\t# start point coordinates of maze\r\n # end point coordinates of maze\r\n final_point = ((no_cells_height-1), (no_cells_width-1))\r\n\r\n try:\r\n\r\n shortestPath = solveMaze(\r\n original_binary_img, initial_point, final_point, no_cells_height, no_cells_width)\r\n\r\n if len(shortestPath) > 2:\r\n\r\n img = image_enhancer.highlightPath(\r\n original_binary_img, initial_point, final_point, shortestPath)\r\n\r\n else:\r\n\r\n print(\r\n '\\n[ERROR] shortestPath returned by solveMaze function is not complete !\\n')\r\n exit()\r\n\r\n except TypeError as type_err:\r\n\r\n print('\\n[ERROR] solveMaze function is not returning shortest path in maze image in expected format !\\n')\r\n exit()\r\n\r\n print('\\nShortest Path = %s \\n\\nLength of Path = %d' %\r\n (shortestPath, len(shortestPath)))\r\n\r\n print('\\n============================================')\r\n\r\n cv2.imshow('canvas0' + str(file_num), img)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n choice = input(\r\n '\\nWant to run your script on all maze images ? 
==>> \"y\" or \"n\": ')\r\n\r\n if choice == 'y':\r\n\r\n file_count = len(os.listdir(img_dir_path))\r\n\r\n for file_num in range(file_count):\r\n\r\n img_file_path = img_dir_path + 'maze0' + str(file_num) + '.jpg'\r\n\r\n print('\\n============================================')\r\n\r\n print('\\nFor maze0' + str(file_num) + '.jpg')\r\n\r\n try:\r\n\r\n original_binary_img = readImage(img_file_path)\r\n height, width = original_binary_img.shape\r\n\r\n except AttributeError as attr_error:\r\n\r\n print(\r\n '\\n[ERROR] readImage function is not returning binary form of original image in expected format !\\n')\r\n exit()\r\n\r\n # number of cells in height of maze image\r\n no_cells_height = int(height/CELL_SIZE)\r\n # number of cells in width of maze image\r\n no_cells_width = int(width/CELL_SIZE)\r\n initial_point = (0, 0)\t\t\t\t\t\t\t\t\t\t\t# start point coordinates of maze\r\n # end point coordinates of maze\r\n final_point = ((no_cells_height-1), (no_cells_width-1))\r\n\r\n try:\r\n\r\n shortestPath = solveMaze(\r\n original_binary_img, initial_point, final_point, no_cells_height, no_cells_width)\r\n\r\n if len(shortestPath) > 2:\r\n\r\n img = image_enhancer.highlightPath(\r\n original_binary_img, initial_point, final_point, shortestPath)\r\n\r\n else:\r\n\r\n print(\r\n '\\n[ERROR] shortestPath returned by solveMaze function is not complete !\\n')\r\n exit()\r\n\r\n except TypeError as type_err:\r\n\r\n print(\r\n '\\n[ERROR] solveMaze function is not returning shortest path in maze image in expected format !\\n')\r\n exit()\r\n\r\n print('\\nShortest Path = %s \\n\\nLength of Path = %d' %\r\n (shortestPath, len(shortestPath)))\r\n\r\n print('\\n============================================')\r\n\r\n cv2.imshow('canvas0' + str(file_num), img)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\n else:\r\n\r\n print('')\r\n","sub_path":"submit/RR_Task_1A_1B#5151/Task 
1A/task_1a.py","file_name":"task_1a.py","file_ext":"py","file_size_in_byte":12437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"415373745","text":"import os\nimport signal\nimport subprocess\nimport sys\n\nfrom src.reversi_zero.lib.pipe_helper import dump_pipe_pairs_names\n\n\nchildren_processes = []\nexit_tasks = []\n\n\ndef build_child_cmd(type, config, pipe_pairs):\n cmd = ['python3.6', '-m', 'src.reversi_zero.run', type,\n '--env', f'{config.env.env_arg_name}',\n '--n-sims', f'{config.play.simulation_num_per_move}',\n '--pipe', dump_pipe_pairs_names(pipe_pairs),\n ]\n if config.opts.gpu_mem_frac is not None:\n cmd.append('--gpu-mem-frac')\n cmd.append(f'{config.opts.gpu_mem_frac}')\n\n return cmd\n\n\ndef start_child_proc(cmd, nocuda=None, stdin=None, stdout=None, stderr=None, cwd=None):\n global children_processes\n\n env = os.environ.copy()\n if nocuda:\n env['CUDA_VISIBLE_DEVICES'] = ''\n\n try:\n p = subprocess.Popen(cmd, env=env, stdin=stdin, stdout=stdout, stderr=stderr, cwd=cwd)\n except Exception:\n print(cmd)\n raise\n\n children_processes.append(p)\n\n return p\n\n\ndef kill_children_processes(*args):\n for p in children_processes:\n if p and p.poll() is None:\n p.kill()\n\n\ndef add_exit_task(task):\n global exit_tasks\n exit_tasks.append(task)\n\n\ndef clean(*args):\n for t in exit_tasks:\n print(t)\n t(*args)\n sys.exit()\n\n\ndef signal_exit():\n for sig in (signal.SIGABRT, signal.SIGILL, signal.SIGINT, signal.SIGSEGV, signal.SIGTERM):\n signal.signal(sig, clean)\n","sub_path":"src/reversi_zero/lib/proc_helper.py","file_name":"proc_helper.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"172573274","text":"class TrieNode(object):\n def __init__(self):\n self.is_word = False\n self.children = [None] * 26\n \nclass WordDictionary(object):\n\n def __init__(self):\n \"\"\"\n Initialize your data structure here.\n \"\"\"\n self.root = TrieNode()\n \n\n def addWord(self, word):\n \"\"\"\n Adds a word into the data structure.\n :type word: str\n :rtype: None\n \"\"\"\n p = self.root\n for c in word:\n index = ord(c) - ord('a')\n if not p.children[index]:\n p.children[index] = TrieNode()\n p = p.children[index]\n p.is_word = True\n\n \n\n def search(self, word):\n \"\"\"\n Returns if the word is in the data structure. A word could contain the dot character '.' to represent any one letter.\n :type word: str\n :rtype: bool\n \"\"\"\n return self.find(word, self.root, 0)\n \n def find(self, word, cur, index):\n if index == len(word):\n return cur.is_word\n \n c = word[index]\n \n if c == \".\":\n for i in xrange(26):\n if cur.children[i] != None and self.find(word, cur.children[i], index+1):\n return True\n return False \n else:\n return cur.children[ord(c) - ord(\"a\")] != None and self.find(word, cur.children[ord(c) - ord(\"a\")], index + 1)\n\n \n\n\n# Your WordDictionary object will be instantiated and called as such:\n# obj = WordDictionary()\n# obj.addWord(word)\n# param_2 = obj.search(word)","sub_path":"LC211_Trie.py","file_name":"LC211_Trie.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"542222382","text":"import httplib2 as http\nimport json\n\n\ndef get_suggestions(text):\n headers = {\n \"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\",\n \"Authorization\": \"Token 4bc501558cb7d702b435bfd6e0a6a026e99a87b4\",\n }\n\n url = \"https://suggestions.dadata.ru/suggestions/api/4_1/rs/suggest/party\"\n method = \"POST\"\n body = '{ \"query\": \"' + text + '\" }'\n\n h = http.Http()\n\n response, content = h.request(\n url,\n method,\n body.encode('utf-8'),\n headers)\n\n if response[\"status\"] == '200':\n data = json.loads(content)\n if (len(data[\"suggestions\"]) != 0):\n return data[\"suggestions\"]\n else:\n raise Exception(\"Bad request\")\n\nget_suggestions(\"сбер\")","sub_path":"dadata_wrapper.py","file_name":"dadata_wrapper.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"529052875","text":"import sys,os,glob,json\nimport random\nimport xgboost as xgb\nimport numpy as np\nimport time\n\nimport poc_expt_base_ml_utils as ml_utils\nimport poc_expt_base_extract_feature as extract_feature\n\nfrom sklearn.metrics import *\nfrom sklearn.externals import joblib\n\ndef read_meta(f):\n t = None\n with open(f) as file:\n t = json.load(file)\n return t\n\ndef read_meta_path(meta_path):\n return [read_meta(f) for f in sorted(glob.glob(os.path.join(meta_path,'*.meta')))]\n\ndef get_xgb_param(labels):\n ratio = float(labels.count(0)) / labels.count(1)\n param = dict()\n param['booster'] = 'gbtree'\n param['objective'] = 'multi:softprob'\n param['num_class'] = 2\n param['eval_metric'] = 'mlogloss'\n param['scale_pos_weight'] = ratio\n param['eta'] = 0.05\n param['n_estimators'] = 20\n param['max_depth'] = 6\n param['min_child_weight'] = 0.8\n param['colsample_bytree'] = 0.5\n param['subsample'] = 0.5\n param['silent'] = 1\n param['nthread'] = 4\n param['seed'] = 777\n return param\n\ndef model_training(mldataset,num_boost_round=0):\n params = get_xgb_param(mldataset['y_tr'])\n d_tr = xgb.DMatrix(data=mldataset['x_tr'],label=mldataset['y_tr'])\n # d_test = xgb.DMatrix(data=mldataset['x_ts'],label=mldataset['y_ts']) if len(mldataset['y_ts']) > 0 else None\n bst = xgb.train(params,d_tr,\n num_boost_round=num_boost_round if num_boost_round>0 else 1000,\n # early_stopping_rounds=None if num_boost_round>0 else 50,\n # evals=[(d_test,'test')] if d_test is not None else [],\n # verbose_eval=False\n )\n return bst\n\ndef get_performance(y_ts, y_pred):\n perf = dict()\n #y_pred_cat = np.argmax(y_pred, axis=1)\n y_pred_cat = [1 if i[1] > 0.7 else 0 for i in y_pred] #aggressive\n perf['accuracy'] = accuracy_score(y_true=y_ts, y_pred=y_pred_cat)\n perf['precision'] = precision_score(y_true=y_ts, y_pred=y_pred_cat)\n perf['recall'] = recall_score(y_true=y_ts, y_pred=y_pred_cat)\n perf['data_dim'] = len(y_ts)\n zero_num = len([i for i in y_ts if 
i==0])\n one_num = len([i for i in y_ts if i==1])\n perf['data_dist'] = '(0:1) {}:{}'.format(zero_num,one_num)\n return perf\n\nDATA_PATH = os.path.join(os.sep,'data','real_case',)\n\nuser = sys.argv[1]\n\nprint('poc_bench_base',user)\n\n#-- Read Meta --#\ntr_target = read_meta_path(os.path.join(DATA_PATH,user,'train'))\ntr_anti = read_meta_path(os.path.join(DATA_PATH,user,'anti'))\ntt_bec = read_meta_path(os.path.join(DATA_PATH,user,'bec'))\n\nt0 = time.time()\n#-- Feature Extraction --#\nt1 = time.clock()\nml_dataset,ext_cost = ml_utils.prepare_ml_dataset(tr_target,tr_anti,0,False,\n target_test=[],\n anti_test=[],\n bec_test=tt_bec)\nprint(time.clock() - t1)\n\nt1 = time.clock()\nmodel = model_training(ml_dataset,num_boost_round=200)\nif 'normalizer' in ml_dataset:\n normalizer = ml_dataset['normalizer']\njoblib.dump(model,'bench_base_model_'+user+'.pkl')\nprint(time.clock() - t1)\nprint(\"Model Training Time:\",time.time() - t0)\n\n#-- Prediction --#\nt0 = time.time()\nml_dataset_,ext_cost = ml_utils.prepare_ml_dataset(\n tr_target,\n tr_anti,\n 0,False,\n target_test=tt_bec,\n anti_test=tt_bec,\n bec_test=tt_bec)\ndtr = xgb.DMatrix(\n data=ml_dataset_['x_tr'],\nlabel=ml_dataset_['y_tr'])\ndtt = xgb.DMatrix(\n data=ml_dataset_['x_ts'],\n label=ml_dataset_['y_ts'])\nmodel = joblib.load('bench_base_model_'+user+'.pkl')\nbec_hat = model.predict(dtt)[-len(tt_bec):,0]\nprint(time.time() - t0)\n\nprint(\"Accuracy:\",accuracy_score([True]*len(tt_bec),bec_hat>0.3))\nprint()\n","sub_path":"poc_bench_base.py","file_name":"poc_bench_base.py","file_ext":"py","file_size_in_byte":3559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"582325353","text":"\"\"\"\nCIS-15 Excercise 36\nEmanuel Navarro\n\"\"\"\nfrom sys import exit\ndef treasure(Hero):\n print('You enter the room.')\n print('The room if full of gold and artifacts and old scrolls.')\n Treasure=input('what do you take? The glowing shiny gold or a book that catches your eye.')\n if Treasure== 'gold':\n dead('The gold is cursed and you die and become a permanent soul lost in the dungeon')\n elif Treasure=='book':\n print('The temple starts to crumble and you proceed to exit the dungeon')\n print('You have your book in hand believing you lost your opportunity for gold and riches')\n print('You open the book and its a book of infinte knowledge that helps you become a famous rich adventure remembered for all time')\n print('Good job you win')\n print(Hero, ' you level up and all your stats go up!!')\n exit(0)\n else:\n dead('you take take to long to decide and a 30 foot snake eats while you were thinking')\n \n\ndef dead(why):\n print(why, \"Good job!\")\n exit(0)\ndef battle(hero):\n hp= 30\n squire=hero\n name= squire +\":\"+\"Hp\"\n return name\n\ndef waterdungeon(Hero):\n print('You walk into the temple you see two levers')\n print('And what seems to be a door.')\n print('Dead skulls eerily are spread through out the ground.')\n \n waterchoice=input('what lever do you pull? 
left or right?')\n \n if waterchoice== 'left':\n dead('The temple trembles and a 30 foor snake unburrows itself from the ground and eats you')\n elif waterchoice=='right':\n dead('The room collapese on top of you and you die')\n elif waterchoice=='both':\n print('The door opens')\n treasure(Hero)\n else:\n dead('You do nothing and die of starvation')\n \ndef deathmountain():\n dead('The air is rancid poison and you die.')\n\n\ndef gamestart(name):\n \n print(f\"Hello {name}\")\n print('Please select a class from the following')\n classes=['Archer', 'Knight', 'Mage']\n stats=['Str:4, Dex:10, Vit:6','Str:10, Vit:8, Spirit:6',' Magic:12, Spirit:8, Luck:6']\n Archer= classes[0] + stats[0]\n Knight= classes[1] + stats[1]\n Mage= classes[2] + stats[2]\n n= 0\n for i in classes:\n print(f\"Class#{n+1} {i} with Stats {stats[n]} \")\n n+=1\n while True:\n choice= input('Enter class name: ')\n if choice==classes[0]:\n return name+\":\"+Archer\n elif choice== classes[1]:\n return name+\":\"+Knight\n elif choice== classes[2]:\n return name+\":\"+Mage\n else:\n print(\"Name of the class is capatalized\")\n \n\n#print(gamestart())\n\ndef main() :\n\n name= str(input('Enter your character name: '))\n Hero=gamestart(name)\n print(\"Finaly\",name, \"Today is your graduation from Wisteria Academy\")\n print(\"Before you can gradute you must prove yourself.\")\n print(\"what dugeon shall you go conquer and show your worth\")\n print(\"Will it be waterdungeon? 
A place of unknown creatures and untold treasures.\")\n print(\"Death mountain where death is assured\")\n choice2=int(input(\"Option 1 waterdungeon, Option 2 Deathmountain: \"))\n if choice2==1:\n return waterdungeon(Hero)\n elif choice2==2:\n deathmountain()\n else:\n dead('You run away and become a farmer')\n \n \n \n \n \n\n\nif __name__ == '__main__' :\n main() ","sub_path":"week11/ex36.py","file_name":"ex36.py","file_ext":"py","file_size_in_byte":3380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"236627731","text":"import os\nimport json\nimport xmltodict\nimport xml.etree.ElementTree as Et\nfrom unittest import TestCase\nfrom copy import deepcopy\nfrom piano_utils.xml_json_converter import xml_to_json, json_to_xml\n\n\nclass TestXmlJsonConverter(TestCase):\n\n @classmethod\n def setUpClass(cls):\n \"\"\"\n create/setup test data\n :return:\n \"\"\"\n cls.simple_xml = open(os.path.join(os.path.dirname(__file__), 'simple_xml.xml')).read()\n\n cls.simple_json = {\n \"note_to\": \"Tove\",\n \"note_from\": \"Jani\",\n \"note_heading\": \"Reminder\",\n \"note_body\": \"Don't forget me this weekend!\",\n }\n\n cls.complex_xml = open(os.path.join(os.path.dirname(__file__), 'complex_xml.xml')).read()\n\n cls.catalog = [{\n \"title\": \"Empire Burlesque\",\n \"artist\": \"Bob Dylan\",\n \"prices\": (\"10.90\", \"8.99\"),\n \"year\": \"1985\",\n }, {\n \"title\": \"Hide Your Heart\",\n \"artist\": \"Bonnie Tyler\",\n \"prices\": (\"9.90\", \"7.50\"),\n \"year\": \"1988\",\n }, {\n \"title\": \"One Night Only\",\n \"artist\": \"Bee Gees\",\n \"prices\": (\"10.90\", \"5.99\"),\n \"year\": \"1998\",\n }]\n\n cls.complex_json = {}\n for idx, cd in enumerate(cls.catalog):\n for k in [\"title\", \"artist\", \"prices\", \"year\"]:\n key = \"catalog_cd_%s_%s\" % (str(idx), k)\n if k == \"prices\":\n cls.complex_json[key + \"_price_0_@type\"] = \"rrp\"\n cls.complex_json[key + \"_price_0_#text\"] = cd[k][0]\n cls.complex_json[key + \"_price_1_@type\"] = \"special\"\n cls.complex_json[key + \"_price_1_#text\"] = cd[k][1]\n else:\n cls.complex_json[key] = cd[k]\n\n cls.complex_json = [cls.complex_json]\n\n def test_xml_to_json(self):\n \"\"\"\n test conversion from xml to json\n :return:\n \"\"\"\n original_xml = self.simple_xml\n\n xml_as_json = json.loads(xml_to_json(original_xml))\n\n self.assertDictEqual(self.simple_json, xml_as_json[0])\n\n def test_xml_to_json__with_multiple(self):\n \"\"\"\n test conversion from xml to json with multiple tags\n :return:\n 
\"\"\"\n original_xml = deepcopy(self.complex_xml)\n\n # convert xml to json\n result = json.loads(xml_to_json(original_xml))\n self.assertTrue(isinstance(result, list))\n\n # check is flattened\n for article in result:\n for v in article.values():\n self.assertNotIsInstance(v, dict)\n self.assertNotIsInstance(v, list)\n\n # check values\n for idx, json_value in enumerate(self.complex_json):\n for k, v in json_value.iteritems():\n self.assertEquals(v, result[idx][k])\n\n def test_json_to_xml(self):\n \"\"\"\n test conversion from json to xml\n :return:\n \"\"\"\n original_json = deepcopy(self.simple_json)\n\n json_as_xml = json_to_xml(json.dumps(original_json))\n\n xml_tree = Et.fromstring(json_as_xml)\n self.assertEquals(\"note\", xml_tree.tag)\n self.assertEquals(original_json[\"note_to\"], xml_tree.find(\"to\").text)\n self.assertEquals(original_json[\"note_from\"], xml_tree.find(\"from\").text)\n self.assertEquals(original_json[\"note_heading\"], xml_tree.find(\"heading\").text)\n self.assertEquals(original_json[\"note_body\"], xml_tree.find(\"body\").text)\n\n def test_json_to_xml__with_multiple(self):\n \"\"\"\n test conversion from json to xml with multiple tags\n :return:\n \"\"\"\n original_json = deepcopy(self.complex_json)\n\n # convert from json to xml\n json_as_xml = json_to_xml(json.dumps(original_json[0]))\n\n for idx, json_value in enumerate(original_json):\n xml_tree = Et.fromstring(json_as_xml)\n cd = xml_tree[idx]\n self.assertEquals(self.catalog[idx][\"title\"], cd.find(\"title\").text)\n self.assertEquals(self.catalog[idx][\"artist\"], cd.find(\"artist\").text)\n self.assertEquals(self.catalog[idx][\"year\"], cd.find(\"year\").text)\n prices = cd.find(\"prices\")\n price_types = [\"rrp\", \"special\"]\n for p_idx, price in enumerate(prices.findall(\"price\")):\n self.assertEquals(self.catalog[idx][\"prices\"][p_idx], price.text)\n self.assertEquals(price_types[p_idx], price.get(\"type\", \"\"))\n\n def test_xml_to_json_to_xml(self):\n \"\"\"\n 
test conversion from xml to json, and back from json to xml\n :return:\n \"\"\"\n original_xml = deepcopy(self.simple_xml)\n\n # convert xml to json\n xml_as_json = json.loads(xml_to_json(deepcopy(original_xml)))\n\n # convert json back to xml\n json_as_xml = json_to_xml(json.dumps(xml_as_json[0]))\n\n # format result and expected result into dictionaries\n actual = json.loads(json.dumps(xmltodict.parse(json_as_xml)))\n expect = json.loads(json.dumps(xmltodict.parse(original_xml)))\n\n # compare dictionary values\n self.assertDictEqual(actual, expect)\n\n def test_xml_to_json_to_xml__with_multiple(self):\n \"\"\"\n test conversion from xml to json, and back from json to xml with multiple tags\n :return:\n \"\"\"\n original_xml = deepcopy(self.complex_xml)\n\n # convert xml to json\n xml_as_json = json.loads(xml_to_json(deepcopy(original_xml)))\n\n # convert json back to xml\n json_as_xml = json_to_xml(json.dumps(xml_as_json[0]))\n\n # format result and expected result into dictionaries\n result = json.loads(json.dumps(xmltodict.parse(json_as_xml)))\n expects = json.loads(json.dumps(xmltodict.parse(original_xml)))\n\n # compare dictionary values\n self.assertDictEqual(result, expects)\n\n def test_json_to_xml_to_json(self):\n \"\"\"\n test conversion from json to xml, and back from xml to json\n :return:\n \"\"\"\n original_json = deepcopy(self.simple_json)\n\n # convert json to xml\n json_as_xml = json_to_xml(json.dumps(original_json))\n\n # convert xml back to json\n xml_as_json = json.loads(xml_to_json(json_as_xml))\n\n # compare results\n self.assertDictEqual(original_json, xml_as_json[0])\n\n def test_json_to_xml_to_json__with_multiple(self):\n \"\"\"\n test conversion from json to xml, and back from xml to json with multiple tags\n :return:\n \"\"\"\n original_json = deepcopy(self.complex_json)\n\n # convert json to xml\n json_as_xml = json_to_xml(json.dumps(original_json[0]))\n\n # convert xml back to json\n xml_as_json = 
json.loads(xml_to_json(json_as_xml))\n\n # compare results\n self.assertEquals(len(original_json), len(xml_as_json))\n for idx, d in enumerate(original_json):\n self.assertDictEqual(d, xml_as_json[idx])\n","sub_path":"tests/test_xml_json_converter.py","file_name":"test_xml_json_converter.py","file_ext":"py","file_size_in_byte":7023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"115094169","text":"# @author: Guy Zyskind\n# guy@zyskind.com\n# Created on June 18, 2013\n#\n# DESCRIPTION:\n# Pulls tweet data from Twitter because ToS prevents distributing it directly.\n#\n# This is an updated version of Niek Sanders Corpus Install Script, which is compliant\n# with twitter's new API v1.1. The old API (v1) has been deprecated and no longer works.\n# This version also supports OAuth2, which is now required, but will also significantly\n# improve the running time.\n#\n# Full information and credit - \n# - Niek Sanders\n# njs@sananalytics.com\n# http://www.sananalytics.com/lab/twitter-sentiment/\n#\n# USAGE:\n# 1. Fill in the following parameters (from your twitter's app):\n# CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET.\n# 2. Run the script (Optional: change paths).\n#\n# Twitter currently limits such requests to 900/window (15 minutes).\n# This will require around 1.5 hours for the script to complete.\n#\n##############################################################\n#\n# Updated to be Python 3 compatible and run against the current Twitter API.\n#\n# Tested Environment - Windows 10 Pro + Python 3.6.1 + tweepy 3.5.0\n#\n# - Mark\n#\n##############################################################\n#\n# New Dependency on tweepy - https://github.com/tweepy/tweepy\n# - Install tweepy using pip, as...\n# - pip is the preferred installer program - https://docs.python.org/3/installing/\n# - With Python 3.4, pip is included by default with the Python binary installers.\n# - Command\n# - pip install tweepy\n#\n#\nimport csv, getpass, json, os, time, urllib\n\nimport tweepy\n\nCONSUMER_KEY = 'Your twitter app key'\nCONSUMER_SECRET = 'Your twitter app secret'\nACCESS_TOKEN = 'Your access token key'\nACCESS_TOKEN_SECRET = 'Your access token secret'\n\ndef get_user_params():\n\n user_params = {}\n\n # get user input params\n user_params['inList'] = input( '\\nInput file [./corpus.csv]: ' )\n user_params['outList'] = input( 
'Results file [./full-corpus.csv]: ' )\n user_params['rawDir'] = input( 'Raw data dir [./rawdata/]: ' )\n \n # apply defaults\n if user_params['inList'] == '': \n user_params['inList'] = './corpus.csv'\n if user_params['outList'] == '': \n user_params['outList'] = './full-corpus.csv'\n if user_params['rawDir'] == '': \n user_params['rawDir'] = './rawdata/'\n\n return user_params\n\n\ndef dump_user_params( user_params ):\n\n # dump user params for confirmation\n print('Input: ' + user_params['inList'])\n print('Output: ' + user_params['outList'])\n print('Raw data: ' + user_params['rawDir'])\n return\n\n\ndef read_total_list( in_filename ):\n\n # read total fetch list csv\n fp = open( in_filename, 'r', encoding=\"utf-8\" )\n reader = csv.reader( fp, delimiter=',', quotechar='\"' )\n\n total_list = []\n for row in reader:\n total_list.append( row )\n\n return total_list\n\n\ndef purge_already_fetched( fetch_list, raw_dir ):\n\n # list of tweet ids that still need downloading\n rem_list = []\n\n # check each tweet to see if we have it\n for item in fetch_list:\n\n # check if json file exists\n tweet_file = raw_dir + item[2] + '.json'\n if os.path.exists( tweet_file ):\n\n # attempt to parse json file\n try:\n parse_tweet_json( tweet_file )\n print('--> already downloaded #' + item[2])\n except RuntimeError:\n rem_list.append( item )\n else:\n rem_list.append( item )\n\n return rem_list\n\n\ndef get_time_left_str( cur_idx, fetch_list, download_pause ):\n\n tweets_left = len(fetch_list) - cur_idx\n total_seconds = tweets_left * download_pause\n\n str_hr = int( total_seconds / 3600 )\n str_min = int((total_seconds - str_hr*3600) / 60)\n str_sec = total_seconds - str_hr*3600 - str_min*60\n\n return '%dh %dm %ds' % (str_hr, str_min, str_sec)\n\ndef oauth_get_tweet(tweet_id, http_method=\"GET\", post_body='', http_headers=None):\n \n auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n api = 
tweepy.API(auth)\n \n #print('Fetching tweet for ID %s', tweet_id)\n\n tweet = api.get_status(tweet_id)\n print(\"%s,%s\" % (tweet_id, tweet.text))\n return tweet\n\n\ndef download_tweets( fetch_list, raw_dir ):\n\n # ensure raw data directory exists\n if not os.path.exists( raw_dir ):\n os.mkdir( raw_dir )\n\n # Set rate limit and minus fudge factor of 100\n # https://dev.twitter.com/rest/public/rate-limits\n max_tweets_per_hr = 4 * 900 - 100\n download_pause_sec = 3600.0 / max_tweets_per_hr\n print(\"Tweet Throttle - Max tweets per hour = %d (One every %f seconds)\" % \\\n (max_tweets_per_hr, download_pause_sec))\n\n # download tweets\n for idx in range(0,len(fetch_list)):\n\n # current item\n item = fetch_list[idx]\n\n # print status\n trem = get_time_left_str( idx, fetch_list, download_pause_sec )\n print('--> downloading tweet #%s (%d of %d) (%s left)' % \\\n (item[2], idx+1, len(fetch_list), trem))\n\n # pull data\n try:\n data = oauth_get_tweet(item[2])\n with open(raw_dir + item[2] + '.json', 'w', encoding=\"utf-8\") as outfile:\n json.dump(data._json, outfile, ensure_ascii=False, indent=2)\n except tweepy.TweepError as te:\n print(\"Failed to get tweet ID %s: %s\" % (item[2], te))\n # traceback.print_exc(file=sys.stderr)\n pass\n \n # stay in Twitter API rate limits \n print(' pausing %f seconds to obey Twitter API rate limits' % \\\n (download_pause_sec))\n time.sleep( download_pause_sec )\n\n return\n\n\ndef parse_tweet_json( filename ):\n \n # read tweet\n print('opening: ' + filename)\n\t\n # parse json \n with open( filename, 'rt', encoding=\"utf-8\" ) as infile:\n try:\n tweet_json = json.load( infile )\n except ValueError as e:\n raise RuntimeError(\"Error parsing json - %s\" % str(e))\n except json.JSONDecodeError as e:\n raise RuntimeError(\"Error parsing json - %s\" % str(e))\n\n # look for twitter api error msgs\n if 'errors' in tweet_json:\n raise RuntimeError('error in downloaded tweet')\n\n # extract creation date and tweet text\n return [ 
tweet_json['created_at'], tweet_json['text'] ]\n\n\ndef build_output_corpus( out_filename, raw_dir, total_list ):\n\n # open csv output file\n fp = open( out_filename, 'w', newline='', encoding=\"utf-8\" )\n writer = csv.writer( fp, delimiter=',', quotechar='\"', escapechar='\\\\', \n quoting=csv.QUOTE_ALL )\n\n # write header row\n writer.writerow( ['Topic','Sentiment','TweetId','TweetDate','TweetText'] )\n\n # parse all downloaded tweets\n missing_count = 0\n for item in total_list:\n\n # ensure tweet exists\n if os.path.exists( raw_dir + item[2] + '.json' ):\n\n try: \n # parse tweet\n parsed_tweet = parse_tweet_json( raw_dir + item[2] + '.json' )\n full_row = item + parsed_tweet\n \n # write csv row\n writer.writerow( full_row )\n\n except RuntimeError:\n print('--> bad data in tweet #' + item[2])\n missing_count += 1\n\n else:\n print('--> missing tweet #' + item[2])\n missing_count += 1\n\n # indicate success\n if missing_count == 0:\n print('\\nSuccessfully downloaded corpus!')\n print('Output in: ' + out_filename + '\\n')\n else: \n print('\\nMissing %d of %d tweets!' 
% (missing_count, len(total_list)))\n print('Partial output in: ' + out_filename + '\\n')\n\n return\n\n\ndef main():\n\n # get user parameters\n user_params = get_user_params()\n dump_user_params( user_params )\n\n # get fetch list\n total_list = read_total_list( user_params['inList'] )\n fetch_list = purge_already_fetched( total_list, user_params['rawDir'] )\n\n # start fetching data from twitter\n download_tweets( fetch_list, user_params['rawDir'] )\n\n # second pass for any failed downloads\n print('\\nStarting second pass to retry any failed downloads')\n fetch_list = purge_already_fetched( total_list, user_params['rawDir'] )\n download_tweets( fetch_list, user_params['rawDir'] )\n\n # build output corpus\n build_output_corpus( user_params['outList'], user_params['rawDir'], \n total_list )\n\n return\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":8560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"301171039","text":"from django.db import models\nfrom django.contrib.postgres.fields import JSONField\nfrom django.contrib.auth import get_user_model\n\n\n# Suggestions (needs refined algorithm):\n# Perform aggregate groupings on User search history by ingredients\n# Find most commonly searched ingredients\n# Get ingredients and cuisine category from a random saved bookmark\n# Generate new search suggestion from combined data\n\n\nclass History(models.Model):\n \"\"\"Log user searches from application to provide caching and\n adaptive suggestions.\"\"\"\n\n user = models.ForeignKey(\n get_user_model(),\n null=False,\n blank=False,\n on_delete=models.CASCADE,\n related_name='searches',\n related_query_name='search'\n )\n ingredients = JSONField(default=dict)\n filters = JSONField(default=dict)\n request_url = models.CharField(max_length=200, default='')\n submitted_at = models.DateTimeField(auto_now_add=True)\n\n\nclass Bookmarks(models.Model):\n \"\"\"Save recipe search results for future use.\"\"\"\n\n user = models.ForeignKey(\n get_user_model(),\n null=False,\n blank=False,\n on_delete=models.CASCADE,\n related_name='bookmarks',\n related_query_name='bookmark'\n )\n url = models.CharField(max_length=400)\n created_at = models.DateTimeField(auto_now_add=True)\n","sub_path":"recipe-api/search/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"537519557","text":"import numpy as np\nimport pylab as pl\n\n\nKappa_In = np.genfromtxt('Plotters/Output/Average_Size_KappaEst_in_RA_Dec_Grid.dat')\nKappa = Kappa_In[1:Kappa_In.shape[0]-1, 1:Kappa_In.shape[1]-1]\n\nKappaErr_In = np.genfromtxt('Plotters/Output/Average_Size_KappaError_in_RA_Dec_Grid.dat')\nKappaErr = KappaErr_In[1:KappaErr_In.shape[0]-1, 1:KappaErr_In.shape[1]-1]\n\nRatioK = np.zeros(Kappa.shape)\n\nfor i in range(1,Kappa.shape[0]-1):\n for j in range(1,Kappa.shape[1]-1): \n if(KappaErr[i,j] != 0.):\n RatioK[i,j] = Kappa[i,j]/KappaErr[i,j]\n else:\n RatioK[i,j] = 0.\n\nf = pl.figure()\nax = f.add_subplot(1,1,1)\nim = ax.imshow(RatioK)\nf.colorbar(im)\n\nax.set_title(r'$\\kappa_{Size}/\\sigma_{\\kappa}$')\npl.show()\n\n\nSize_In = np.genfromtxt('Plotters/Output/Average_Size_in_RA_Dec_Grid.dat')\nSize = Size_In[1:Size_In.shape[0]-1, 1:Size_In.shape[1]-1]\n\nSizeErr_In = np.genfromtxt('Plotters/Output/Average_Size_Error_in_RA_Dec_Grid.dat')\nSizeErr = SizeErr_In[1:SizeErr_In.shape[0]-1, 1:SizeErr_In.shape[1]-1]\n\nRatio = np.zeros(Size.shape)\n\nfor i in range(1,Size.shape[0]-1):\n for j in range(1,Size.shape[1]-1): \n if(SizeErr[i,j] != 0.):\n Ratio[i,j] = Size[i,j]/SizeErr[i,j]\n else:\n Ratio[i,j] = 0.\n\nf = pl.figure()\nax = f.add_subplot(1,1,1)\nim = ax.imshow(Ratio)\nf.colorbar(im)\n\nax.set_title(r'$R/\\sigma_{R}$')\npl.show()\n\n\nRatioErr = np.zeros(SizeErr.shape)\nfor i in range(1,Size.shape[0]-1):\n for j in range(1,Size.shape[1]-1): \n if(SizeErr[i,j] != 0.):\n RatioErr[i,j] = KappaErr[i,j]/SizeErr[i,j]\n else:\n RatioErr[i,j] = 0.\n\nf = pl.figure()\nax = f.add_subplot(1,1,1)\nim = ax.imshow(RatioErr)\nf.colorbar(im)\n\nax.set_title(r'$\\sigma_{\\kappa}/\\sigma_{R}$')\npl.show()\n\n","sub_path":"Size_Err_Ratio.py","file_name":"Size_Err_Ratio.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"257525092","text":"Lista1 = []\n\ncontador = 0\nvalores = 0\nfor item in range(1, 10 +1):\n item = int(input(\" Insira os numeros que deseja: \"))\n if item != -1:\n Lista1.append(item)\n contador = contador + 1\n valores = valores + item\n print(Lista1)\n if contador == 10:\n conta1 = valores / 10\n print (\"conta1 : \", conta1)\n else:\n conta = valores / contador\n print (conta)\n break","sub_path":"FichasPraticas/Ficha6.ex2.py","file_name":"Ficha6.ex2.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"509080585","text":"import re\n\nfrom twisted.internet.defer import inlineCallbacks\nfrom twisted.web import http\nfrom twisted.web.resource import Resource\n\nfrom vumi.transports.tests.test_base import TransportTestCase\nfrom vumi.tests.utils import (get_stubbed_worker, TestResourceWorker,\n RegexMatcher, UTCNearNow)\nfrom vumi.utils import http_request_full\nfrom vumi.message import TransportMessage, TransportEvent, TransportUserMessage\n\nfrom tests.utils import MessageMaker\nfrom transports import MobivateHttpTransport\n\nclass MobivateHttpTransportTestCase(MessageMaker, TransportTestCase):\n \n transport_name = 'mobivate'\n transport_type = 'sms'\n transport_class = MobivateHttpTransport\n \n @inlineCallbacks\n def setUp(self):\n yield super(MobivateHttpTransportTestCase, self).setUp()\n self.send_path = '/sendsms'\n self.send_port = 9999\n self.config ={\n 'transport_name': self.transport_name,\n 'url': 'http://localhost:%s%s' % (self.send_port, self.send_path),\n 'user_name': 'username',\n 'password': 'password',\n 'default_origin': '55882',\n 'receive_path': '/mobivate',\n 'receive_port': 9998}\n self.worker = yield self.get_transport(self.config)\n \n def make_resource_worker(self, response, code=http.OK, send_id=None):\n w = get_stubbed_worker(TestResourceWorker, {})\n w.set_resources([\n (self.send_path, TestResource, ( response, code, send_id))])\n self._workers.append(w)\n return w.startWorker()\n\n def get_dispatched(self, rkey):\n return self._amqp.get_dispatched('vumi', rkey)\n \n @inlineCallbacks\n def test_sending_one_sms_ok(self):\n mocked_message = \"0\"\n yield self.make_resource_worker(mocked_message)\n yield self.dispatch(self.mkmsg_out())\n [smsg] = self.get_dispatched('mobivate.event')\n self.assertEqual(\n self.mkmsg_ack(\n user_message_id='1',\n sent_message_id='1'),\n TransportMessage.from_json(smsg.body))\n\n @inlineCallbacks\n def test_sending_one_sms_fail(self):\n mocked_message = \"500\\nSome internal issue\"\n yield 
self.make_resource_worker(mocked_message)\n yield self.dispatch(self.mkmsg_out(to_addr=\"256\"))\n [smsg] = self.get_dispatched('mobivate.event')\n self.assertEqual(\n self.mkmsg_delivery(\n transport_name=self.transport_name,\n delivery_status='failed',\n failure_level='service',\n failure_code=\"500\",\n failure_reason=\"Some internal issue\",\n user_message_id='1',\n sent_message_id='1'),\n TransportMessage.from_json(smsg.body))\n\n @inlineCallbacks\n def test_receiving_sms(self):\n params = (\"ORIGINATOR=61412345678&RECIPIENT=1987654&PROVIDER=telstra\"\n \"&MESSAGE_TEXT=Hello%20There!\")\n url = (\"http://localhost:%s%s/SMSfromMobiles?%s\" %\n (self.config['receive_port'], self.config['receive_path'], params))\n\n response = yield http_request_full(url, method='GET')\n self.assertEqual(response.code, http.OK)\n self.assertEqual(response.delivered_body, '0')\n \n [smsg] = self.get_dispatched('mobivate.inbound')\n sms_in = TransportMessage.from_json(smsg.body)\n self.assertEqual(self.transport_name, sms_in['transport_name'])\n self.assertEqual(\"Hello There!\", sms_in['content'])\n self.assertEqual(\"61412345678\", sms_in['from_addr'])\n self.assertEqual(\"1987654\", sms_in['to_addr'])\n\n @inlineCallbacks\n def test_receiving_delivery_report(self):\n params = (\"ORIGINATOR=61412345678&RECIPIENT=1987654&PROVIDER=telstra\"\n \"&MESSAGE_TEXT=Hello%20There!&ID=939ec52e333fbf124a87845d3a5d72e1\"\n \"&REFERENCE=ABC123&RESULT=1\")\n url = (\"http://localhost:%s%s/DeliveryReciept?%s\" %\n (self.config['receive_port'], self.config['receive_path'], params))\n\n response = yield http_request_full(url, method='GET')\n self.assertEqual(response.code, http.OK)\n self.assertEqual(response.delivered_body, '0') \n \n [smsg] = self.get_dispatched('mobivate.event')\n sms_delivery = TransportMessage.from_json(smsg.body) \n self.assertEqual(\n self.mkmsg_delivery(\n transport_name=self.transport_name,\n delivery_status='delivered',\n user_message_id='ABC123'),\n 
sms_delivery)\n\n\nclass TestResource(Resource):\n isLeaf = True\n \n def __init__(self, response, code=http.OK, send_id=None):\n self.response = response\n self.code = code\n self.send_id = send_id\n \n def render_GET(self, request):\n regex = re.compile('^(\\+|00|0)[0-9]*')\n request.setResponseCode(self.code)\n if (not ('RECIPIENT' in request.args) or\n regex.match(request.args['RECIPIENT'][0]) or\n not ('ORIGINATOR' in request.args) or\n not ('USER_NAME' in request.args) or\n not ('PASSWORD' in request.args) or\n not ('MESSAGE_TEXT' in request.args) or\n not ('REFERENCE' in request.args) or\n (self.send_id is not None and self.send_id != request.args['originator'][0])):\n return \"501\"\n else:\n return self.response\n","sub_path":"transports/tests/test_mobivate_http.py","file_name":"test_mobivate_http.py","file_ext":"py","file_size_in_byte":5503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"364029014","text":"from neuroglancer.AlignmentScore import AlignmentScore\nfrom dash.dependencies import Input, Output\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom django_plotly_dash import DjangoDash\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\nalignmentPlot = DjangoDash('AlignmentPlot',\n external_stylesheets=external_stylesheets)\n\nalignmentPlot.layout = html.Div(children=[\n dcc.Graph(id='plot'),\n html.Label('Select plot type'),\n dcc.RadioItems(id='plottype',\n options=[\n {'label': 'scatter plot', 'value': 'scatter'},\n {'label': u'box plot', 'value': 'box_plot'},\n ],\n value='scatter'\n ),\n])\n\n\n@alignmentPlot.expanded_callback(\n Output('plot', 'figure'),\n [Input('plottype', 'value')])\ndef update_figure(figure_type):\n align_score = AlignmentScore()\n fig = align_score.get(figure_type)\n return fig\n","sub_path":"neuroglancer/com_score_app.py","file_name":"com_score_app.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"280086488","text":"from selenium import webdriver\r\nimport sys\r\n\r\ndriver = webdriver.Chrome()\r\nexecutor_url = driver.command_executor._url\r\nsession_id = driver.session_id\r\npidfile=\"chrome.pid\"\r\ntarget_url = sys.argv[1] \r\n\r\ndriver.get(target_url)\r\n\r\nprint(session_id)\r\nprint(executor_url)\r\nwith open(pidfile, 'wb') as the_file:\r\n the_file.write(executor_url + '\\n')\r\n the_file.write(session_id)\r\n\r\ndef create_driver_session(session_id, executor_url):\r\n from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver\r\n\r\n # Save the original function, so we can revert our patch\r\n org_command_execute = RemoteWebDriver.execute\r\n\r\n def new_command_execute(self, command, params=None):\r\n if command == \"newSession\":\r\n # Mock the response\r\n return {'success': 0, 'value': None, 'sessionId': session_id}\r\n else:\r\n return org_command_execute(self, command, params)\r\n\r\n # Patch the function before creating the driver object\r\n RemoteWebDriver.execute = new_command_execute\r\n\r\n new_driver = webdriver.Remote(command_executor=executor_url, desired_capabilities={})\r\n new_driver.session_id = session_id\r\n\r\n # Replace the patched function with original function\r\n RemoteWebDriver.execute = org_command_execute\r\n\r\n return new_driver\r\n\r\ndriver2 = create_driver_session(session_id, executor_url)\r\nprint(driver2.current_url)\r\n","sub_path":"create_new_session.py","file_name":"create_new_session.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"373400857","text":"from django.shortcuts import render\nfrom django.http import HttpResponse \nfrom django.views.decorators.csrf import csrf_exempt \nfrom rest_framework.renderers import JSONRenderer \nfrom rest_framework.parsers import JSONParser \nfrom rest_framework import status \nfrom WhiteMarket.apps.products.models import Product \nfrom WhiteMarket.apps.products.serializers import ProductSerializer \nfrom django.contrib.gis.geoip2 import GeoIP2\n\n# Create your views here.\nclass JSONResponse(HttpResponse): \n def __init__(self, data, **kwargs): \n content = JSONRenderer().render(data) \n kwargs['content_type'] = 'application/json' \n super(JSONResponse, self).__init__(content, **kwargs) \n \n#Get Products === List Products\n#Post Products === Create Product\n@csrf_exempt \ndef product_list(request): \n if request.method == 'GET': \n products = Product.objects.all() \n products_serializer = ProductSerializer(products, many=True)\n return JSONResponse(products_serializer.data) \n \n elif request.method == 'POST': \n product_data = JSONParser().parse(request) \n product_serializer = ProductSerializer(data=product_data) \n if product_serializer.is_valid(): \n product_serializer.save() \n return JSONResponse(product_serializer.data, \\\n status=status.HTTP_201_CREATED) \n return JSONResponse(product_serializer.errors, \\\n status=status.HTTP_400_BAD_REQUEST) \n\ndef get_client_ip(request):\n x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0]\n else:\n ip = request.META.get('REMOTE_ADDR')\n return ip\n '''\n g = GeoIP2()\n return JSONResponse(g.city(get_client_ip(request)))\n '''","sub_path":"server/WhiteMarket/WhiteMarket/apps/products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"187831214","text":"import mock\nimport threading\n\nfrom chalice.cli import reloader\n\n\nDEFAULT_DELAY = 0.1\nMAX_TIMEOUT = 5.0\n\n\ndef modify_file_after_n_seconds(filename, contents, delay=DEFAULT_DELAY):\n t = threading.Timer(delay, function=modify_file, args=(filename, contents))\n t.daemon = True\n t.start()\n\n\ndef modify_file(filename, contents):\n if filename is None:\n return\n with open(filename, 'w') as f:\n f.write(contents)\n\n\ndef assert_reload_happens(root_dir, when_modified_file):\n http_thread = mock.Mock(spec=reloader.HTTPServerThread)\n p = reloader.WorkerProcess(http_thread)\n modify_file_after_n_seconds(when_modified_file, 'contents')\n rc = p.main(root_dir, MAX_TIMEOUT)\n assert rc == reloader.RESTART_REQUEST_RC\n\n\ndef test_can_reload_when_file_created(tmpdir):\n top_level_file = str(tmpdir.join('foo'))\n assert_reload_happens(str(tmpdir), when_modified_file=top_level_file)\n\n\ndef test_can_reload_when_subdir_file_created(tmpdir):\n subdir_file = str(tmpdir.join('subdir').mkdir().join('foo.txt'))\n assert_reload_happens(str(tmpdir), when_modified_file=subdir_file)\n\n\ndef test_rc_0_when_no_file_modified(tmpdir):\n http_thread = mock.Mock(spec=reloader.HTTPServerThread)\n p = reloader.WorkerProcess(http_thread)\n rc = p.main(str(tmpdir), timeout=0.2)\n assert rc == 0\n","sub_path":"tests/functional/cli/test_reloader.py","file_name":"test_reloader.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"473756567","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\b3\\parsers\\cod.py\n# Compiled at: 2016-03-08 18:42:09\n__author__ = 'ThorN, xlr8or'\n__version__ = '1.5.3'\nimport b3, b3.events, b3.parsers.punkbuster, re, string\nfrom b3.parsers.q3a.abstractParser import AbstractParser\nfrom threading import Timer\n\nclass CodParser(AbstractParser):\n gameName = 'cod'\n PunkBuster = None\n IpsOnly = False\n _guidLength = 6\n _reMap = re.compile('map ([a-z0-9_-]+)', re.IGNORECASE)\n _pbRegExp = re.compile('^[0-9a-f]{32}$', re.IGNORECASE)\n _logSync = 3\n _counter = {}\n _line_length = 65\n _line_color_prefix = ''\n _commands = {'message': 'tell %(cid)s %(message)s', \n 'say': 'say %(message)s', \n 'set': 'set %(name)s \"%(value)s\"', \n 'kick': 'clientkick %(cid)s', \n 'ban': 'banclient %(cid)s', \n 'unban': 'unbanuser %(name)s', \n 'tempban': 'clientkick %(cid)s'}\n _eventMap = {}\n _lineClear = re.compile('^(?:[0-9:]+\\\\s?)?')\n _lineFormats = (\n re.compile('^(?P[a-z]+):\\\\s?(?P.*)$', re.IGNORECASE),\n re.compile('^(?P[A-Z]);(?P(?P[^;]+);(?P[0-9-]{1,2});(?P[a-z]+);(?P[^;]+);(?P[^;]*);(?P-1);(?Pworld);(?P[^;]*);(?P[a-z0-9_-]+);(?P[0-9.]+);(?P[A-Z_]+);(?P[a-z_]+))$', re.IGNORECASE),\n re.compile('^(?P[A-Z]);(?P(?P[^;]+);(?P[0-9]{1,2});(?P[a-z]*);(?P[^;]+);(?P[^;]+);(?P[0-9]{1,2});(?P[a-z]*);(?P[^;]+);(?P[a-z0-9_-]+);(?P[0-9.]+);(?P[A-Z_]+);(?P[a-z_]+))$', re.IGNORECASE),\n re.compile('^(?P[A-Z]);(?P(?P[^;]+);(?P[0-9]{1,2});(?P[a-z]*);(?P[^;]+);(?P[^;]*);(?P-1);(?P[a-z]*);(?P[^;]+);(?P[a-z0-9_-]+);(?P[0-9.]+);(?P[A-Z_]+);(?P[a-z_]+))$', re.IGNORECASE),\n re.compile('^(?P[A-Z]);(?P(?P[^;]+);(?P[0-9]{1,2});(?P[a-z]*);(?P[^;]+);(?P[^;]*);(?P[0-9]{1,2});(?P[a-z]*);(?P[^;]+);(?P[a-z0-9_-]+);(?P[0-9.]+);(?P[A-Z_]+);(?P[a-z_]+))$', re.IGNORECASE),\n 
re.compile('^(?P[A-Z]);(?P(?P[^;]+);(?P[0-9]{1,2});(?P[a-z]*);(?P[^;]+);(?P[^;]*);(?P[0-9]{1,2});(?Pworld);(?P[a-z]*);(?Pnone);(?P[0-9.]+);(?P[A-Z_]+);(?P[a-z_]+))$', re.IGNORECASE),\n re.compile('^(?P[A-Z]);(?P(?P[^;]+);(?P[0-9]{1,2});(?P[a-z]+);(?P[^;]+);(?P[a-z_]+))$', re.IGNORECASE),\n re.compile('^(?PJT);(?P(?P[^;]+);(?P[0-9]{1,2});(?P[a-z]+);(?P[^;]+);)$', re.IGNORECASE),\n re.compile('^(?P[a-z]+);(?P(?P[^;]+);(?P[0-9]{1,2});(?P[^;]+);(?P[^;]+);(?P[0-9]{1,2});(?P[^;]+);(?P.*))$', re.IGNORECASE),\n re.compile('^(?P[a-z]+);(?P(?P[^;]+);(?P[0-9]{1,2});(?P[^;]+);(?P.*))$', re.IGNORECASE),\n re.compile('^(?P[A-Z]);(?P(?P[^;]+);(?P[0-9]{1,2});(?P[^;]+))$', re.IGNORECASE))\n _regPlayer = re.compile('^\\\\s*(?P[0-9]+)\\\\s+(?P[0-9-]+)\\\\s+(?P[0-9]+)\\\\s+(?P[0-9]+)\\\\s+(?P.*?)\\\\s+(?P[0-9]+?)\\\\s*(?P(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\\\\.){3}(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])):?(?P-?[0-9]{1,5})\\\\s*(?P-?[0-9]{1,5})\\\\s+(?P[0-9]+)$', re.IGNORECASE | re.VERBOSE)\n\n def startup(self):\n \"\"\"\n Called after the parser is created before run().\n \"\"\"\n if not self.config.has_option('server', 'game_log'):\n self.critical(\"Your main config file is missing the 'game_log' setting in section 'server'\")\n raise SystemExit(220)\n if self.IpsOnly:\n self.debug('Authentication method: Using IP instead of GUID!')\n self.clients.newClient('-1', guid='WORLD', name='World', hide=True, pbid='WORLD')\n if not self.config.has_option('server', 'punkbuster') or self.config.getboolean('server', 'punkbuster'):\n result = self.write('PB_SV_Ver')\n if result != '' and result[:7] != 'Unknown':\n self.info('punkbuster active: %s' % result)\n self.PunkBuster = b3.parsers.punkbuster.PunkBuster(self)\n else:\n self.warning('Punkbuster test failed: check your game server setup and B3 config!')\n self.debug('Disabling punkbuster support!')\n self._eventMap['warmup'] = self.getEventID('EVT_GAME_WARMUP')\n self._eventMap['restartgame'] = 
self.getEventID('EVT_GAME_ROUND_END')\n mapname = self.getMap()\n if mapname:\n self.game.mapName = mapname\n self.info('map is: %s' % self.game.mapName)\n self.debug('Forcing server cvar g_logsync to %s' % self._logSync)\n self.setCvar('g_logsync', self._logSync)\n try:\n self.game.fs_game = self.getCvar('fs_game').getString()\n except:\n self.game.fs_game = None\n self.warning('Could not query server for fs_game')\n\n try:\n self.game.fs_basepath = self.getCvar('fs_basepath').getString().rstrip('/')\n self.debug('fs_basepath: %s' % self.game.fs_basepath)\n except:\n self.game.fs_basepath = None\n self.warning('could not query server for fs_basepath')\n\n try:\n self.game.fs_homepath = self.getCvar('fs_homepath').getString().rstrip('/')\n self.debug('fs_homepath: %s' % self.game.fs_homepath)\n except:\n self.game.fs_homepath = None\n self.warning('could not query server for fs_homepath')\n\n try:\n self.game.shortversion = self.getCvar('shortversion').getString()\n self.debug('shortversion: %s' % self.game.shortversion)\n except:\n self.game.shortversion = None\n self.warning('Could not query server for shortversion')\n\n self.setVersionExceptions()\n self.debug('Parser started')\n return\n\n def OnK(self, action, data, match=None):\n victim = self.getClient(victim=match)\n if not victim:\n self.debug('No victim')\n self.OnJ(action, data, match)\n return None\n else:\n attacker = self.getClient(attacker=match)\n if not attacker:\n self.debug('No attacker')\n return None\n attacker.team = self.getTeam(match.group('ateam'))\n attacker.name = match.group('aname')\n victim.team = self.getTeam(match.group('team'))\n victim.name = match.group('name')\n event_key = 'EVT_CLIENT_KILL'\n if attacker.cid == victim.cid:\n event_key = 'EVT_CLIENT_SUICIDE'\n elif attacker.team != b3.TEAM_UNKNOWN and attacker.team == victim.team:\n event_key = 'EVT_CLIENT_KILL_TEAM'\n victim.state = b3.STATE_DEAD\n data = (float(match.group('damage')), match.group('aweap'), 
match.group('dlocation'), match.group('dtype'))\n return self.getEvent(event_key, data=data, client=attacker, target=victim)\n\n def OnD(self, action, data, match=None):\n victim = self.getClient(victim=match)\n if not victim:\n self.debug('No victim - attempt join')\n self.OnJ(action, data, match)\n return None\n else:\n attacker = self.getClient(attacker=match)\n if not attacker:\n self.debug('No attacker')\n return None\n attacker.team = self.getTeam(match.group('ateam'))\n attacker.name = match.group('aname')\n victim.team = self.getTeam(match.group('team'))\n victim.name = match.group('name')\n eventkey = 'EVT_CLIENT_DAMAGE'\n if attacker.cid == victim.cid:\n eventkey = 'EVT_CLIENT_DAMAGE_SELF'\n elif attacker.team != b3.TEAM_UNKNOWN and attacker.team == victim.team:\n eventkey = 'EVT_CLIENT_DAMAGE_TEAM'\n data = (float(match.group('damage')), match.group('aweap'), match.group('dlocation'), match.group('dtype'))\n return self.getEvent(eventkey, data=data, client=attacker, target=victim)\n\n def OnQ(self, action, data, match=None):\n client = self.getClient(match)\n if client:\n client.disconnect()\n elif match.group('cid') in self._counter:\n cid = match.group('cid')\n self._counter[cid] = 'Disconnected'\n self.debug('Slot %s has disconnected or was forwarded to our http download location: removing from authentication queue...' % cid)\n return\n\n def OnJ(self, action, data, match=None):\n codguid = match.group('guid')\n cid = match.group('cid')\n name = match.group('name')\n if len(codguid) < self._guidLength:\n self.verbose2('Invalid GUID: %s. GUID length set to %s' % (codguid, self._guidLength))\n codguid = None\n client = self.getClient(match)\n if client:\n self.verbose2('Client object already exists')\n if not self.PunkBuster:\n if self.IpsOnly:\n if name != client.name:\n self.debug('This is not the correct client (%s <> %s): disconnecting..' 
% (name, client.name))\n client.disconnect()\n return\n self.verbose2('client.name in sync: %s == %s' % (name, client.name))\n else:\n if codguid != client.guid:\n self.debug('This is not the correct client (%s <> %s): disconnecting...' % (codguid, client.guid))\n client.disconnect()\n return\n self.verbose2('client.guid in sync: %s == %s' % (codguid, client.guid))\n client.state = b3.STATE_ALIVE\n client.name = name\n return self.getEvent('EVT_CLIENT_JOIN', client=client)\n else:\n if self._counter.get(cid) and self._counter.get(cid) != 'Disconnected':\n self.verbose('cid: %s already in authentication queue: aborting join' % cid)\n return\n self._counter[cid] = 1\n t = Timer(2, self.newPlayer, (cid, codguid, name))\n t.start()\n self.debug('%s connected: waiting for authentication...' % name)\n self.debug('Our authentication queue: %s' % self._counter)\n return\n\n def OnA(self, action, data, match=None):\n client = self.getClient(match)\n if not client:\n self.debug('No client - attempt join')\n self.OnJ(action, data, match)\n client = self.getClient(match)\n if not client:\n return None\n client.name = match.group('name')\n actiontype = match.group('type')\n self.verbose('On action: %s: %s' % (client.name, actiontype))\n return self.getEvent('EVT_CLIENT_ACTION', data=actiontype, client=client)\n\n def OnSay(self, action, data, match=None):\n client = self.getClient(match)\n if not client:\n self.debug('No client - attempt join')\n self.OnJ(action, data, match)\n client = self.getClient(match)\n if not client:\n return None\n data = match.group('text')\n if data and ord(data[:1]) == 21:\n data = data[1:]\n if self.encoding:\n try:\n data = data.decode(self.encoding)\n except Exception as msg:\n self.warning('ERROR: decoding data: %r', msg)\n\n if client.name != match.group('name'):\n client.name = match.group('name')\n return self.getEvent('EVT_CLIENT_SAY', data=data, client=client)\n\n def OnSayteam(self, action, data, match=None):\n client = 
self.getClient(match)\n if not client:\n self.debug('No client - attempt join')\n self.OnJ(action, data, match)\n client = self.getClient(match)\n if not client:\n return None\n data = match.group('text')\n if data and ord(data[:1]) == 21:\n data = data[1:]\n if self.encoding:\n try:\n data = data.decode(self.encoding)\n except Exception as msg:\n self.warning('ERROR: decoding data: %r', msg)\n\n if client.name != match.group('name'):\n client.name = match.group('name')\n return self.getEvent('EVT_CLIENT_TEAM_SAY', data=data, client=client)\n\n def OnTell(self, action, data, match=None):\n client = self.getClient(match)\n tclient = self.getClient(attacker=match)\n if not client:\n self.debug('No client - attempt join')\n self.OnJ(action, data, match)\n client = self.getClient(match)\n if not client:\n return None\n data = match.group('text')\n if data and ord(data[:1]) == 21:\n data = data[1:]\n if self.encoding:\n try:\n data = data.decode(self.encoding)\n except Exception as msg:\n self.warning('ERROR: decoding data: %r', msg)\n\n client.name = match.group('name')\n return self.getEvent('EVT_CLIENT_PRIVATE_SAY', data=data, client=client, target=tclient)\n\n def OnInitgame(self, action, data, match=None):\n options = re.findall('\\\\\\\\([^\\\\\\\\]+)\\\\\\\\([^\\\\\\\\]+)', data)\n for o in options:\n if o[0] == 'mapname':\n self.game.mapName = o[1]\n elif o[0] == 'g_gametype':\n self.game.gameType = o[1]\n elif o[0] == 'fs_game':\n self.game.modName = o[1]\n else:\n setattr(self.game, o[0], o[1])\n\n self.verbose('...self.console.game.gameType: %s' % self.game.gameType)\n self.game.startRound()\n return self.getEvent('EVT_GAME_ROUND_START', data=self.game)\n\n def OnExitlevel(self, action, data, match=None):\n t = Timer(60, self.clients.sync)\n t.start()\n self.game.mapEnd()\n return self.getEvent('EVT_GAME_EXIT', data=data)\n\n def OnItem(self, action, data, match=None):\n guid, cid, name, item = string.split(data, ';', 3)\n client = 
self.clients.getByCID(cid)\n if client:\n return self.getEvent('EVT_CLIENT_ITEM_PICKUP', data=item, client=client)\n else:\n return\n\n def setVersionExceptions(self):\n \"\"\"\n Dummy to enable shortversionexceptions for cod2.\n Use this function in inheriting parsers to override certain vars based on ie. shortversion.\n \"\"\"\n pass\n\n def getTeam(self, team):\n \"\"\"\n Return a B3 team given the team value.\n :param team: The team value\n \"\"\"\n if team == 'allies':\n return b3.TEAM_BLUE\n else:\n if team == 'axis':\n return b3.TEAM_RED\n return b3.TEAM_UNKNOWN\n\n def connectClient(self, ccid):\n \"\"\"\n Return the client matchign the given slot number.\n :param ccid: The client slot number\n \"\"\"\n players = self.getPlayerList()\n self.verbose('connectClient() = %s' % players)\n for cid, p in players.iteritems():\n if int(cid) == int(ccid):\n self.debug('%s found in status/playerList' % p['name'])\n return p\n\n def newPlayer(self, cid, codguid, name):\n \"\"\"\n Build a new client using data in the authentication queue.\n :param cid: The client slot number\n :param codguid: The client GUID\n :param name: The client name\n \"\"\"\n if not self._counter.get(cid):\n self.verbose('newPlayer thread no longer needed: key no longer available')\n return\n else:\n if self._counter.get(cid) == 'Disconnected':\n self.debug('%s disconnected: removing from authentication queue' % name)\n self._counter.pop(cid)\n return\n self.debug('newClient: %s, %s, %s' % (cid, codguid, name))\n sp = self.connectClient(cid)\n if sp and self.PunkBuster:\n self.debug('sp: %s' % sp)\n if not re.match(self._pbRegExp, sp['pbid']):\n self.debug('PB-id is not valid: giving it another try')\n self._counter[cid] += 1\n t = Timer(4, self.newPlayer, (cid, codguid, name))\n t.start()\n return\n if self.IpsOnly:\n guid = sp['ip']\n pbid = sp['pbid']\n else:\n guid = sp['pbid']\n pbid = guid\n ip = sp['ip']\n if self._counter.get(cid):\n self._counter.pop(cid)\n else:\n return\n elif sp:\n if 
self.IpsOnly:\n codguid = sp['ip']\n if not codguid:\n self.warning('Missing or wrong CodGuid and PunkBuster is disabled: cannot authenticate!')\n if self._counter.get(cid):\n self._counter.pop(cid)\n return\n guid = codguid\n pbid = ''\n ip = sp['ip']\n if self._counter.get(cid):\n self._counter.pop(cid)\n else:\n return\n else:\n if self._counter.get(cid) > 10:\n self.debug('Could not auth %s: giving up...' % name)\n if self._counter.get(cid):\n self._counter.pop(cid)\n return\n if self._counter.get(cid):\n self.debug('%s not yet fully connected: retrying...#:%s' % (name, self._counter.get(cid)))\n self._counter[cid] += 1\n t = Timer(4, self.newPlayer, (cid, codguid, name))\n t.start()\n else:\n self.warning('All authentication attempts failed')\n return\n client = self.clients.newClient(cid, name=name, ip=ip, state=b3.STATE_ALIVE, guid=guid, pbid=pbid, data={'codguid': codguid})\n self.queueEvent(self.getEvent('EVT_CLIENT_JOIN', client=client))\n return\n\n def unban(self, client, reason='', admin=None, silent=False, *kwargs):\n \"\"\"\n Unban a client.\n :param client: The client to unban\n :param reason: The reason for the unban\n :param admin: The admin who unbanned this client\n :param silent: Whether or not to announce this unban\n \"\"\"\n if self.PunkBuster:\n if client.pbid:\n result = self.PunkBuster.unBanGUID(client)\n if result:\n admin.message('^3Unbanned^7: %s^7: %s' % (client.exactName, result))\n if admin:\n variables = self.getMessageVariables(client=client, reason=reason, admin=admin)\n fullreason = self.getMessage('unbanned_by', variables)\n else:\n variables = self.getMessageVariables(client=client, reason=reason)\n fullreason = self.getMessage('unbanned', variables)\n if not silent and fullreason != '':\n self.say(fullreason)\n elif admin:\n admin.message('%s ^7unbanned but has no punkbuster id' % client.exactName)\n else:\n name = self.stripColors(client.exactName)\n result = self.write(self.getCommand('unban', name=name, reason=reason))\n 
if admin:\n admin.message(result)\n\n def getMaps(self):\n \"\"\"\n Return the available maps/levels name\n \"\"\"\n maps = self.getCvar('sv_mapRotation')\n nmaps = []\n if maps:\n maps = re.findall(self._reMap, maps[0])\n for m in maps:\n if m[:3] == 'mp_':\n m = m[3:]\n nmaps.append(m.title())\n\n return nmaps\n\n def getNextMap(self):\n \"\"\"\n Return the next map/level name to be played.\n \"\"\"\n if not self.game.mapName:\n return\n else:\n maps = self.getCvar('sv_mapRotation')\n if maps:\n maps = re.findall(self._reMap, maps[0])\n gmap = self.game.mapName.strip().lower()\n found = False\n nmap = ''\n for nmap in maps:\n nmap = nmap.strip().lower()\n if found:\n found = nmap\n break\n elif nmap == gmap:\n found = True\n\n if found is True:\n nmap = maps[0].strip().lower()\n if found:\n if nmap[:3] == 'mp_':\n nmap = nmap[3:]\n return nmap.title()\n return\n return\n return\n\n def sync(self):\n \"\"\"\n For all connected players returned by self.get_player_list(), get the matching Client\n object from self.clients (with self.clients.get_by_cid(cid) or similar methods) and\n look for inconsistencies. 
If required call the client.disconnect() method to remove\n a client from self.clients.\n \"\"\"\n self.debug('synchronising clients...')\n plist = self.getPlayerList(maxRetries=4)\n mlist = {}\n for cid, c in plist.iteritems():\n client = self.clients.getByCID(cid)\n if client:\n if client.guid and 'guid' in c and not self.IpsOnly:\n if client.guid == c['guid']:\n self.debug('in-sync %s == %s', client.guid, c['guid'])\n mlist[str(cid)] = client\n else:\n self.debug('no-sync %s <> %s', client.guid, c['guid'])\n client.disconnect()\n elif client.ip and 'ip' in c:\n if client.ip == c['ip']:\n self.debug('in-sync %s == %s', client.ip, c['ip'])\n mlist[str(cid)] = client\n else:\n self.debug('no-sync %s <> %s', client.ip, c['ip'])\n client.disconnect()\n else:\n self.debug('no-sync: no guid or ip found')\n\n return mlist\n\n def authorizeClients(self):\n \"\"\"\n For all connected players, fill the client object with properties allowing to find\n the user in the database (usualy guid, or punkbuster id, ip) and call the\n Client.auth() method.\n \"\"\"\n players = self.getPlayerList(maxRetries=4)\n self.verbose('authorizeClients() = %s' % players)\n for cid, p in players.iteritems():\n sp = self.clients.getByCID(cid)\n if sp:\n sp.ip = p.get('ip', sp.ip)\n sp.pbid = p.get('pbid', sp.pbid)\n if self.IpsOnly:\n sp.guid = p.get('ip', sp.guid)\n else:\n sp.guid = p.get('guid', sp.guid)\n sp.data = p\n sp.auth()","sub_path":"pycfiles/b3-1.10.10-py2.7/cod.py","file_name":"cod.py","file_ext":"py","file_size_in_byte":23655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"0"}
+{"seq_id":"408787009","text":"'''Un alumno desea saber cual será su promedio general en las tres materias mas difíciles\nque cursa y cual será el promedio que obtendrá en cada una de ellas. Estas materias se\nevalúan como se muestra a continuación:\nLa calificación de Matemáticas se obtiene de la sig. manera:\nExamen 90%\nPromedio de tareas 10%\nEn esta materia se pidió un total de tres tareas.\nLa calificación de Física se obtiene de la sig. manera:\nExamen 80%\nPromedio de tareas 20%\nEn esta materia se pidió un total de dos tareas.\nLa calificación de Química se obtiene de la sig. manera:\nExamen 85%\nPromedio de tareas 15%\nEn esta materia se pidió un promedio de tres tareas.'''\n\nexamenm = float(input(\"Escriba la calificacion del examen de matematicas: \"))\ntarem1 = float(input(\"Escriba la calificacion de la tarea 1 de matematicas: \"))\ntarem2 = float(input(\"Escriba la calificacion de la tarea 2 de matematicas: \"))\ntarem3 = float(input(\"Escriba la calificacion de la tarea 3 de matematicas: \"))\nexamenf= float(input(\"Escriba la calificacion del examen de fisica: \"))\ntaref1 = float(input(\"Escriba la calificacion de la tarea 1 de fisica: \"))\ntaref2 = float(input(\"Escriba la calificacion de la tarea 2 de fisica: \"))\nexamenq = float(input(\"Escriba la calificacion del examen de quimica: \"))\ntareq1 = float(input(\"Escriba la calificacion de la tarea 1 de quimica: \"))\ntareq2 = float(input(\"Escriba la calificacion de la tarea 2 de quimica: \"))\ntareq3 = float(input(\"Escriba la calificacion de la tarea 3 de quimica: \"))\n\npem = examenm * 0.90\npromtarem = (tarem1 + tarem2 + tarem3) / 3\nporcenttm = promtarem * 0.10\ntotalm = pem + porcenttm\n\npef = examenf * 0.80\npromtaref = (taref1 + taref2 ) / 2\nporcenttf = promtaref * 0.20\ntotalf = pef + porcenttf\n\npeq = examenq * 0.85\npromtareq = (tareq1 + tareq2 + tareq3) / 3\nporcenttq = promtareq * 0.15\ntotalq = peq + porcenttq\n\nprommat = (totalm + totalf + totalq)/3\n\nprint(f\"La 
calificacion final de matematicas es: {totalm}\")\nprint(f\"La calificacion final de fisica es: {totalf}\")\nprint(f\"La calificacion final de quimica es: {totalq}\")\nprint(f\"El promedio de las tres materias es: {prommat}\")\n","sub_path":"propuesto 10.py","file_name":"propuesto 10.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"148873187","text":"import time\n\nfrom binary_search_tree import BSTNode\n\n\n\nstart_time = time.time()\n\nf = open('names_1.txt', 'r')\nnames_1 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\nf = open('names_2.txt', 'r')\nnames_2 = f.read().split(\"\\n\") # List containing 10000 names\nf.close()\n\nduplicates = [] # Return the list of duplicates in this data structure\n\n#Replace the nested for loops below with your improvements\n# for name_1 in names_1:\n# for name_2 in names_2:\n# if name_1 == name_2:\n# duplicates.append(name_1)\n\n# the runtime complexity is O(n**2), or to be more specific O(len(names_1)*len(names_2))\n\n\"\"\"solution 1\"\"\"\n\n# bst = BSTNode(\"named entries\")\n\n# for name in names_1:\n\n# bst.insert(name)\n\n# for name in names_2:\n\n# if bst.contains(name):\n\n# duplicates.append(name)\n\n\n# ---------- Stretch Goal -----------\n# Python has built-in tools that allow for a very efficient approach to this problem\n# What's the best time you can accomplish? 
Thare are no restrictions on techniques or data\n# structures, but you may not import any additional libraries that you did not write yourself.\n\n\n\n\"\"\" solution 2: fastest solution\"\"\"\n\nnames_1 = set(names_1)\nnames_2 = set(names_2)\n\nduplicates = list(names_1.intersection(names_2))\n\n\"\"\" solution 3 \"\"\"\n\n# storage = {name:0 for name in names_1}\n\n# for name in names_2:\n\n# try:\n\n# storage[name] += 1\n\n# except:\n\n# continue\n\n# duplicates = [key for key in storage.keys() if storage[key] == 1]\n\n\n\"\"\" solution using only lists \"\"\"\n\n# from collections import Counter\n\n# all_names = names_1 + names_2\n\n# cnt = Counter(all_names)\n\n# duplicates = [k for k, v in cnt.items() if v > 1]\n\n# this solution does not account for duplicates in lists themeselves\n\n\nend_time = time.time()\nprint (f\"{len(duplicates)} duplicates:\\n\\n{', '.join(duplicates)}\\n\\n\")\nprint (f\"runtime: {end_time - start_time} seconds\")\n\n\n\n","sub_path":"names/names.py","file_name":"names.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"325795544","text":"\"\"\"Activity views.\"\"\"\n\nimport django_filters\n\nfrom django.conf.urls import url\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.views.generic import CreateView, DetailView, UpdateView\nfrom django_filters.views import FilterView\n\nfrom .mixins import AJAXRedirectMixin\nfrom ..forms import activity as activity_forms\nfrom ..models.activity import Activity\n\n\nclass ActivityCreateView(CreateView):\n \"\"\"Create an Activity.\"\"\"\n\n model = Activity\n template_name = 'activity/create.html'\n form_class = activity_forms.ActivityForm\n\n def get_success_url(self):\n \"\"\"Go to the Activity details view.\"\"\"\n\n return reverse_lazy('activity_details', args=[self.object.id])\n\n\nclass ActivityDetailView(DetailView):\n \"\"\"Activity Details\"\"\"\n\n model = Activity\n template_name = 'activity/activity.html'\n\n\nclass ActivityInlineDetailView(DetailView):\n \"\"\"Display Activity details in a table row.\"\"\"\n\n model = Activity\n template_name = 'activity/inline_details.html'\n\n\nclass ActivityEditView(AJAXRedirectMixin, UpdateView):\n \"\"\"Edit Activity details.\"\"\"\n\n model = Activity\n template_name = 'activity/edit.html'\n form_class = activity_forms.ActivityForm\n\n def get_success_url(self):\n \"\"\"Go to the Activity details view.\"\"\"\n\n return reverse_lazy('activity_details', args=[self.object.id])\n\n\nclass ActivityInlineEditView(AJAXRedirectMixin, UpdateView):\n \"\"\"Display a form in a table row.\"\"\"\n\n model = Activity\n template_name = 'activity/inline_edit.html'\n fields = [\n 'id',\n 'short_description',\n 'long_description',\n 'to_dos',\n 'places',\n ]\n\n def get_success_url(self):\n \"\"\"Go to the Activity details view.\"\"\"\n\n return reverse_lazy('activity_inline_details', args=[self.object.id])\n\n\nclass ActivityFilter(django_filters.FilterSet):\n \"\"\"Filter for Activities.\"\"\"\n\n all_choices = [('', '---------')]\n\n id = django_filters.CharFilter( # 
pylint:disable=invalid-name\n lookup_expr='icontains',\n help_text='',\n )\n\n short_description = django_filters.CharFilter(\n lookup_expr='icontains',\n help_text='',\n )\n\n long_description = django_filters.CharFilter(\n lookup_expr='icontains',\n help_text='',\n )\n\n class Meta:\n model = Activity\n fields = ['id', 'short_description', 'long_description']\n order_by = ['id']\n\n\nclass ActivityListView(AJAXRedirectMixin, FilterView):\n \"\"\"List Activities and provide a filter.\"\"\"\n\n model = Activity\n template_name = 'activity/filtered_list.html'\n filterset_class = ActivityFilter\n\n\nurlpatterns = [\n url('^create$', ActivityCreateView.as_view(), name='activity_create'),\n url(\n r'^(?P\\d+)/$',\n ActivityDetailView.as_view(),\n name='activity_details',\n ),\n url(\n r'^(?P\\d+)/edit$',\n ActivityEditView.as_view(),\n name='activity_edit'\n ),\n url(r'^list/$', ActivityListView.as_view(), name='activity_list'),\n url(\n r'^inline/(?P\\d+)/$',\n ActivityInlineDetailView.as_view(),\n name='activity_inline_details',\n ),\n url(\n r'^inline/(?P\\d+)/edit$',\n ActivityInlineEditView.as_view(),\n name='activity_inline_edit',\n ),\n]\n","sub_path":"holiday_planner/holiday_place/views/activity.py","file_name":"activity.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"539419722","text":"# ghifarul azhar #\r\n# 15200234 #\r\n\r\nprint(\"//=========================================================\")\r\nprint(\"// Data Pembeli Baju \")\r\nprint(\"//=========================================================\")\r\ndef queue():\r\n s = []\r\n return s\r\ndef enqueue (s,i):\r\n s.insert(0,i)\r\n return s\r\ndef dequeue (s):\r\n return s.pop()\r\ndef rear(s):\r\n return(s[0])\r\ndef front (s):\r\n return(s[len(s)-1])\r\ndef size (s):\r\n return len(s)\r\ndef IsEmpety(s):\r\n return s == []\r\n\r\ndef Ke2():\r\n s = queue()\r\n k = ''\r\n while True:\r\n banyak = int(input(\"Masukan Banyak Pembeli secara keseluruhan = \"))\r\n for j in range(banyak):\r\n orang = input(\"Masukan Nama Pembeli ke %i yang masuk di antrian = \" %(j+1))\r\n enqueue(s,orang)\r\n s.reverse()\r\n print(\"Data Nama Seluruh Pembeli Adalah : %s\"%(s))\r\n s.reverse()\r\n o = input(\"Masukan Nama Pembeli yang dicari = \")\r\n ditemukan = \"t\"\r\n itung = 0\r\n while ditemukan=='t':\r\n if o == front(s):\r\n print(\"Congrats Pembeli Sudah Ditemukan\")\r\n ditemukan = 'y'\r\n print(\"Pembeli berada pada antrian yang ke-\",str(itung-1+2),\"Dari Data Nama Seluruh Pembeli\")\r\n print(\"Dengan Looping\",str(itung-1+1),\"Kali\")\r\n elif o != front(s):\r\n masukan = dequeue(s)\r\n enqueue(s,Masukan)\r\n ditemukan = 't'\r\n s.reverse()\r\n print(\"Looping %i = %s \"%(itung+1),s)\r\n s.reverse()\r\n itung+=1\r\n if itung > len(s):\r\n print(\"Maaf Nama yang Dimaksud Tidak Ada\")\r\n print()\r\n print(\"Silahkan tambahkan nama jika ingin memesan dengan ketik (yes/no) dibawah ini \")\r\n ditemukan = \"y\"\r\n k = input(\"Apakah Masih ada yang dibantu? 
--Ketik (yes/no)-- ?\")\r\n if k != 'yes':\r\n print(\"||=======================================================||\")\r\n print(\"||==========================Thanks You===================||\")\r\n print(\"||==================Data Pembeli Baju ===================||\")\r\n print(\"||==================Ghifarul Azhar ======================||\")\r\n print(\"||=======================================================||\")\r\n break\r\n else:\r\n print(\"Ketik Nama yang ingin memesan \")\r\n\r\n\r\nKe2()\r\n\r\n\r\n\r\n","sub_path":"Ghifarul Azhar - 15200234.py","file_name":"Ghifarul Azhar - 15200234.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"416315327","text":"def adding_two_number(arr1, arr2):\n \"\"\"\n if num1 is 123 then arr1 = [3,2,1]\n\n :param arr1:\n :param arr2:\n\n :return:\n \"\"\"\n\n if len(arr1) > len(arr2):\n return adding_two_number(arr2, arr1)\n\n out = []\n extra = 0\n\n for i, (a1, a2) in enumerate(zip(arr1, arr2)):\n e = a1 + a2 + extra\n out.append(e % 10)\n extra = e // 10\n\n for i in range(len(arr1), len(arr2)):\n e = arr2[i] + extra\n out.append(e % 10)\n extra = e // 10\n\n if extra:\n out.append(extra)\n\n return out\n\n\nif __name__ == '__main__':\n print(adding_two_number([9, 9, 9], [9, 9, 9]))\n","sub_path":"algo/addingTwoNumber.py","file_name":"addingTwoNumber.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"609496020","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 19 15:40:02 2018\n\nCode to look at results from convolution\n\n@author: ppxee\n\"\"\"\n\n\n### Import required libraries ###\nimport matplotlib.pyplot as plt #for plotting\nfrom astropy.io import fits #for handling fits\n#from astropy.table import Table #for handling tables\nimport numpy as np #for handling arrays\n#import math\n#from astropy.stats import median_absolute_deviation\nimport vari_funcs_no06 #my module to help run code neatly\nfrom matplotlib.colors import LogNorm\nplt.close('all') #close any open plots\n\ncombined = fits.open('stars_mag_flux_table.fits')\nalldata = combined[1].data\ncombined = fits.open('stars_mag_flux_convS.fits')\nalldataconv = combined[1].data\nstars = fits.open('starsfwhm.fits')\nsdata = stars[1].data\n\n# remove saturated stars\nsdata = sdata[alldata['MAG_APER_5_05B'] >= 12]\nalldata = alldata[alldata['MAG_APER_5_05B'] >= 12]\nalldataconv = alldataconv[alldataconv['MAG_APER_5_05B'] >= 12]\n\n## Create flux stack\n#allflux = vari_funcs_no06.flux5_stacks(alldata)\n#allfluxconv = vari_funcs_no06.flux5_stacks(alldataconv)\n\n\n# Create mag stack\n#allflux = vari_funcs_no06.mag5_stacks(alldata)\n#allflux, alldata2 = vari_funcs_no06.no99(allflux, alldata)\n#allfluxconv = vari_funcs_no06.mag5_stacks(alldataconv)\n#allfluxconv, alldataconv2 = vari_funcs_no06.no99(allfluxconv, alldataconv)\n\n# Remove negative values\n#allflux[allflux <= 0] = np.nan\n#mask = ~np.isnan(allflux).any(axis=1)\n#allflux = allflux[mask]\n##allfluxconv[allfluxconv <= 0] = np.nan\n##mask = ~np.isnan(allfluxconv).any(axis=1)\n#allfluxconv = allfluxconv[mask]\n#\n## Normalise\n#allflux = vari_funcs_no06.normalise_flux(allflux)\n##allflux = vari_funcs_no06.normalise_mag(allflux)\n##allfluxconv = vari_funcs_no06.psf_correct_flux(allflux, allflux, 'median')\n#allfluxconv = vari_funcs_no06.normalise_flux(allfluxconv)\n##allfluxconv = 
vari_funcs_no06.normalise_mag(allfluxconv)\n\navgflux = np.array([np.median(sdata['FWHM_05B']), \n np.median(sdata['FWHM_07B']), \n np.median(sdata['FWHM_08B']), \n np.median(sdata['FWHM_09B']), \n np.median(sdata['FWHM_10B']), \n np.median(sdata['FWHM_11B']), \n np.median(sdata['FWHM_12B'])]) *3600\n\navgfluxconv = np.array([np.median(alldataconv['FWHM_WORLD_05B']), \n np.median(alldataconv['FWHM_WORLD_07B']), \n np.median(alldataconv['FWHM_WORLD_08B']), \n np.median(alldataconv['FWHM_WORLD_09B']), \n np.median(alldataconv['FWHM_WORLD_10B']), \n np.median(alldataconv['FWHM_WORLD_11B']), \n np.median(alldataconv['FWHM_WORLD_12B'])]) *3600\n\n## find and plot average\n#avgflux = np.median(allflux, axis=0)\nvari_funcs_no06.avg_lightcurve(avgflux)\n#plt.title('Normalised Flux of Stars with Unconvolved')\n#plt.ylim(0.9986, 1.0004)\n#plt.ylim(21.36, 21.51)\nplt.title('Median FWHM of stars before convolution')\nplt.ylim(0.74, 0.88)\nplt.ylabel('FWHM (arcsec)')\n#plt.ylabel('Normalised Flux')\nplt.savefig('plots/Lightcurves/FWHMbefore')\n\n#avgfluxconv = np.median(allfluxconv, axis=0)\nvari_funcs_no06.avg_lightcurve(avgfluxconv)\n#plt.title('Normalised Flux of Stars curve with Convolved')\n#plt.ylim(0.9986, 1.0004)\n#plt.ylim(21.36, 21.51)\n#plt.ylim(0.000205,0.000243)\nplt.title('Median FWHM of stars after convolution')\nplt.ylim(0.74, 0.88)\nplt.ylabel('FWHM (arcsec)')\n#plt.ylabel('Normalised Flux')\nplt.savefig('plots/Lightcurves/FWHMafter')\n","sub_path":"invextconv.py","file_name":"invextconv.py","file_ext":"py","file_size_in_byte":3526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"14"}
+{"seq_id":"116081588","text":"import sys\nimport os\nimport string\nimport re\nimport urlparse\nimport urllib\n\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n# threshold for displaying trace message. Must be <, so higher means more\ntraceLevel = 0\n\ndef trace(s, level = 1):\n \"\"\"\n is compared to rdfAppDef.traceLevel. Higher means less likely to display\n \"\"\"\n if level > traceLevel:\n return\n sys.stderr.write(\"rdfAppDef.py: %s\\n\" % s)\n\nclass InvalidFileError(Exception):\n def __init__(self, path):\n self.path = path\n def __str__(self):\n return repr(self.path)\n\nclass ScriptError(Exception):\n def __init__(self, commandLine, exitCode):\n self.commandLine = commandLine\n self.exitCode = exitCode\n def __str__(self):\n return repr(\"Script error calling %s:\\nError %s.\" % (self.commandLine, os.strerror(self.exitCode)))\n\nclass LoadFailure(Exception):\n def __init__(self, fileUri):\n self.message = \"File %s failed to load.\" % (fileUri)\n self.fileUri = fileUri\n def __str__(self):\n return repr(self.message)\n\nclass NoGraph(Exception):\n def __init__(self, fileUri):\n self.message = \"Dependency %s has no graph assignments.\" % (fileUri)\n self.fileUri = fileUri\n def __str__(self):\n return repr(self.message)\n\nclass NoSpecification(Exception):\n def __init__(self, parameterName):\n self.message = \"No specification provided for %s\" % parameterName\n self.parameterName = parameterName\n def __str__(self):\n return repr(self.message)\n\nclass NotAnRdfFile(Exception):\n def __init__(self, fileName):\n self.message = \"%s is not subsumed by app:RDFFile.\" % fileName\n self.fileName = fileName\n def __str__(self):\n return repr(self.message)\n\nclass ApplicationDefinitionError(Exception):\n def __init__(self, bindings):\n if len(bindings) == 0:\n self.message = \"No Application Definition Graph Found\"\n else:\n self.message = \"Should be exactly one Application Definition. 
Instead there were these:%s\" % map(lambda b:b['appDef'], bindings)\n self.bindings = bindings\n def __str__(self):\n return repr(self.message)\n\nSparqlPrefixes = \"\"\"\nPrefix app: \nPrefix rdfs: \n\"\"\"\n\n\ndef uriToPath(uri):\n \"\"\"\n returns translated to a pathname, if indicates a file, else None\n \"\"\"\n unquotedUri = uri.replace(\"<\", \"\").replace(\">\", \"\")\n parse = urlparse.urlparse(unquotedUri)\n if parse.scheme == 'file' or parse.scheme== '':\n return parse.path\n\ndef maybeAngleQuote(uri):\n \"\"\"\n ensures that angle quotes are provided for if appropriate\n \"\"\"\n\n uri = uri.strip()\n hasAnglequotes = re.compile('^<.*>$')\n needsAngleQuotes = re.compile(\"^https?://|^file:|^/\", re.IGNORECASE)\n\n if hasAnglequotes.match(uri):\n return uri\n elif needsAngleQuotes.match(uri):\n return \"<%s>\" % uri\n else:\n return uri\n\ndef clearGraph(graphUri, updateFn):\n \"\"\"\n Side-effect: drops \n Side-effect: removes all members of app:loadedFiles assigned to \n Where:\n := fn() -> None\n \"\"\"\n trace (\"clearing graph:%s\" % maybeAngleQuote(graphUri), level = 0)\n\n updateFn(SparqlPrefixes + \"Drop Graph %s\" % maybeAngleQuote(graphUri))\n\ndef isInvalidFile(path, askFn):\n \"\"\"\n Returns True if fileUri is of class app:InvalidFile.\n Where:\n is a file path, eg 'Data/blah.ttl'\n Note this is typically assigned by the formatCheck function.\n \"\"\"\n path = path.strip(' <>')\n template = SparqlPrefixes + \"\"\"\n Ask\n Where\n {\n Bind (URI(\"$Path\") as ?file)\n Graph ?containingGraph\n {\n ?file a/rdfs:subClassOf* app:InvalidFile.\n }\n }\n \"\"\"\n query = string.Template(template).substitute(\n Path = path)\n trace(\"query in isInvalidFile:%s\" % query)\n return askFn(query)\n\ndef uniqueApplicationDefinition(queryFn):\n \"\"\"\n Returns the URI of the only graph G for which \n Graph ?g { ?g a app:ApplicationDefinition. 
...}\n Raises an ApplicationDefinitionError if there is not exactly one\n such graph.\n Note: this is the default value of applicationDefinitionFn parameters\n to functions defined in rdfAppDef.py which require such parameters.\n \"\"\"\n query = SparqlPrefixes + \"\"\"\n Select Distinct ?appDef\n Where\n {\n Graph ?appDef\n {\n ?appDef a app:ApplicationDefinition.\n }\n }\n \"\"\"\n bindings = queryFn(query)\n if len(bindings) != 1:\n raise ApplicationDefinitionError(bindings)\n appDefs = map(lambda b: b['appDef'], bindings)\n trace (\"type of appDefs[0]:%s\" % type(appDefs[0]))\n return appDefs[0]\n\ndef moveToTmp (fileName):\n \"\"\"\n SIDE-EFFECT: moves to /tmp/\n Returns: new pathname of /tmp/, or None if not found\n NOTE: this is the default function to invalidate a file whose dependencies\n have been made invalid.\n \"\"\"\n splitFileName = fileName.split('/')\n target = os.path.join(\"/tmp/\" , splitFileName[len(splitFileName)-1:][0])\n if os.path.exists(fileName):\n trace(\"moving %s to %s\" % (fileName, target), 0)\n os.rename(fileName, target)\n return target\n\n \ndef removeInvalidDependencies (queryFn, updateFn, \n appDefGraphFn=uniqueApplicationDefinition,\n invalidateFileFn=moveToTmp):\n \"\"\"\n Returns: the set of any graphs cleared when invalidating dependencies\n Side-effect: any files dependent on non-existent files are also removed.\n Side-effect: the app:LoadedFiles graph is dropped if any files were removed.\n Where:\n is a function f(query) -> [, ...]\n will be a SPARQL query into file dependencies.\n is a function f(queryFn) -> \n is the URI of the appropriate Application Definition Graph.\n := fn(dependency) -> newPath, with side-effect that \n has been moved to , or removed completely if is None.\n \"\"\"\n template = SparqlPrefixes + \"\"\"\n Select Distinct ?dependent ?dependency ?graph\n Where\n {\n Bind(URI(\"$AppDef\") as ?applicationDefinition)\n Graph ?applicationDefinition\n {\n ?dependent app:informedByFile+ ?dependency.\n 
?dependent a/rdfs:subClassOf* app:RDFFile.\n Optional\n {\n ?dependent a*/rdfs:subClassOf*/app:graph ?graph\n }\n }\n }\n \"\"\"\n query = string.Template(template).substitute(\n AppDef = appDefGraphFn(queryFn)\n )\n trace (\"query in removeInvalidDependencies:%s\" % query, 0)\n bindings = queryFn(query)\n #[{\"dependency\" : , \"dependent\" : }, ...]\n invalidGraphs = []\n for dependencyMap in bindings:\n dependency = dependencyMap[\"dependency\"]\n dependent = dependencyMap[\"dependent\"]\n if '#' in dependent:\n raise Exception (\"dependents should not include paths with #\")\n dependencyName = urlparse.urlparse(dependency).path\n dependentName = urlparse.urlparse(dependent).path\n if (not os.path.exists(dependencyName) and os.path.exists(dependentName)):\n trace (\"Invalidating dependent %s\" % dependentName, 0)\n invalidateFileFn(dependentName)\n #os.remove(dependentName)\n if 'graph' in dependencyMap:\n invalidGraph = dependencyMap['graph']\n filesWereRemoved = True\n if invalidGraphs:\n for invalidGraph in set(invalidGraphs):\n clearGraph(invalidGraph, updateFn)\n return set(invalidGraphs)\n\n# def makeObsolete(path, queryFn, askFn, updateFn,\n# appDefGraphFn=uniqueApplicationDefinition):\n# \"\"\"\n# Returns: the graph to which was loaded, and graphs of all dependencies\n# Side-effect: deletes and clears its associated graph.\n# Side-effect clears graph of and all graphs associated with dependencies.\n# \"\"\"\n# if os.path.exists(path):\n# os.remove(path)\n# clearedGraphs = []\n# graphBindings = queryFn(string.Template(template).substitute(Path = \"file:\" + path))\n# graphs = map (lambda b: b['graph'], graphBindings)\n# for graph in graphs:\n\n# if fileIsLoaded(path, graph, askFn):\n# template = SparqlPrefixes + \"\"\"\n# Select distinct ?graph\n# Where\n# {\n# Bind(URI(\"$AppDef\") as ?applicationDefinition)\n# Bind(URI(\"$PathUri\") as ?pathUri)\n# Graph ?applicationDefinition\n# {\n# ?path app:graph ?graph.\n# }\n# }\n# \"\"\"\n# clearGraph(graph)\n# 
clearedGraphs = clearedGraphs + [graph]\n# return set (clearedGraphs).union(removeInvalidDependencies(queryFn, updateFn))\n\n\ndef fileIsLoaded(fileUri, graphUri, base, askFn):\n \"\"\"\n Returns: true if there is some graph in the model which lists as type app:LoadedFile. This is not done automatically by AppDef, but the user can add this statement as a utility.\n Where:\n is an angleQuoted file URI.\n is a function f(query)->True/False\n \"\"\"\n template = SparqlPrefixes + \"\"\"\n BASE <$Base>\n Ask\n Where\n {\n Bind (URI(\"$GraphUri\") as ?graphUri)\n Bind (URI(\"$File\") as ?fileUri)\n Graph ?graphUri\n {\n ?fileUri a app:LoadedFile.\n }\n }\n \"\"\"\n isLoadedQuery = string.Template(template).substitute(\n Base = base,\n GraphUri = graphUri,\n File = fileUri\n )\n trace (\"isLoaded Query: %s\" % isLoadedQuery, level = 0)\n isLoaded = askFn(isLoadedQuery)\n if isLoaded:\n trace(\"%s is loaded.\" % fileUri, level = 0)\n return isLoaded\n\ndef loadFileUri(fileUri, graphUri, updateFn, askFn, formatCheck, base,\n fileIsLoadedFn=fileIsLoaded):\n \"\"\"\n Side Effect: is loaded per , into or Default Graph if graphUri is None.\n Side Effect: if , asserts app:loadedInto in the app:LoadedFiles graph. if not it asserts an app:LoadedFile.\n May raise errors per \n Where:\n is a URI naming an RDF file\n is None, or a URI naming a graph\n := fn() -> None\n := None or fn(askQuery) -> True/False\n Used only to inform . If provided, it will flag an error if the \n app.fileIsLoaded is not true.\n := None, or fn() -> s.t. is guaranteed to point to a file containing valid RDF.\n