diff --git "a/6483.jsonl" "b/6483.jsonl" new file mode 100644--- /dev/null +++ "b/6483.jsonl" @@ -0,0 +1,347 @@ +{"seq_id":"213295297","text":"from django.conf.urls.defaults import patterns, include, url\n\nfrom user.views import *\nfrom main.views import *\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nfrom django.contrib.auth.views import login, logout\n\nimport authority\nauthority.autodiscover()\n\nurlpatterns = patterns('',\n \n # url(r'^$', 'SocialNetwork.views.home', name='home'),\n # url(r'^SocialNetwork/', include('SocialNetwork.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n\t\n ('^$', Main),\n \n ('^changefriendship/(\\w+)/(\\w+)/' , ChangeFriendship),\n ('^changesponsorship/(\\w+)/(\\w+)/' , ChangeSponsorship),\n \n ('^profile/(\\w+)/' , SeePeerProfile),\n ('^sponsor/(\\w+)/' , SeeSponsorProfile),\n \n ('^accounts/create/' , UserCreateAccount),\n ('^accounts/login/' , UserLogin),\n ('^accounts/profile/' , UserProfile),\n ('^accounts/logout/' , UserLogout),\n\n\t(r'^admin/', include(admin.site.urls)),\n)\n","sub_path":"urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"505714860","text":"#!/usr/bin/env python3\n# See: https://github.com/VirusTrack/COVIDvu/blob/master/LICENSE \n# vim: set fileencoding=utf-8:\n\n\nfrom covidvu.config import MASTER_DATABASE\nfrom covidvu.config import SITE_DATA\nfrom covidvu.cryostation import Cryostation\nfrom covidvu.virustrack.countryinfo import US_REGIONS\nfrom covidvu.virustrack.countryinfo import TOTAL_US_NAME\n\nimport csv\nimport datetime\nimport os\nimport pytz\n\nimport tqdm\n\n\n# --- constants ---\n\nCOUNTRY_NAMES = {\n 'Bosnia' : 'Bosnia and Herzegovina',\n 'Czech Republic' : 'Czechia',\n 'South Korea' : 'Korea, South',\n 'World' : '!Global',\n 'Taiwan' : 'Taiwan*',\n 'U.S. TOTAL' : TOTAL_US_NAME,\n 'UAE' : 'United Arab Emirates',\n 'United States' : 'US',\n }\nNIXED_ROWS_INDEX = (\n 'Diamond Princess (repatriated)',\n 'Diamond Princess',\n 'Grand Princess (repatriated)',\n 'Grand Princess',\n 'Marianas',\n 'Northern Marianas',\n 'Queue',\n 'Recovered',\n 'TBD',\n 'US',\n 'Unassigned',\n 'United States Virgin Islands',\n 'Washington D.C.',\n 'Wuhan (repatriated)',\n 'Wuhan Evacuee',\n 'Wuhan',\n)\nUS_STATE_NAMES = {\n 'U.S. TOTAL' : TOTAL_US_NAME,\n 'Northern Mariana Islands': 'Northern Marianas',\n 'U.S. 
Virgin Islands' : 'Virgin Islands',\n }\nSCRAPED_WORLD_DATA = os.path.join(SITE_DATA, 'scraped-world.tsv')\nSCRAPED_US_DATA = os.path.join(SITE_DATA, 'scraped-US.tsv')\nSCRAPED_TODAY = pytz.utc.localize(datetime.datetime.today()).astimezone(pytz.timezone('America/Los_Angeles')).strftime('%Y-%m-%d')\n\n\n# --- globals ---\n\n\n# *** functions ***\n\ndef _fetchCurrentUpdates(columnRef, index = 'LOCATION'):\n grandTotal = 0.0\n\n updatesDataset = dict()\n with open(SCRAPED_WORLD_DATA, 'r') as inputFile:\n rawData = csv.DictReader(inputFile, delimiter = '\\t')\n for row in rawData:\n ref = row[index]\n if 'Queue' == ref:\n continue\n if 'Diamond' not in row[index]:\n try:\n bodyCount = float(row[columnRef]) if row[columnRef] != '' else 0.0\n except:\n bodyCount = 0.0\n\n updatesDataset[ref] = { SCRAPED_TODAY: float(bodyCount) }\n \n grandTotal += bodyCount\n\n if 'Queue' in updatesDataset:\n del(updatesDataset['Queue'])\n\n updatesDataset['!Global'] = dict()\n updatesDataset['!Global'][SCRAPED_TODAY] = grandTotal\n\n return updatesDataset\n\n\ndef _fetchCurrentUpdatesUS(columnRef, index = 'UNITED STATES'):\n updatesDataset = dict()\n with open(SCRAPED_US_DATA, 'r') as inputFile:\n rawData = csv.DictReader(inputFile, delimiter = '\\t')\n for row in rawData:\n try:\n bodyCount = float(row[columnRef]) if row[columnRef] != '' else 0.0\n except:\n bodyCount = 0.0\n updatesDataset[row[index]] = { SCRAPED_TODAY: bodyCount, }\n\n if 'Queue' in updatesDataset:\n del(updatesDataset['Queue'])\n\n return updatesDataset\n\n\ndef _homologizeUpdateData(dataset, table):\n badKeys = list()\n updates = dict()\n\n for key in dataset:\n if key in table:\n updates[table[key]] = dataset[key]\n badKeys.append(key)\n\n for key in badKeys:\n del(dataset[key])\n\n dataset.update(updates)\n\n return dataset\n\n\ndef _updateWorldData():\n # 'Cases' -- TSV ref\n # 'confirmed' -- VirusTrack DB key\n print(' updating world...')\n\n updateWorldCases = _fetchCurrentUpdates('Cases')\n updateWorldCases = _homologizeUpdateData(updateWorldCases, COUNTRY_NAMES)\n updateWorldDeaths = _fetchCurrentUpdates('Deaths')\n updateWorldDeaths = _homologizeUpdateData(updateWorldDeaths, COUNTRY_NAMES)\n\n cryostation = Cryostation(MASTER_DATABASE)\n\n for countryName in tqdm.tqdm(sorted(updateWorldCases.keys())):\n if countryName in cryostation:\n country = cryostation[countryName]\n try:\n country['confirmed'][SCRAPED_TODAY] = updateWorldCases[countryName][SCRAPED_TODAY]\n country['deaths'][SCRAPED_TODAY] = updateWorldDeaths[countryName][SCRAPED_TODAY]\n except KeyError:\n # TODO: Eugene - Define a mechanism to add new countries reporting to the database\n pass\n cryostation[countryName] = country\n else:\n print( '## country %s not found in database' % countryName)\n \n cryostation.close()\n\n\ndef _updateUSData():\n # 'Cases' -- TSV ref\n # 'confirmed' -- VirusTrack DB key\n print(' updating US...')\n updateUSCases = _fetchCurrentUpdatesUS(columnRef = 'Cases')\n updateUSCases = _homologizeUpdateData(updateUSCases, US_STATE_NAMES)\n updateUSDeaths = _fetchCurrentUpdatesUS(columnRef = 'Deaths')\n updateUSDeaths = _homologizeUpdateData(updateUSDeaths, US_STATE_NAMES)\n\n cryostation = Cryostation(MASTER_DATABASE)\n country = cryostation['US']\n\n for location in tqdm.tqdm(sorted(updateUSCases.keys())):\n try:\n if location in NIXED_ROWS_INDEX:\n # TODO: Eugene - what do we do about these uncharted locations?\n # retardedKeys.append(location)\n continue\n\n country['provinces'][location]['confirmed'][SCRAPED_TODAY] = 
updateUSCases[location][SCRAPED_TODAY]\n country['provinces'][location]['deaths'][SCRAPED_TODAY] = updateUSDeaths[location][SCRAPED_TODAY]\n except:\n print(' || Invalid location: %s' % location)\n continue\n\n cryostation['US'] = country\n cryostation.close()\n\n\ndef _updateUSRegionsData(target):\n # 'confirmed' -- VirusTrack DB key\n print(' updating US regions...')\n updateUSRegions = dict()\n\n cryostation = Cryostation(MASTER_DATABASE)\n country = cryostation['US']\n allTime = list(country['provinces'][TOTAL_US_NAME][target].keys())\n\n for location in tqdm.tqdm(country['provinces']):\n if location in NIXED_ROWS_INDEX:\n continue\n try:\n region = US_REGIONS[location]\n if region not in updateUSRegions:\n updateUSRegions[region] = { SCRAPED_TODAY: 0.0, }\n\n try:\n updateUSRegions[region][SCRAPED_TODAY] += float(country['provinces'][location][target][SCRAPED_TODAY])\n except:\n yesterday = country['provinces'][location][allTime[len(allTime)-2]]\n updateUSRegions[region][SCRAPED_TODAY] = yesterday\n except KeyError:\n print(' >> Invalid location: %s' % location)\n continue\n\n for region in sorted(updateUSRegions.keys()):\n country['regions'][region][target][SCRAPED_TODAY] = updateUSRegions[region][SCRAPED_TODAY]\n\n cryostation['US'] = country\n cryostation.close()\n\n\ndef _main():\n _updateWorldData()\n _updateUSData()\n _updateUSRegionsData('confirmed')\n _updateUSRegionsData('deaths')\n\n\n# +++ main +++\n\nif '__main__' == __name__:\n _main()\n\n","sub_path":"work/covidvu/pipeline/vuupdate.py","file_name":"vuupdate.py","file_ext":"py","file_size_in_byte":7247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"165624300","text":"#!/usr/local/bin/python\nimport os\nimport json\nimport sys\n\ntry:\n import requests\nexcept ImportError:\n print(\"Could not load 'requests' module. 
You may need to install it by running:\\n pip install requests\")\n sys.exit(1)\n\nTOKEN_FILE = \"/etc/jaguar/lib/python/py429/Test/.token\"\nRESULT_FILE = \"/tmp/result.json\"\nSESSION_FILE = \"/tmp/session\"\nCASP_CONFIG_FILE = \"/run/jaguar/casp/config.json\"\n\n\nclass CaspServer:\n def __init__(self, servertype):\n self.session = None\n self.token = None\n self.keystr = \"\"\n\n if servertype is True:\n self.prefix = \"https://\"\n else:\n self.prefix = \"http://\"\n\n def put(self, d, keys, item):\n try:\n if not self.keystr:\n self.keystr = keys\n if \".\" in keys:\n key, rest = keys.split(\".\", 1)\n self.put(d[key], rest, item)\n else:\n if keys not in d:\n raise KeyError\n d[keys] = item\n except KeyError:\n print(\"CASP config does not have \" + self.keystr)\n self.keystr = \"\"\n sys.exit(0)\n\n def get(self, d, keys):\n try:\n if not self.keystr:\n self.keystr = keys\n if \".\" in keys:\n key, rest = keys.split(\".\", 1)\n return self.get(d[key], rest)\n else:\n return d[keys]\n except KeyError:\n print(\"CASP config does not have \" + self.keystr)\n self.keystr = \"\"\n sys.exit(0)\n\n def casp_edit(self, editstr):\n \"\"\"\n Edits a specific value in the CASP config file.\n Example 'labels.latitude.config.port=3' or 'log_level=debug'\n \"\"\"\n with open(CASP_CONFIG_FILE, \"r\") as casp_data:\n config = json.load(casp_data)\n\n if '=' in editstr:\n (keys, value) = editstr.split('=')\n self.put(config, keys, value)\n else:\n print(editstr + \"=\" + self.get(config, editstr))\n sys.exit(0)\n\n with open(CASP_CONFIG_FILE, \"w\") as casp_file:\n json.dump(config, casp_file, indent=4)\n\n print(\"Configuration change successful: \", editstr)\n\n print(\"May need to execute 'jag casp restart' to take effect\")\n sys.exit(0)\n\n def authentication(self, caspserver):\n \"\"\"\n Authentication with the casp server\n :param caspserver: url of casp server\n :return: session token or None\n Will exit(), if TOKEN_FILE is missing\n \"\"\"\n if os.path.isfile(TOKEN_FILE):\n self.token = self.__read_key(TOKEN_FILE)\n else:\n print(\"Error: Authentication token is missing. 
Cannot continue\")\n sys.exit(1)\n\n url = self.prefix + caspserver + \"/api/a429/v1/authenticate\"\n headers = {\n 'cache-control': \"no-cache\",\n 'content-type': \"application/x-www-form-urlencoded\"\n }\n print(\"Fetching session key...\")\n print(\"NOTICE: You may see a unverified HTTPS request, this is normal.\\n\")\n\n try:\n response = requests.request(\"POST\", url, data=self.token, headers=headers, verify=False)\n response.raise_for_status()\n except requests.RequestException as err:\n print(\"Connection Error: Unable to connect to the CASP server: \", err)\n return None\n\n try:\n auth = json.loads(response.text)\n except ValueError as err:\n print(\"JSON Decode Error: \", err)\n return None\n\n if auth.get(\"success\") is True:\n # print(\"\\nSession key fetch successful, cacheing the following key:\\n\\n \", auth.get(\"token\"))\n token = str(\"token=\" + auth.get(\"token\"))\n self.__write_key(SESSION_FILE, token)\n print(\"Session key renewed.\")\n return token\n else:\n print(\"\\nError: Unable to fetch the session key.\")\n return None\n\n def fetch_item(self, caspserver, caspvalue):\n if os.path.isfile(SESSION_FILE):\n self.session = self.__read_key(SESSION_FILE)\n else:\n print(\"Error: No session key available.\")\n self.session = self.authentication(caspserver)\n if self.session is None:\n return None\n\n url = self.prefix + caspserver + \"/api/a429/v1/poll/\" + caspvalue\n headers = {\n 'cache-control': \"no-cache\",\n 'content-type': \"application/x-www-form-urlencoded\"\n }\n print(\"Fetching casp data...\")\n print(\"NOTICE: You may see a unverified HTTPS request, this is normal.\")\n\n try:\n response = requests.request(\"POST\", url, data=self.session, headers=headers, verify=False)\n response.raise_for_status()\n except requests.RequestException as err:\n print(\"Connection Error: Unable to connect to the CASP server: \", err)\n return None\n\n try:\n item = json.loads(response.text)\n print(\"\\n======= RESULTS FOR \" + caspvalue + \"==========\\n\")\n print(json.dumps(item, sort_keys=True, indent=4, separators=(',', ': ')))\n self.__dump_results(RESULT_FILE, item)\n except ValueError as err:\n print(\"JSON Decode Error: \", err)\n return None\n\n return item\n\n def quick_fetch_item(self, caspserver, caspvalue):\n url = self.prefix + caspserver + \"/api/a429/v1/poll/\" + caspvalue\n headers = {\n 'cache-control': \"no-cache\",\n 'content-type': \"application/x-www-form-urlencoded\"\n }\n print(\"Fetching casp data...\")\n try:\n response = requests.request(\"POST\", url, headers=headers, verify=False)\n response.raise_for_status()\n except requests.RequestException as err:\n print(\"Connection Error: Unable to connect to the CASP server: \", err)\n return None\n\n try:\n item = json.loads(response.text)\n print(\"\\n======= RESULTS FOR \" + caspvalue + \"==========\\n\")\n print(json.dumps(item, sort_keys=True, indent=4, separators=(',', ': ')))\n self.__dump_results(RESULT_FILE, item)\n except ValueError as err:\n print(\"JSON Decode Error: \", err)\n return None\n\n return item\n\n def modify_value(self, caspserver, caspname, value, silent=True):\n url = self.prefix + caspserver + \"/api/a429/v1/conf/set/\" + caspname\n headers = {\n 'cache-control': \"no-cache\",\n 'content-type': \"application/x-www-form-urlencoded\"\n }\n change = \"setvalue=\" + str(value)\n if silent is not True:\n print(\"Fetching casp data...\")\n\n try:\n response = requests.request(\"POST\", url, data=change, headers=headers, verify=False)\n response.raise_for_status()\n except 
requests.RequestException as err:\n print(\"Connection Error: Unable to connect to the CASP server: \", err)\n return response.status_code\n\n if silent is not True:\n print(\"Label has been changed successfully.\")\n\n return response.status_code\n\n def __read_key(self, filename):\n '''\n Read cache key from previous authentication to the CASP server,\n we only need to do this once per 24 hour session. The cache keys\n can be removed at any time by running this script with the\n -d argument or refreshed by using the -r argument.\n '''\n f = open(filename, 'r')\n item = f.read()\n f.close()\n return item\n\n def __write_key(self, filename, key):\n '''\n Helper function to write token and session key to a file to\n be cache for reuse of the script.\n '''\n f = open(filename, 'w')\n f.write(key)\n f.close()\n\n def __dump_results(self, filename, item):\n '''\n Writes the requested data from casp to a file.\n '''\n print(\"Saving results to\", filename)\n f = open(filename, 'w')\n f.write(json.dumps(item, sort_keys=True, indent=4,\n separators=(',', ': ')))\n f.close()\n\n def set_token(self, token):\n f = open(TOKEN_FILE, 'w')\n f.write(str(\"token=\" + token))\n f.close()\n sys.exit(0)\n\n def delete_session(self):\n try:\n print(\"Removing cache session key...\")\n os.remove(SESSION_FILE)\n sys.exit(0)\n except OSError:\n sys.exit(1)\n\n\n# ======================\n# MAIN PROGRAM\n# ======================\n\nif __name__ == '__main__':\n print(\"Try casptool\")\n","sub_path":"bin/caspclient.py","file_name":"caspclient.py","file_ext":"py","file_size_in_byte":8683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"259036062","text":"import os\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog, QWidget\nfrom PyQt5.QtWidgets import QTextEdit, QPushButton, QVBoxLayout, QHBoxLayout, QAction, qApp\n\n\nclass Notepad(QWidget):\n\n def __init__(self):\n super().__init__()\n self.text = QTextEdit(self)\n self.clr_btn = QPushButton('Clear')\n self.sav_btn = QPushButton('Save')\n self.opn_btn = QPushButton('Open')\n\n self.init_ui()\n\n def init_ui(self):\n v_layout = QVBoxLayout()\n h_layout = QHBoxLayout()\n\n h_layout.addWidget(self.clr_btn)\n h_layout.addWidget(self.sav_btn)\n h_layout.addWidget(self.opn_btn)\n\n v_layout.addWidget(self.text)\n v_layout.addLayout(h_layout)\n\n self.sav_btn.clicked.connect(self.save_text)\n self.clr_btn.clicked.connect(self.clear_text)\n self.opn_btn.clicked.connect(self.open_text)\n\n self.setLayout(v_layout)\n self.setWindowTitle('PyQt5 TextEdit')\n\n self.show()\n\n def save_text(self):\n filename = QFileDialog.getSaveFileName(self, 'Save File', os.getenv('HOME'))\n with open(filename[0], 'w') as f:\n my_text = self.text.toPlainText()\n f.write(my_text)\n\n def open_text(self):\n filename = QFileDialog.getOpenFileName(self, 'Open File', os.getenv('HOME'))\n with open(filename[0], 'r') as f:\n file_text = f.read()\n self.text.setText(file_text)\n\n def clear_text(self):\n self.text.clear()\n\n\nclass Writer(QMainWindow):\n def __init__(self):\n super().__init__()\n\n self.form_widget = Notepad()\n self.setCentralWidget(self.form_widget)\n\n self.init_ui()\n\n def init_ui(self):\n bar = self.menuBar()\n file = bar.addMenu('File')\n\n new_action = QAction('New', self)\n new_action.setShortcut('Ctrl+N')\n\n save_action = QAction('&Save', self)\n save_action.setShortcut('Ctrl+S')\n\n open_action = QAction('&Open', self)\n\n quit_action = QAction('&Quit', self)\n\n 
file.addAction(new_action)\n file.addAction(save_action)\n file.addAction(open_action)\n file.addAction(quit_action)\n\n quit_action.triggered.connect(self.quit_trigger)\n file.triggered.connect(self.respond)\n\n self.show()\n\n def quit_trigger(self):\n qApp.quit()\n\n def respond(self, q):\n signal = q.text()\n\n if signal == 'New':\n self.form_widget.clear_text()\n elif signal == '&Open':\n self.form_widget.open_text()\n elif signal == '&Save':\n self.form_widget.save_text()\n\n\napp = QApplication(sys.argv)\nwriter = Writer()\nsys.exit(app.exec_())\n# import sys\n# from PyQt5.QtWidgets import QApplication, QTextEdit, QWidget, QPushButton, QVBoxLayout\n#\n#\n# class Notepad(QWidget):\n#\n# def __init__(self):\n# super(Notepad, self).__init__()\n# self.text = QTextEdit(self)\n# self.clr_btn = QPushButton('Clear')\n#\n# self.init_ui()\n#\n# def init_ui(self):\n# layout = QVBoxLayout()\n# layout.addWidget(self.text)\n# layout.addWidget(self.clr_btn)\n# self.clr_btn.clicked.connect(self.clear_text)\n#\n# self.setLayout(layout)\n# self.setWindowTitle('PyQt5 TextEdit')\n#\n# self.show()\n#\n# def clear_text(self):\n# self.text.clear()\n#\n# app = QApplication(sys.argv)\n# writer = Notepad()\n# sys.exit(app.exec_())\n# import sys\n# from PyQt5.QtWidgets import (QLabel, QRadioButton, QPushButton, QVBoxLayout, QApplication, QWidget)\n#\n#\n# class Window(QWidget):\n#\n# def __init__(self):\n# super(Window, self).__init__()\n#\n# self.init_ui()\n#\n# def init_ui(self):\n# self.lbl = QLabel('Which do you like best?')\n# self.dog = QRadioButton('Dogs')\n# self.cat = QRadioButton('Cats')\n# self.btn = QPushButton('Select')\n#\n# layout = QVBoxLayout()\n# layout.addWidget(self.lbl)\n# layout.addWidget(self.dog)\n# layout.addWidget(self.cat)\n# layout.addWidget(self.btn)\n#\n# layout2 = QVBoxLayout()\n# layout2.addWidget(self.lbl)\n# layout2.addWidget(self.dog)\n# layout2.addWidget(self.cat)\n# # layout2.addWidget(self.btn)\n#\n# self.setLayout(layout)\n# # self.setLayout(layout2)\n# self.setWindowTitle('PyQt5 Lesson 10')\n#\n# self.btn.clicked.connect(lambda: self.btn_clk(self.dog.isChecked(), self.lbl))\n#\n# self.show()\n#\n# def btn_clk(self, chk, lbl):\n# if chk:\n# lbl.setText('I guess you like dogs')\n# else:\n# lbl.setText('So its cats for you')\n#\n# app = QApplication(sys.argv)\n# a_window = Window()\n# sys.exit(app.exec_())\n\n# import sys\n# from PyQt5.QtWidgets import (QLabel, QCheckBox, QPushButton, QVBoxLayout, QApplication, QWidget)\n#\n#\n# class Window(QWidget):\n#\n# def __init__(self):\n# super().__init__()\n#\n# self.init_ui()\n#\n# def init_ui(self):\n# self.lbl = QLabel()\n# self.chx = QCheckBox('Do you like dogs ?')\n# self.btn = QPushButton('Push Me!')\n#\n# layout = QVBoxLayout()\n# layout.addWidget(self.lbl)\n# layout.addWidget(self.chx)\n# layout.addWidget(self.btn)\n#\n# self.setLayout(layout)\n#\n# self.btn.clicked.connect(lambda :self.btn_clk(self.chx.isChecked(), self.lbl))\n#\n# self.show()\n#\n# def btn_clk(self, chk, lbl):\n# if chk:\n# lbl.setText('I guess you like dog s')\n# else:\n# lbl.setText('Dog hater then')\n#\n#\n# app = QApplication(sys.argv)\n# a_window = Window()\n# sys.exit(app.exec_())\n#\n#\n#\n#\n#\n\n\n\n\n\n\n# import sys\n# from PyQt5.QtWidgets import (QLineEdit, QSlider, QPushButton, QVBoxLayout, QApplication, QWidget)\n# from PyQt5.QtCore import Qt\n#\n# class Window(QWidget):\n#\n# \tdef __init__(self):\n# \t\tsuper().__init__()\n#\n# \t\tself.init_ui()\n#\n#\n# \tdef init_ui(self):\n# \t\tself.setWindowTitle('Title window')\n#\n# 
\t\tself.line_edit = QLineEdit()\n# \t\t# self.line_\n# \t\tself.button1 = QPushButton('Clear')\n# \t\tself.button2 = QPushButton('Print into console')\n# \t\tself.slider1 = QSlider(Qt.Horizontal)\n# \t\tself.slider1.setMinimum(1)\n# \t\tself.slider1.setMaximum(99)\n# \t\tself.slider1.setValue(25)\n# \t\tself.slider1.setTickInterval(5)\n# \t\tself.slider1.setTickPosition(QSlider.TicksBelow)\n# \t\t# self.slider1.setVisible(False)\n#\n#\n# \t\tv_box = QVBoxLayout()\n# \t\tv_box.addWidget(self.line_edit)\n# \t\tv_box.addWidget(self.button2)\n# \t\tv_box.addWidget(self.button1)\n# \t\tv_box.addWidget(self.slider1)\n#\n# \t\tself.setLayout(v_box)\n#\n# \t\t# self.button1.clicked.connect(self.button_clk)\n# \t\t# self.button2.clicked.connect(self.button_clk)\n# \t\tself.button1.clicked.connect(lambda: self.button_click(self.button1, 'Hellow 1'))\n# \t\tself.button2.clicked.connect(lambda: self.button_click(self.button2, 'Hellow 2'))\n# \t\tself.slider1.valueChanged.connect(self.v_change)\n#\n# \t\t# my_value = str(self.line_edit.cursorPosition())\n# \t\t# self.line_edit.setText((my_value))\n#\n#\n#\n# \t\tself.show()\n#\n#\n# \tdef button_click(self, button, string):\n# \t\tif button.text() == 'Print into console':\n# \t\t\tprint(self.line_edit.text())\n# \t\telse:\n# \t\t\tself.line_edit.clear()\n# \t\t\tprint(type(string),' <---')\n# \t\t# my_value = str(self.line_edit.cursorPosition())\n# \t\t# self.line_edit.setText((my_value))\n#\n#\n# \tdef v_change(self):\n# \t\tmy_value = str(self.line_edit.cursorPosition())\n# \t\tself.line_edit.setText((my_value))\n#\n#\n#\n#\n# \t# self.button1 = QtWidgets.QPushButton('Push me')\n# \t# self.label1 = QtWidgets.QLabel('Look at me')\n# \t#\n# \t# h_box = QtWidgets.QHBoxLayout()\n# \t# h_box.addStretch()\n# \t# h_box.addWidget(self.label1)\n# \t# h_box.addStretch()\n# \t#\n# \t# v_box = QtWidgets.QVBoxLayout()\n# \t# v_box.addWidget(self.button1)\n# \t# v_box.addLayout(h_box)\n# \t#\n# \t# self.setLayout(v_box)\n# \t# self.setWindowTitle('Title by me')\n# \t#\n# \t# self.button1.clicked.connect(self.btn_click)\n# \t#\n# \t#\n#\n# \tdef button_clk(self):\n# \t\tsender = self.sender()\n# \t\tif sender.text() == 'Print into console':\n# \t\t\tprint(self.line_edit.text())\n# \t\t\tself.line_edit.setText('Printed')\n# \t\telse:\n# \t\t\tself.line_edit.clear()\n#\n#\n# \tdef btn_click(self):\n# \t\tself.label1.setText('I have been clicked')\n# \t\tprint('test')\n#\n#\n# app = QApplication(sys.argv)\n# a_window = Window()\n# sys.exit((app.exec_()))\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n#\n# # def window():\n# # \tapp = QtWidgets.QApplication(sys.argv)\n# # \tfirst_window = QtWidgets.QWidget()\n# # \tbutton1 = QtWidgets.QPushButton('Push me')\n# # \tlabel1 = QtWidgets.QLabel('Look at me')\n# #\n# # \th_box = QtWidgets.QHBoxLayout()\n# # \th_box.addStretch()\n# # \th_box.addWidget(label1)\n# # \th_box.addStretch()\n# #\n# # \tv_box = QtWidgets.QVBoxLayout()\n# # \tv_box.addWidget(button1)\n# # \tv_box.addLayout(h_box)\n# #\n# # \tfirst_window.setLayout(v_box)\n#\n# \t# v_box.addStretch()\n# \t# v_box.stretch()\n#\n#\n# \t# box1 = QtWidgets.QVBoxLayout()\n# \t# box1.addWidget(button1)\n# \t# box1.addWidget(label1)\n# \t# first_window.setLayout(box1)\n#\n#\n# \t# button1 = QtWidgets.QPushButton(first_window)\n# \t# button1.setText('push me')\n# \t# label1 = QtWidgets.QLabel(first_window)\n# \t# label2 = QtWidgets.QLabel(first_window)\n# \t# label1.setText('Label')\n# \t# label2.setPixmap(QtGui.QPixmap('A.png'))\n# \t# label1.move(250, 100)\n# \t# 
label2.move(200,150)\n# \t# first_window.setWindowTitle('First')\n# \t# first_window.setGeometry(0, 0, 300, 400)\n#\n#\n# \t# first_window.show()\n# \t# sys.exit((app.exec_()))\n#\n#\n#\n# # window()","sub_path":"ProgBasics_4/music_collector_mzgw/dicplayPyQt5.py","file_name":"dicplayPyQt5.py","file_ext":"py","file_size_in_byte":9494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"486923251","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\n This module is used to print out the results after each race.\n\"\"\"\nimport json\nimport logging\n\nfrom collectData import Datastore\nfrom strategy import Betstore\nimport settledBets\n\nresultlogger = logging.getLogger(__name__)\nresultlogger.setLevel(logging.INFO)\n\nclass Results(object):\n def __init__(self, message, **kwargs):\n self.message = message\n self.kwargs = kwargs\n\n def __str__(self, *args, **kwargs):\n return '%s' % json.dumps(self.kwargs)\n\n\ndef print_results(marketId, selectionId):\n \"\"\"\n Prints the results from the race to the log\n \"\"\"\n _ = Results\n curBalance = settledBets.CurrentBalance(betfairClient)\n resultlogger.info(\"=== RACE RESULTS for %s ===\", marketId)\n resultlogger.info(_(race_name=Datastore[marketId]['marketName'],\n race_time=Datastore[marketId]['openTime'], #open time or market time?\n horse_selected=Datastore[marketId][selectionId]['HorseName'],\n cur_price=Betstore[marketId][selectionId]['curPrice'],\n cur_volume=Betstore[marketId][selectionId]['curVolume'],\n bet_type=0,\n cur_balance=curBalance + Betstore['curLiability'],\n cur_Profit=curBalance - Betstore['InitialBalance'],\n high_balance=Betstore['BalanceHigh'],\n balance_aim=Betstore['curPotential'],\n strat_high=Betstore['stratHigh'],\n strat_low=Betstore['stratLow'],\n risk_multi=Betstore['riskMulti'],\n cur_loss=max(0, Betstore['stratHigh'] - Betstore['Balance'] +\n Betstore['curLiability']),\n max_risk=Betstore[marketId][selectionId]['maxRisk'],\n min_risk=Betstore[marketId][selectionId]['minRisk']))\n resultlogger.info(\"=== END RACE RESULTS for %s\", marketId)","sub_path":"results.py","file_name":"results.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"100895377","text":"#!/usr/bin/python3\n\nimport sys\n\n# hex ed\n\n# n, ne, se, s, sw, nw\n\n# given path child took, calculate fewest steps to reach them.\n\n# 1. remove opposing steps (n<-->s)\n# 2. merge two-away steps (n + se = ne, etc)\n# 3. 
remove opposing steps again?\n\nstepOrder = ['n', 'ne', 'se', 's', 'sw', 'nw']\n\n\ndef removeOpposing(path):\n half = len(stepOrder)//2\n\n for dir in range(0, half):\n cancelled = min(path[stepOrder[dir]], path[stepOrder[dir+half]])\n path[stepOrder[dir]] -= cancelled\n path[stepOrder[dir+half]] -= cancelled\n\n return path\n\n\ndef mergeSemiAdjacent(path):\n numSteps = len(stepOrder)\n\n for dir in range(0, numSteps):\n merged = min(path[stepOrder[dir]], path[stepOrder[(dir+2) % numSteps]])\n path[stepOrder[dir]] -= merged\n path[stepOrder[(dir+1) % numSteps]] += merged\n path[stepOrder[(dir+2) % numSteps]] -= merged\n\n return path\n\n\ndef calculateFarthest(stepstr):\n path = {key: 0 for key in stepOrder}\n farthest = 0\n\n for step in stepstr.rstrip().split(','):\n path[step] += 1\n currdist = getShortestPath(path)\n if currdist > farthest:\n farthest = currdist\n\n return farthest\n\n\ndef lenPath(path):\n return sum(path[dir] for dir in stepOrder)\n\n\ndef getShortestPath(path):\n oldpath = 999999999\n while True:\n path = removeOpposing(path)\n path = mergeSemiAdjacent(path)\n lenpath = lenPath(path)\n if lenpath == oldpath:\n break\n oldpath = lenpath\n return lenpath\n\n\ntt = {\n 'ne,ne,ne': 3,\n 'ne,ne,sw,sw': 2,\n 'ne,ne,s,s': 2,\n 'se,sw,se,sw,sw': 3\n}\nfor k, v in tt.items():\n result = calculateFarthest(k)\n if result != v:\n print(\"FAIL: input \", k, \": expected \", v, \", got \", result, sep=\"\")\n\n# The input is not checked for sanity, just existence.\nstepstrs = sys.stdin.readlines()\nif len(stepstrs) == 0:\n print(\"stepstrs missing!\")\n sys.exit(1)\n\nprint(calculateFarthest(stepstrs[0]))\n","sub_path":"Day11b/Day11b.py","file_name":"Day11b.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"280690499","text":"#TO SAVE PHOTOS\n\nimport cv2\nimport os\nimport time\n# Load the cascade\nface_cascade=cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\n\nid=input()\n# To capture video from webcam. \ncap = cv2.VideoCapture(1)\n# To use a video file as input \n# cap = cv2.VideoCapture('filename.mp4')\ncount=0\nwhile True:\n # Read the frame\n _, img = cap.read()\n # Convert to grayscale\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # Detect the faces\n faces = face_cascade.detectMultiScale(gray, 1.3, 3)\n \n # Draw the rectangle around each face\n for (x, y, w, h) in faces:\n cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)\n font = cv2.FONT_HERSHEY_SIMPLEX\n cv2.putText(img,\"user.\"+id+\".\"+str(count),(x,y), font,1,(255,0,0),2,cv2.LINE_AA)\n roi_gray = gray[y:y+h, x:x+w]\n roi_color = img[y:y+h, x:x+w]\n img_item=\"user.\"+id+\".\"+str(count)+\".png\"\n \n #press c to capture photo\n if cv2.waitKey(2) & 0xFF==ord('c'):\n if(count<25 ):\n\n path = r\"C:\\Users\\dk887\\Desktop\\Robot\\dataSet\"\n cv2.imwrite(os.path.join(path ,img_item),roi_color)\n count+=1\n \n \n # Display\n cv2.imshow('img', img)\n\n # Stop if escape key is pressed\n if cv2.waitKey(2) & 0xFF==ord('q'):\n break\n if(count==25):\n break\n# Release the VideoCapture object\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"capture_photo.py","file_name":"capture_photo.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"106537238","text":"# The number 6 is a truly great number. Given two int values, a and b, return True if either one is 6. Or if their sum or difference is 6. 
Note: the function abs(num) computes the absolute value of a number.\r\n\r\n\r\n# love6(6, 4) → True\r\n# love6(4, 5) → False\r\n# love6(1, 5) → True\r\n\r\ninp_a = int(input(\"Enter Value Of 'A' : \"))\r\ninp_b = int(input(\"Enter Value Of 'B' : \"))\r\nc = inp_a + inp_b \r\nif ((inp_a == 6 or inp_b == 6)):\r\n print('True') \r\nelif (c == 6 ):\r\n print(\"True\")\r\nelse :\r\n print(c)","sub_path":"Love6.py","file_name":"Love6.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"153445398","text":"from django import forms\nfrom .models import Car,User,Seller,Buyer\nfrom django.forms.utils import ValidationError\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.db import transaction\n\nclass Car_Part_Form(forms.ModelForm):\n class Meta:\n model=Car\n fields=[\n 'Car_name',\n 'Car_model',\n 'Car_Part_Name',\n 'Car_Part_Info',\n 'Car_Part_Cat',\n 'Car_Part_Discription',\n 'Owner_info',\n ]\n# seller signup form \nclass SellerSignUpFrom(UserCreationForm):\n class Meta(UserCreationForm.Meta):\n model=User\n def save(self, commit=True):\n user = super().save(commit=False)\n user.is_seller= True\n user.save()\n if commit:\n user.save()\n return user\n\n# user signup from\nclass BuyerSignupForm(UserCreationForm):\n #email\n class Meta(UserCreationForm.Meta):\n model = User\n \n @ transaction.atomic\n def save(self):\n user=super().save(commit=False)\n user.is_user=True\n user.save()\n user=Buyer.objects.create(user=user)\n #email\n return user\n # car=Car.objects.create(user=user)\n # car.interests.add(*self.cleaned_data.get('interests'))","sub_path":"Car/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"367325883","text":"from django.urls import path\nfrom main.views import contact, about, commentview, commentconfirm, Commview\n\napp_name = 'met'\nurlpatterns = [\n path('contact/', contact, name='con'),\n path('comment/', commentview, name='com'),\n path('about/', about, name='about'),\n path('com/', commentconfirm, name='conconfirm'),\n path('commview/', Commview.as_view(), name='commview'),\n\n\n]","sub_path":"main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"309216913","text":"import random\n\nrock = '''\n _______\n---' ____)\n (_____)\n (_____)\n (____)\n---.__(___)\n'''\n\npaper = '''\n _______\n---' ____)____\n ______)\n _______)\n _______)\n---.__________)\n'''\n\nscissors = '''\n _______\n---' ____)____\n ______)\n __________)\n (____)\n---.__(___)\n'''\nchoices = [\"rock\", \"paper\", \"scissors\"]\n\nnum = random.randint(0,2)\n\nplayer = input(\"Type 'rock' 'paper' or 'scissors'\\n\")\n\ncomputer = choices[num]\n\n\nif computer == \"paper\":\n print(f\"computer selected {computer}\")\n print(paper)\nif player == \"paper\":\n print(f\"player selected {player}\")\n print(paper)\nif computer == \"rock\":\n print(f\"computer selected {computer}\")\n print(rock)\nif player == \"rock\":\n print(f\"player selected {player}\")\n print(rock)\nif computer == \"scissors\":\n print(f\"computer selected {computer}\")\n print(scissors)\nif player == \"scissors\":\n print(f\"player selected {player}\")\n print(scissors)\n\nif player == \"rock\" and computer == \"paper\":\n print(\"computer wins\")\nelif player == \"rock\" and 
computer == \"rock\":\n print(\"Its a Tie\")\nelif player == \"paper\" and computer == \"rock\":\n print(\"player wins\")\nelif player == \"rock\" and computer == \"scissors\":\n print(\"player wins\")\nelif player == \"scissors\" and computer == \"rock\":\n print(\"computer wins\")\nelif player == \"scissors\" and computer == \"scissors\":\n print(\"its a tie\")\nelif player == \"paper\" and computer == \"scissors\":\n print(\"computer wins\")\nelif player == \"paper\" and computer == \"paper\":\n print(\"its a tie\")\nelif player == \"scissors\" and computer == \"paper\":\n print(\"player wins\")\nelif player != \"rock\" or player != \"paper\" or player != \"scissors\":\n print(\"You Lose\\nYou should enter 'rock, paper, or scissors'\")\n","sub_path":"RockPaperScissors.py","file_name":"RockPaperScissors.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"32919639","text":"#!/usr/bin/python\n\nfrom mininet.topo import Topo\nfrom mininet.net import Mininet\nfrom mininet.node import Controller, RemoteController\nfrom mininet.cli import CLI\nfrom mininet.log import setLogLevel\n\nimport pprint\n\nclass host2host(Topo):\n\n def build( self ):\n \"Build a host2host topology\"\n\n # Create the mesh\n ippref = \"10.1.1.1\"\n\n h1 = self.addHost( \"h1\", ip=\"10.1.1.1/24\",\n mac=\"00:00:00:00:00:11\" )\n h2 = self.addHost( \"h2\", ip=\"10.1.1.2/24\",\n mac=\"00:00:00:00:00:22\" )\n\n self.addLink( h1, h2 )\n return\n\nif __name__ == '__main__':\n setLogLevel( 'info' )\n \n topo = host2host()\n net = Mininet( controller=lambda a: RemoteController(a, ip='127.0.0.1'),\n topo=topo )\n net.start()\n CLI( net )\n net.stop();\n","sub_path":"src/mcast-dev-test/topos/host2host.py","file_name":"host2host.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"630421503","text":"from bge import logic, types\nfrom collections import namedtuple\nfrom contextlib import contextmanager\n\nfrom network.decorators import with_tag\nfrom network.signals import SignalListener\nfrom network.tagged_delegate import FindByTag\n\nfrom game_system.animation import Animation\nfrom game_system.coordinates import Vector\nfrom game_system.definitions import ComponentLoader\nfrom game_system.enums import AnimationMode, AnimationBlend, Axis, CollisionState, PhysicsType\nfrom game_system.signals import CollisionSignal, UpdateCollidersSignal\n\nfrom functools import partial\n\nfrom .geometry.mesh.navmesh import BGENavmesh\n\n\nRayTestResult = namedtuple(\"RayTestResult\", \"position normal entity distance\")\nCollisionResult = namedtuple(\"CollisionResult\", \"entity state contacts\")\nCollisionContact = namedtuple(\"CollisionContact\", \"position normal impulse force\")\n\n\nclass BGESocket:\n\n def __init__(self, name, parent, obj):\n self.name = name\n self._parent = parent\n self._obj = obj\n self.children = set()\n\n\nclass BGEComponent(FindByTag):\n subclasses = {}\n\n def destroy(self):\n pass\n\n# TODO use enums here\n\n@with_tag(\"physics\")\nclass BGEPhysicsInterface(BGEComponent):\n\n def __init__(self, config_section, entity, obj):\n self._obj = obj\n self._entity = entity\n\n self._new_collisions = set()\n self._old_collisions = set()\n self._dispatched = set()\n self._dispatched_entities = set()\n\n # Physics type\n # physics_constants = {logic.KX_PHYSICS_STATIC: PhysicsType.static,\n # logic.KX_PHYSICS_DYNAMIC: 
PhysicsType.dynamic,\n # logic.KX_PHYSICS_RIGID_BODY: PhysicsType.rigid_body,\n # logic.KX_PHYSICS_SOFT_BODY: PhysicsType.soft_body,\n # logic.KX_PHYSICS_OCCLUDER: PhysicsType.occluder,\n # logic.KX_PHYSICS_SENSOR: PhysicsType.sensor,\n # logic.KX_PHYSICS_NAVIGATION_MESH: PhysicsType.navigation_mesh,\n # logic.KX_PHYSICS_CHARACTER: PhysicsType.character,\n # logic.KX_PHYSICS_NO_COLLISION: PhysicsType.no_collision}\n\n if getattr(obj, \"meshes\", None) and 0:\n self._physics_type = physics_constants[obj.physicsType]\n\n else:\n self._physics_type = PhysicsType.no_collision\n\n # Collisions/\n self.has_dynamics = not self._physics_type in (PhysicsType.navigation_mesh, PhysicsType.no_collision)\n if self.has_dynamics:\n obj.collisionCallbacks.append(self._on_collision)\n\n # Used for raycast lookups\n obj['physics_component'] = self\n\n @staticmethod\n def entity_from_object(obj):\n try:\n component = obj[\"physics_component\"]\n\n except KeyError:\n return\n\n return component._entity\n\n @property\n def collision_group(self):\n return 0#self._obj.collisionGroup\n\n @property\n def collision_mask(self):\n return 0#self._obj.collisionMask\n\n @property\n def physics(self):\n \"\"\"The physics type of this object\n\n :returns: physics type of object, see :py:class:`bge_game_system.enums.PhysicsType`\n \"\"\"\n return self._physics_type\n\n @property\n def world_velocity(self):\n if not self.has_dynamics:\n return Vector()\n\n return self._obj.worldLinearVelocity\n\n @world_velocity.setter\n def world_velocity(self, velocity):\n if not self.has_dynamics:\n return\n\n self._obj.worldLinearVelocity = velocity\n\n @property\n def world_angular(self):\n if not self.has_dynamics:\n return Vector()\n\n return self._obj.worldLinearVelocity\n\n @world_angular.setter\n def world_angular(self, velocity):\n if not self.has_dynamics:\n return\n\n self._obj.worldAngularVelocity = velocity\n\n def ray_test(self, target, source=None, distance=0.0):\n \"\"\"Perform a ray trace to a target\n\n :param target: target to trace towards\n :param source: optional origin of trace, otherwise object position\n :param distance: distance to use instead of vector length\n :rtype: :py:class:`bge_game_system.object_types.RayTestResult`\n \"\"\"\n if source is None:\n source = self._obj.worldPosition\n\n result = self._obj.rayCast(target, source, distance)\n\n if not any(result):\n return None\n\n hit_bge_object, hit_position, hit_normal = result\n hit_entity = self.entity_from_object(hit_bge_object)\n hit_distance = (hit_position - source).length\n\n return RayTestResult(hit_position, hit_normal, hit_entity, hit_distance)\n\n @staticmethod\n def _convert_contacts(contacts):\n return [CollisionContact(c.hitPosition, c.hitNormal, c.hitImpulse, c.hitForce) for c in contacts]\n\n def _on_collision(self, other, data):\n self._new_collisions.add(other)\n\n if other in self._dispatched:\n return\n\n hit_entity = self.entity_from_object(other)\n\n self._dispatched.add(other)\n\n if hit_entity:\n self._dispatched_entities.add(hit_entity)\n\n hit_contacts = self._convert_contacts(data)\n result = CollisionResult(hit_entity, CollisionState.started, hit_contacts)\n\n CollisionSignal.invoke(result, target=self._entity)\n\n @UpdateCollidersSignal.global_listener\n def _update_collisions(self):\n # If we have a stored collision\n ended_collisions = self._old_collisions.difference(self._new_collisions)\n self._old_collisions, self._new_collisions = self._new_collisions, set()\n\n if not ended_collisions:\n return\n\n callback = 
CollisionSignal.invoke\n ended_collision = CollisionState.ended\n\n entity = self._entity\n for obj in ended_collisions:\n self._dispatched.remove(obj)\n\n if not obj.invalid:\n hit_entity = self.entity_from_object(obj)\n\n if hit_entity:\n self._dispatched_entities.remove(hit_entity)\n\n result = CollisionResult(hit_entity, ended_collision, None)\n callback(result, target=entity)\n\n\n@with_tag(\"transform\")\nclass BGETransformInterface(BGEComponent, SignalListener):\n \"\"\"Physics implementation for BGE entity\"\"\"\n\n def __init__(self, config_section, entity, obj):\n self._obj = obj\n self._entity = entity\n\n self._parent = None\n self.children = set()\n\n self.create_sockets()\n self.register_signals()\n\n @property\n def parent(self):\n return self._parent\n\n @parent.setter\n def parent(self, value):\n if value is self._parent:\n return\n\n self._parent.children.remove(self._entity)\n self._obj.removeParent()\n\n if value is None:\n return\n\n if not hasattr(value, \"_obj\"):\n raise TypeError(\"Invalid parent type {}\".format(value.__class__.__name__))\n\n self._obj.setParent(value._obj)\n value.children.add(self._entity)\n self._parent = value\n\n @property\n def world_position(self):\n return self._obj.worldPosition\n\n @world_position.setter\n def world_position(self, position):\n self._obj.worldPosition = position\n\n @property\n def world_orientation(self):\n return self._obj.worldOrientation.to_euler()\n\n @world_orientation.setter\n def world_orientation(self, orientation):\n self._obj.worldOrientation = orientation\n\n @property\n def is_colliding(self):\n return bool(self._dispatched)\n\n def align_to(self, vector, factor=1, axis=Axis.y):\n if not vector.length_squared:\n return\n\n if axis == Axis.x:\n forward_axis = \"X\"\n\n elif axis == Axis.y:\n forward_axis = \"Y\"\n\n elif axis == Axis.z:\n forward_axis = \"Z\"\n\n else:\n raise ValueError(\"Unknown Axis value: {}\".format(axis))\n\n rotation_quaternion = vector.to_track_quat(forward_axis, \"Z\")\n current_rotation = self.world_rotation.to_quaternion()\n self.world_rotation = current_rotation.slerp(rotation_quaternion, factor).to_euler()\n\n def create_sockets(self):\n self.sockets = set()\n\n for obj in self._obj.childrenRecursive:\n socket_name = obj.get(\"socket\")\n if not socket_name:\n continue\n\n socket = BGESocket(socket_name, self, obj)\n self.sockets.add(socket)\n\n def get_direction_vector(self, axis):\n \"\"\"Get the axis vector of this object in world space\n\n :param axis: :py:code:`bge_game_system.enums.Axis` value\n :rtype: :py:code:`mathutils.Vector`\n \"\"\"\n vector = [0, 0, 0]\n\n if axis == Axis.x:\n vector[0] = 1\n\n elif axis == Axis.y:\n vector[1] = 1\n\n elif axis == Axis.z:\n vector[2] = 1\n\n return Vector(self.object.getAxisVect(vector))\n\n def is_colliding_with(self, entity):\n \"\"\"Determines if the entity is colliding with another entity\n\n :param entity: entity to evaluate\n :returns: result of condition\n \"\"\"\n return entity in self._dispatched_entities\n\n\n@with_tag(\"animation\")\nclass BGEAnimationInterface(BGEComponent):\n \"\"\"Animation implementation for BGE entity\"\"\"\n\n def __init__(self, config_section, entity, obj):\n try:\n skeleton = next(o for o in obj.childrenRecursive if isinstance(obj, types.BL_ArmatureObject))\n\n except StopIteration:\n raise TypeError(\"Animation component requires Armature object\")\n\n self._obj = skeleton\n\n # Define conversions from Blender behaviours to Network animation enum\n self._bge_play_constants = {AnimationMode.play: 
logic.KX_ACTION_MODE_PLAY,\n AnimationMode.loop: logic.KX_ACTION_MODE_LOOP,\n AnimationMode.ping_pong: logic.KX_ACTION_MODE_PING_PONG}\n\n self._bge_blend_constants = {AnimationBlend.interpolate: logic.KX_ACTION_BLEND_BLEND,\n AnimationBlend.add: logic.KX_ACTION_BLEND_ADD}\n\n self.animations = self.load_animations(skeleton, config_section)\n\n @staticmethod\n def load_animations(obj, data):\n animations = {}\n\n for animation_name, animation_data in data.items():\n frame_info = animation_data['frame_info']\n start = frame_info['start']\n end = frame_info['end']\n\n modes = animation_data['modes']\n blend_mode = modes['blend']\n play_mode = modes['play']\n\n layer_data = animation_data['layers']\n layer = layer_data['layer']\n blending = layer_data['blending']\n weight = layer_data['weight']\n\n playback = animation_data['playback']\n priority = playback['priority']\n speed = playback['speed']\n\n callback = partial(obj.isPlayingAction, layer)\n\n animation = Animation(animation_name, start, end, layer, priority, blending, play_mode, weight, speed,\n blend_mode, callback)\n animations[animation_name] = animation\n\n return animations\n\n def get_frame(self, animation):\n \"\"\"Get the current frame of the animation\n\n :param animation: animation object\n \"\"\"\n return int(self._obj.getActionFrame(animation.layer))\n\n def play(self, animation):\n \"\"\"Play animation on bound object\n\n :param animation: animation resource\n \"\"\"\n play_mode = self._bge_play_constants[animation.mode]\n blend_mode = self._bge_blend_constants[animation.blend_mode]\n self._obj.playAction(animation.name, animation.start, animation.end, animation.layer, animation.priority,\n animation.blend, play_mode, animation.weight, speed=animation.speed, blend_mode=blend_mode)\n\n def stop(self, animation):\n \"\"\"Stop a playing animation on bound object\n\n :param animation: animation resource\n \"\"\"\n self._obj.stopAction(animation.layer)\n\n\n@with_tag(\"camera\")\nclass BGECameraInterface(BGEComponent):\n\n def __init__(self, config_section, entity, obj):\n self._obj = obj\n\n @contextmanager\n def active_context(self):\n camera = self._obj\n scene = camera.scene\n\n old_camera = scene.active_camera\n scene.active_camera = camera\n\n yield\n\n if old_camera:\n scene.active_camera = old_camera\n\n def is_point_in_frustum(self, point):\n \"\"\"Determine if a point resides in the camera frustum\n\n :param point: :py:code:`mathutils.Vector`\n :rtype: bool\n \"\"\"\n return self._obj.pointInsideFrustum(point)\n\n def is_sphere_in_frustum(self, point, radius):\n \"\"\"Determine if a sphere resides in the camera frustum\n\n :param point: :py:code:`mathutils.Vector`\n :param radius: radius of sphere\n :rtype: bool\n \"\"\"\n return self._obj.sphereInsideFrustum(point, radius) != self._obj.OUTSIDE\n\n def get_screen_direction(self, x=0.5, y=0.5):\n \"\"\"Find direction along screen vector\n\n :param x: screen space x coordinate\n :param y: screen space y coordinate\n \"\"\"\n return self._obj.getScreenRay(x, y)\n\n\n@with_tag(\"lamp\")\nclass BGELampInterface(BGEComponent):\n\n def __init__(self, config_section, entity, obj):\n self._obj = obj\n\n @property\n def colour(self):\n return self._obj.color\n\n @colour.setter\n def colour(self, colour):\n self._obj.color = colour\n\n @property\n def intensity(self):\n return self._obj.energy\n\n @intensity.setter\n def intensity(self, energy):\n self._obj.energy = energy\n\n\n@with_tag(\"navmesh\")\nclass BGENavmeshInterface(BGEComponent):\n\n def __init__(self, 
config_section, entity, obj):\n self._navmesh = BGENavmesh(obj)\n self._obj = obj\n\n self.find_node = self._navmesh.find_node\n self.nodes = self._navmesh.polygons\n\n\n@with_tag(\"BGE\")\nclass BGEComponentLoader(ComponentLoader):\n\n def __init__(self, *component_tags):\n self.component_tags = component_tags\n self.component_classes = {tag: BGEComponent.find_subclass_for(tag) for tag in component_tags}\n\n @classmethod\n def create_object(cls, config_parser):\n scene = logic.getCurrentScene()\n\n object_name = config_parser['object_name']\n assert object_name in scene.objectsInactive, (object_name, scene.objectsInactive)\n return scene.addObject(object_name, object_name)\n\n def load(self, entity, config_parser):\n obj = self.create_object(config_parser)\n components = self._load_components(config_parser, entity, obj)\n return LoaderResult(components, obj)\n\n\nclass LoaderResult:\n\n def __init__(self, components, obj):\n self._obj = obj\n self.components = components\n\n def unload(self):\n for component in self.components.values():\n component.destroy()\n\n self._obj.endObject()","sub_path":"bge_game_system/definitions.py","file_name":"definitions.py","file_ext":"py","file_size_in_byte":15189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"552989264","text":"import requests\r\n\r\napi_key=str(input(\"Enter your API KEY: \"))\r\nplace=input(\"Enter your City name: \")\r\n\r\ncomplete_api_link=\"https://api.openweathermap.org/data/2.5/weather?q=\"+place+\"&appid=\"+api_key\r\napi_link=requests.get(complete_api_link)\r\napi_data=api_link.json()\r\n\r\n\r\ntemperature_of_city=((api_data['main']['temp'])-273.15)\r\nweather_description=api_data['weather'][0]['description']\r\nhumidity_of_city=api_data['main']['humidity']\r\nwind_speed=api_data['wind']['speed']\r\n\r\nprint(\"*****************************************************\")\r\nprint(\"Weather report for the location of {}\".format(place.upper()))\r\nprint(\"*****************************************************\")\r\n\r\nr=open('report.txt','a')\r\n\r\ntemp=str(temperature_of_city)\r\nwd=str(weather_description)\r\nh=str(humidity_of_city)\r\nws=str(wind_speed)\r\nprint(\"The fetched data's are uploaed to the file named report.txt\")\r\nr.write(\"Current temprature is: \")\r\nr.write(temp+\"deg in C \\n\")\r\nr.write(\"Current weather_description is: \")\r\nr.write(wd+\"\\n\")\r\nr.write(\"Current humidity is: \")\r\nr.write(h+\"% \\n\")\r\nr.write(\"Current wind_speed is: \")\r\nr.write(ws+\"kmph \\n\")\r\nprint(\"Printing completed , Check out the file report.txt in this same location\")\r\n\r\nr.close()\r\n","sub_path":"Surendhiraraj S - Project ( Shape AI)/Surendhiraraj S - Project ( Shape AI).py","file_name":"Surendhiraraj S - Project ( Shape AI).py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"261496010","text":"from opengever.base.model import Session\nfrom opengever.base.oguid import Oguid\nfrom opengever.base.sqlsyncer import SqlSyncer\nfrom opengever.meeting.model.proposal import Proposal\n\n\nclass ProposalSqlSyncer(SqlSyncer):\n\n def get_proposal(self):\n \"\"\"Return or create the corresponding proposal model.\"\"\"\n\n oguid = Oguid.for_object(self.obj)\n proposal = Proposal.query.get_by_oguid(oguid)\n if proposal is None:\n proposal = Proposal.create_from(self.obj)\n Session.add(proposal)\n return proposal\n\n def sync_with_sql(self):\n 
self.get_proposal().sync_with_proposal(self.obj)\n\n\nclass SubmittedProposalSqlSyncer(SqlSyncer):\n\n def get_proposal(self):\n \"\"\"Return the corresponding proposal model.\"\"\"\n\n oguid = Oguid.for_object(self.obj)\n return Proposal.query.filter_by(submitted_oguid=oguid).one()\n\n def sync_with_sql(self):\n self.get_proposal().sync_with_submitted_proposal(self.obj)\n","sub_path":"opengever/meeting/proposalsqlsyncer.py","file_name":"proposalsqlsyncer.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"344922070","text":"import yfinance as yf\nimport io\n\n\nclass yahoo:\n def getMA(self, symbolId, days=\"1mo\"):\n symbolId = symbolId+\".TW\"\n stock = yf.Ticker(symbolId)\n hist = stock.history(period=days)\n return round(sum([int(data) for data in hist[\"Close\"]])/len(hist[\"Close\"]), 2)\n\n def getHistory(self, symbolId, days=\"1d\"):\n symbolId = symbolId+\".TW\"\n stock = yf.Ticker(symbolId)\n if days != \"1d\":\n hist = stock.history(period=days)\n else:\n hist = stock.history(period=\"1d\", interval=\"1m\")\n return hist\n\n def getTodayPrice(self, symbolId):\n data = self.getHistory(symbolId)\n symbolId_tw = symbolId+\".TW\"\n stock = yf.Ticker(symbolId_tw)\n PreClose = stock.info[\"previousClose\"]\n current_price = data.iloc[-1, data.columns.get_loc(\"High\")]\n if PreClose > current_price:\n graph = data.plot(y='High', color='green')\n elif PreClose == current_price:\n graph = data.plot(y='High', color='gray')\n else:\n graph = data.plot(y='High', color='red')\n\n graph.axhline(PreClose, linestyle='dashed',\n color='xkcd:dark grey', alpha=0.6, label='參考價', marker='')\n graph.legend().set_visible(False)\n fig = graph.get_figure()\n buf = io.BytesIO()\n fig.savefig(buf, format='png')\n ret = dict()\n ret['Name'] = stock.info[\"shortName\"]\n ret['RealPrice'] = current_price\n ret['ID'] = symbolId\n ret['photo'] = buf\n return ret\n\n\nif __name__ == '__main__':\n hp = yahoo()\n # ma = hp.getMA(\"2454\", \"1mo\")\n # print(ma)\n # hist = hp.getHistory(\"2454\")\n # print(hist)\n hp.getTodayPrice(\"2454\")\n","sub_path":"DataProvider/yahoo.py","file_name":"yahoo.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"181547512","text":"from rest_framework.views import APIView\nfrom users.Api.api_register import register\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom buyer.Api.api_register_buyer import register_buyer_user\n\n\nclass RegisterBuyer(APIView):\n\n \n def post(self, request):\n\n try:\n custom_user_data = {\n 'email' : request.data.get('email'),\n 'password' : request.data.get('password'),\n 'username' : request.data.get('username'),\n }\n\n custom_user_register = register(custom_user_data)\n print(custom_user_register)\n if custom_user_register['error_code'] == 2:\n return Response(custom_user_register['serializer_msg'], status = status.HTTP_400_BAD_REQUEST)\n elif custom_user_register['error_code'] == 4:\n return Response(custom_user_register['serializer_msg'], status = status.HTTP_400_BAD_REQUEST)\n elif custom_user_register['error_code'] == 1 or custom_user_register['error_code'] == 3:\n \n couser_data = {\n 'user' : custom_user_register['serializer_msg'],\n 'name' : request.data.get('name'),\n 'about' : request.data.get('about'),\n 'address' : request.data.get('address'),\n 'city' : request.data.get('city'),\n 'country' : 
request.data.get('country'),\n 'image' : request.data.get('image'),\n 'area' : request.data.get('area'),\n }\n \n buyer_user_register = register_buyer_user(couser_data)\n \n if buyer_user_register['error_code'] == 2:\n return Response(buyer_user_register['serializer_msg'], status = status.HTTP_400_BAD_REQUEST)\n elif buyer_user_register['error_code'] == 4:\n return Response(buyer_user_register['serializer_msg'], status = status.HTTP_400_BAD_REQUEST)\n elif buyer_user_register['error_code'] == 1:\n return Response(buyer_user_register['serializer_msg'], status = status.HTTP_400_BAD_REQUEST)\n else:\n return Response(buyer_user_register['serializer_msg'], status = status.HTTP_201_CREATED)\n\n\n\n except Exception as e:\n return Response(str(e), status = status.HTTP_400_BAD_REQUEST)\n","sub_path":"buyer/Api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"376315688","text":"import uuid\n\nimport six\n\nfrom cloudbridge.cloud.interfaces import MachineImageState\nfrom test.helpers import ProviderTestBase\nimport test.helpers as helpers\n\n\nclass CloudImageServiceTestCase(ProviderTestBase):\n\n def __init__(self, methodName, provider):\n super(CloudImageServiceTestCase, self).__init__(\n methodName=methodName, provider=provider)\n\n def test_create_and_list_image(self):\n \"\"\"\n Create a new image and check whether that image can be listed.\n This covers waiting till the image is ready, checking that the image\n name is the expected one and whether list_images is functional.\n \"\"\"\n instance_name = \"CBImageTest-{0}-{1}\".format(\n self.provider.name,\n uuid.uuid4())\n net, _ = helpers.create_test_network(self.provider, instance_name)\n test_instance = helpers.get_test_instance(self.provider, instance_name,\n network=net)\n with helpers.cleanup_action(lambda: helpers.cleanup_test_resources(\n test_instance, net)):\n name = \"CBUnitTestListImg-{0}\".format(uuid.uuid4())\n test_image = test_instance.create_image(name)\n\n def cleanup_img(img):\n img.delete()\n img.wait_for(\n [MachineImageState.UNKNOWN, MachineImageState.ERROR])\n\n with helpers.cleanup_action(lambda: cleanup_img(test_image)):\n test_image.wait_till_ready()\n\n self.assertTrue(\n test_instance.id in repr(test_instance),\n \"repr(obj) should contain the object id so that the object\"\n \" can be reconstructed, but does not.\")\n\n self.assertTrue(\n test_image.description is None or isinstance(\n test_image.description, six.string_types),\n \"Image description must be None or a string\")\n\n images = self.provider.compute.images.list()\n list_images = [image for image in images\n if image.name == name]\n self.assertTrue(\n len(list_images) == 1,\n \"List images does not return the expected image %s\" %\n name)\n\n # check iteration\n iter_images = [image for image in self.provider.compute.images\n if image.name == name]\n self.assertTrue(\n len(iter_images) == 1,\n \"Iter images does not return the expected image %s\" %\n name)\n\n # find image\n found_images = self.provider.compute.images.find(name=name)\n self.assertTrue(\n len(found_images) == 1,\n \"Find images error: expected image %s but found: %s\" %\n (name, found_images))\n\n # check non-existent find\n ne_images = self.provider.compute.images.find(\n name=\"non_existent\")\n self.assertTrue(\n len(ne_images) == 0,\n \"Find() for a non-existent image returned %s\" %\n ne_images)\n\n get_img = self.provider.compute.images.get(\n 
test_image.id)\n self.assertTrue(\n found_images[0] == iter_images[0] == get_img == test_image,\n \"Objects returned by list: {0} and get: {1} are not as \"\n \" expected: {2}\" .format(found_images[0].id,\n get_img.id,\n test_image.id))\n self.assertTrue(\n list_images[0].name == found_images[0].name ==\n get_img.name == test_image.name,\n \"Names returned by list: {0}, find: {1} and get: {2} are\"\n \" not as expected: {3}\" .format(list_images[0].name,\n found_images[0].name,\n get_img.name,\n test_image.name))\n # TODO: Images take a long time to deregister on EC2. Needs\n # investigation\n images = self.provider.compute.images.list()\n found_images = [image for image in images\n if image.name == name]\n self.assertTrue(\n len(found_images) == 0,\n \"Image %s should have been deleted but still exists.\" %\n name)\n","sub_path":"test/test_image_service.py","file_name":"test_image_service.py","file_ext":"py","file_size_in_byte":4698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"278152597","text":"from ngs.base.dataset import image_files_dataset, CrossDataset\nfrom ngs.base.transforms import ImageResizeToSquare\nfrom ngs.image.cyclegan import Pix2PixDenseG, CycleGANTrainer\nfrom .base.system import NNSystem\nfrom .image.gan import *\n\n\nclass TachieSystem(NNSystem):\n def __init__(self, context: Context):\n super().__init__(\"tachie\", context)\n self.new_child(\"cg2nude\", CG2NudeSystem)\n\n\nclass CG2NudeSystem(NNSystem):\n def __init__(self, name: str, context: Context):\n super().__init__(name, context)\n # domain A: CG\n # domain B: Nude\n self.image_side = 192\n self.input_image_size = (self.image_side, self.image_side)\n self._cg2nude = Pix2PixDenseG()\n self._cg2nude_criti = SNDenseD(self.input_image_size)\n self._nude2cg = Pix2PixDenseG()\n self._nude2cg_criti = SNDenseD(self.input_image_size)\n\n def _train(self):\n trainer = CycleGANTrainer(\n net_g_a=self._nude2cg,\n net_d_a=self._nude2cg_criti,\n net_g_b=self._cg2nude,\n net_d_b=self._cg2nude_criti,\n topic=self.name,\n context=self._context,\n adv_loss=GANLoss.RelativeStandard,\n config={\n 'checkpoint': {\n 'every_step': 100\n }\n }\n )\n\n dataset_cg_path = self._config.get(\"dataset.domain_a\", fallback=None)\n dataset_nude_path = self._config.get(\"dataset.domain_b\", fallback=None)\n # val_dataset_path = self._config.get(\"val_dataset\", fallback=None)\n\n image_transforms = transforms.Compose([\n ImageResizeToSquare(self.image_side),\n image_to_tensor\n ])\n\n dataset_cg = TransformDataset(image_files_dataset(dataset_cg_path), image_transforms)\n dataset_nude = TransformDataset(image_files_dataset(dataset_nude_path), image_transforms)\n\n cycle_dataset = CrossDataset(dataset_cg, dataset_nude)\n\n # val_dataset = TransformDataset(image_files_dataset(val_dataset_path), image_transforms)\n\n task = trainer.new_task(cycle_dataset, None)\n task.execute()\n\n self.save_params(\"cg2nude\")\n self.save_params(\"nude2cg\")\n","sub_path":"ngs/tachie.py","file_name":"tachie.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"50674287","text":"import tweepy\nimport json\nfrom tweepy import OAuthHandler\n \ndef process_or_store(tweet):\n# a=json.dumps(tweet)\n# print(json.dumps(tweet, sort_keys=True, indent=4))\n\ta=json.loads(json.dumps(tweet))\n#\tprint a['text'].encode('ascii', 'ignore')\n# print \"a:\", a[0] \n# line_object = json.loads(tweet)\n# print \"Line:\", 
line\n\twith open('python.json', 'a') as f:\n\t\tf.write(json.dumps(tweet))\n\treturn True\n\n\nconsumer_key = 't5AGKRMAP9jxKLiHIBeIOWROx'\nconsumer_secret = 'j6IWkW3fEN0JkckZty9adylFMEFUsNvlKo2iCU7yX8gbbhoLdf'\naccess_token = '55853810-LmmIHxxlzD0AAT1sQ4c3mbAbZHXDoNqTyFp2EQQMY'\naccess_secret = 'ZRh5zEwJ8aID1cunNgkt4vnl18Kh6Q0gd5mZBrMklO8xN'\n \nauth = OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_secret)\n \napi = tweepy.API(auth)\n#tweet=api.search(q=\"@VirginAmerica\")\n\n#print(json.dumps(tweet))\nfor tweet in tweepy.Cursor(api.search,q=\"@VirginAmerica\").items():\n#for tweet in tweepy.Cursor(api.search(q=\"@VirginAmerica\")).items():\n    process_or_store(tweet._json)\n    # Process a single status\n# print(status.text)\n\n","sub_path":"tweet_stream/tweet.py","file_name":"tweet.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"352167100","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport uncertainties.unumpy as unp\nimport scipy.constants as con\nfrom scipy.optimize import curve_fit\nfrom scipy import stats\nfrom uncertainties import ufloat\n\n################################################################################\nprint('\\n' + '(I) Bestimmung der Wellenlänge')\nN, d = np.genfromtxt('data/1.txt', unpack=True)\nü = 5.017 # Hebelübersetzung\nd = d * 10**6 # µm\n# d = np.mean(d)\n# print(d)\n# N = np.mean(N)\n\nl = (2*d)/(N * ü) # µm; Bestimmte Wellenlänge\nprint(l)\nl = ufloat(np.mean(l), stats.sem(l))\nprint('Bestimmte Wellenlaenge des Lasers: ', l, 'nm')\n\n\n\n################################################################################\nprint('\\n\\n' + '(II) Brechungsindizes')\nN2, p = np.genfromtxt('data/2.txt', unpack=True)\n################################\np0 = 1.01325 # bar\nT0 = 273.15 # K\nT = 293.15 # K\nb = 50*10**(-3) # m\n################################\nlr = 635 * 10**(-9) # nm; Wellenlänge des verwendeten Lasers.\n\n# N2 = np.mean(N2)\ndn = (N2 * lr) / (2 * b)\n# p = np.mean(p)\np = p * (-1)\nprint('Delta_p = ', p)\nnn = 1 + dn * (T / T0) * (p0 / p)\nprint(nn)\nnn = ufloat(np.mean(nn), stats.sem(nn))\nprint('n = ', nn)\n\n\n################################ DISKUSSION ####################################\nprint('\\n\\n' + '(III) Abweichungen von der Theorie')\n\nlr = 635 # nm\nnluft = 1.000292\n# http://www.didaktik.physik.uni-duisburg-essen.de/~backhaus/NaturPhysikalisch/Naturphysikalischgesehen2004/OptischePhaenomene/Regenbogen/Brechungsindizes.htm\n###################\n\n# l = l * 10**3 # nm\ndl = (l - lr) / lr\nprint('Delta l: ', dl)\n\nnluft = nluft - 1\nnn = nn - 1\nprint(nn)\ndnn = (nn - nluft) / nluft\nprint('Delta nn: ', dnn)\n","sub_path":"SoSe/V401/plots/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"463857177","text":"'''\nuse this template for making basic argprase stuff this is not an example\n'''\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument(\"str1\", help=\"enter a string\")\nparser.add_argument(\"-n\",\"--num\", help=\"an optional number input\", type=int)\nargs = parser.parse_args()\nprint(args.str1)\nprint(args.num)","sub_path":"PythonLearn/Draps_tutorial/argprase/emptyArg.py","file_name":"emptyArg.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"324216416","text":"'''\r\nCreated on May 17, 2017\r\n@author: vimalk3\r\n'''\r\nimport os\r\nimport datetime\r\nimport csv\r\nfrom jinja2 import Template\r\nfrom jinja2 import Environment\r\n\r\ndef chunkstring(string, length):\r\n    return (string[0 + i:length + i] for i in range(0, len(string), length))\r\n\r\n\r\nclass ParserMACT_CSV_File(object):\r\n    '''\r\n    This class is for binding csv file and file generation methods.\r\n    from csv file data is collected in python lists\r\n    Then these lists are rendered in .c & .h files templates.\r\n    '''\r\n    def __init__(self, arg_csvfile):\r\n        '''\r\n        Constructor CounterSyncMACT_Message_ID_List\r\n        '''\r\n        self.csvfile = arg_csvfile\r\n        self.auth_mesg_macros_key = {}\r\n        self.auth_mesg_macros = []\r\n        self.mact_messageid_list = []\r\n        self.mact_mac_length_list = []\r\n        self.counter_sync_mact_messageid_list = []\r\n        self.freshness_counter_tx_length_list = []\r\n        self.key_slot_number_list = []\r\n        self.mact_security_flag_list = []\r\n\r\n        self.mact_generated_c_file = \"mact_generated.c\"\r\n        self.mact_generated_h_file = \"mact_generated.h\"\r\n\r\n    def parse_auth_mesg_macros(self):\r\n        with open(self.csvfile, 'rt') as csvfile:\r\n            try:\r\n                reader = csv.reader(csvfile)\r\n                for i, row in enumerate(reader):\r\n                    if i > 0:\r\n                        auth_mesg_id = row[0]\r\n                        freshness_counter_tx_length = row[1]\r\n                        mact_mac_length = row[2]\r\n                        counter_sync_mesg_id = row[3]\r\n                        key_slot_number = row[4]\r\n                        mact_security_flag = row[5]\r\n                        if len(auth_mesg_id) % 2 == 1:\r\n                            auth_mesg_id = '0' + auth_mesg_id\r\n                        if len(counter_sync_mesg_id) % 2 == 1:\r\n                            counter_sync_mesg_id = '0' + counter_sync_mesg_id\r\n                        self.mact_messageid_list.append(list(chunkstring(auth_mesg_id, 2)))\r\n                        self.mact_mac_length_list.append(mact_mac_length)\r\n                        self.freshness_counter_tx_length_list.append(freshness_counter_tx_length)\r\n                        self.counter_sync_mact_messageid_list.append(list(chunkstring(counter_sync_mesg_id, 2)))\r\n                        self.key_slot_number_list.append(key_slot_number)\r\n                        self.mact_security_flag_list.append(mact_security_flag)\r\n            finally:\r\n                csvfile.close()\r\n\r\n    def generate_mact_ch_files(self):\r\n        with open(os.path.join(\".\", \"mact_c_template.py\"), \"r\") as templateFile, \\\r\n                open(os.path.join(\".\", self.mact_generated_c_file), \"w\") as outputFile:\r\n            template = Template(templateFile.read())\r\n            template.globals[\"enumerate\"] = enumerate\r\n            template.globals[\"MACT_Message_ID_List\"] = self.mact_messageid_list\r\n            template.globals[\"mact_mac_length_list\"] = self.mact_mac_length_list\r\n            template.globals[\"freshness_counter_tx_length_list\"] = self.freshness_counter_tx_length_list\r\n            template.globals[\"counter_sync_mact_messageid_list\"] = self.counter_sync_mact_messageid_list\r\n            template.globals[\"key_slot_number_list\"] = self.key_slot_number_list\r\n            template.globals[\"mact_security_flag_list\"] = self.mact_security_flag_list\r\n            template.globals[\"todaydate\"] = datetime.datetime.today().strftime('%d-%m-%Y')\r\n            outputFile.write(template.render())\r\n        with open(os.path.join(\".\", \"mact_h_template.py\"), \"r\") as templateFile, \\\r\n                open(os.path.join(\".\", self.mact_generated_h_file), \"w\") as outputFile:\r\n            template = Template(templateFile.read())\r\n            template.globals[\"noofrows\"] = len(self.mact_messageid_list)\r\n            template.globals[\"todaydate\"] = datetime.datetime.today().strftime('%d-%m-%Y')\r\n            outputFile.write(template.render())\r\n\r\nif __name__ == '__main__':\r\n    csvfileparser = ParserMACT_CSV_File(\"./EBCM_MACT_NameBasedLinkingBeta20161028_1424.csv\")\r\n    
csvfileparser.parse_auth_mesg_macros()\r\n csvfileparser.generate_mact_ch_files()","sub_path":"parse_csv_file.py","file_name":"parse_csv_file.py","file_ext":"py","file_size_in_byte":4176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"527136236","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]\n# Embedded file name: T:\\InGame\\Gameplay\\Scripts\\Server\\interactions\\utils\\visual_effect.py\n# Compiled at: 2018-10-04 01:23:52\n# Size of source mod 2**32: 8443 bytes\nfrom element_utils import build_critical_section_with_finally, build_element\nfrom interactions import ParticipantType, ParticipantTypeSingle\nfrom interactions.liability import Liability\nfrom interactions.utils.interaction_elements import XevtTriggeredElement\nfrom interactions.utils.loot_basic_op import BaseLootOperation\nfrom objects import ALL_HIDDEN_REASONS\nfrom sims4.tuning.tunable import TunableEnumEntry, OptionalTunable, TunableTuple, Tunable, HasTunableSingletonFactory, TunableVariant, AutoFactoryInit\nfrom sims4.tuning.tunable_hash import TunableStringHash32\nfrom tunable_utils.tunable_object_generator import TunableObjectGeneratorVariant\nfrom vfx import PlayEffect\n\nclass _VisualEffectLifetimeOneShot(HasTunableSingletonFactory):\n\n def start_visual_effect(self, vfx):\n vfx.start_one_shot()\n\n def get_visual_effect_sequence(self, vfx_element, sequence):\n return sequence\n\n\nclass _VisualEffectLifetimeInteraction(HasTunableSingletonFactory):\n\n def start_visual_effect(self, vfx):\n vfx.start()\n\n def get_visual_effect_sequence(self, vfx_element, sequence):\n return build_critical_section_with_finally(sequence, vfx_element._stop_vfx)\n\n\nclass _VisualEffectLifetimeContinuationLiability(Liability):\n LIABILITY_TOKEN = '_VisualEffectLifetimeContinuationLiability'\n\n def __init__(self, vfx_element, *args, **kwargs):\n (super().__init__)(*args, **kwargs)\n self._vfx_element = vfx_element\n\n def release(self):\n self._vfx_element._stop_vfx()\n\n\nclass _VisualEffectLifetimeContinuation(HasTunableSingletonFactory):\n\n def start_visual_effect(self, vfx):\n vfx.start()\n\n def get_visual_effect_sequence(self, vfx_element, sequence):\n\n def attach_liability(_):\n liability = _VisualEffectLifetimeContinuationLiability(vfx_element)\n vfx_element.interaction.add_liability(liability.LIABILITY_TOKEN, liability)\n\n return build_element((attach_liability, sequence))\n\n\nclass _VisualEffectLifetimeAnimationEvent(HasTunableSingletonFactory, AutoFactoryInit):\n FACTORY_TUNABLES = {'event': Tunable(description='\\n The event triggering the VFX stop.\\n ',\n tunable_type=int,\n default=100)}\n\n def start_visual_effect(self, vfx):\n vfx.start()\n\n def get_visual_effect_sequence(self, vfx_element, sequence):\n got_callback = False\n\n def callback(*_, **__):\n nonlocal got_callback\n if got_callback:\n return\n got_callback = True\n vfx_element._stop_vfx()\n\n vfx_element.interaction.store_event_handler(callback, handler_id=(self.event))\n return build_critical_section_with_finally(sequence, callback)\n\n\nclass PlayVisualEffectMixin:\n FACTORY_TUNABLES = {'vfx':PlayEffect.TunableFactory(description='\\n The effect to play.\\n '), \n 'vfx_target':OptionalTunable(description='\\n If enabled, the visual effect is set to target a specific joint on\\n another object or Sim.\\n ',\n tunable=TunableTuple(participant=TunableEnumEntry(description='\\n 
The participant this visual effect targets.\\n ',\n tunable_type=ParticipantTypeSingle,\n default=(ParticipantTypeSingle.TargetSim)),\n joint_name=TunableStringHash32(description='\\n The name of the slot this effect is targeted to.\\n ',\n default='_FX_')))}\n\n def __init__(self, *args, **kwargs):\n (super().__init__)(*args, **kwargs)\n\n def _start_vfx(self, participant, target_participant):\n vfx_params = {}\n if target_participant is not None:\n vfx_params['target_actor_id'] = target_participant.id\n vfx_params['target_joint_name_hash'] = self.vfx_target.joint_name\n running_vfx = (self.vfx)(participant, **vfx_params)\n self.vfx_lifetime.start_visual_effect(running_vfx)\n return running_vfx\n\n\nclass PlayVisualEffectElement(XevtTriggeredElement, PlayVisualEffectMixin):\n FACTORY_TUNABLES = {'participant':TunableObjectGeneratorVariant(description='\\n The object or objects to play the effect on.\\n ',\n participant_default=ParticipantType.Object), \n 'vfx_lifetime':TunableVariant(description='\\n Define how the lifetime of this visual effect is managed.\\n ',\n interaction=_VisualEffectLifetimeInteraction.TunableFactory(),\n continuation=_VisualEffectLifetimeContinuation.TunableFactory(),\n one_shot=_VisualEffectLifetimeOneShot.TunableFactory(),\n animation_event=_VisualEffectLifetimeAnimationEvent.TunableFactory(),\n default='one_shot')}\n\n def __init__(self, *args, **kwargs):\n (super().__init__)(*args, **kwargs)\n self._running_vfx = None\n\n def _stop_vfx(self, *_, **__):\n if self._running_vfx is not None:\n for vfx in self._running_vfx:\n vfx.stop()\n\n def _do_behavior(self):\n if self._running_vfx is not None:\n return\n self._running_vfx = []\n target_participant = None\n if self.vfx_target is not None:\n target_participant = self.interaction.get_participant(self.vfx_target.participant)\n if target_participant is None:\n return\n from sims.sim_info import SimInfo\n resolver = self.interaction.get_resolver()\n for participant in self.participant.get_objects(resolver):\n if participant.is_sim:\n if isinstance(participant, SimInfo):\n participant = participant.get_sim_instance(allow_hidden_flags=ALL_HIDDEN_REASONS)\n vfx = self._start_vfx(participant, target_participant)\n self._running_vfx.append(vfx)\n\n def _build_outer_elements(self, sequence):\n sequence = super()._build_outer_elements(sequence)\n return self.vfx_lifetime.get_visual_effect_sequence(self, sequence)\n\n\nclass PlayVisualEffectLootOp(BaseLootOperation, PlayVisualEffectMixin):\n\n def __init__(self, vfx, vfx_target, **kwargs):\n (super().__init__)(**kwargs)\n self.vfx = vfx\n self.vfx_target = vfx_target\n self.vfx_lifetime = _VisualEffectLifetimeOneShot()\n\n def _apply_to_subject_and_target(self, subject, target, resolver):\n target_participant = None\n if self.vfx_target is not None:\n target_participant = resolver.get_participant(self.vfx_target.participant)\n if target_participant is None:\n return\n from sims.sim_info import SimInfo\n if subject.is_sim:\n if isinstance(subject, SimInfo):\n subject = subject.get_sim_instance(allow_hidden_flags=ALL_HIDDEN_REASONS)\n self._start_vfx(subject, target_participant)","sub_path":"Scripts/simulation/interactions/utils/visual_effect.py","file_name":"visual_effect.py","file_ext":"py","file_size_in_byte":7175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"120699764","text":"from lemon_pi.pit.event_defs import (\n RaceStatusEvent,\n LapCompletedEvent,\n PittingEvent,\n PingEvent,\n TelemetryEvent,\n 
SendMessageEvent, RadioReceiveEvent\n)\nfrom lemon_pi.shared.events import EventHandler\nfrom lemon_pi.shared.generated.messages_pb2 import (\n EnteringPits,\n Ping,\n CarTelemetry,\n RaceFlagStatus,\n ToCarMessage\n)\nfrom lemon_pi.shared.radio import Radio\nfrom python_settings import settings\n\nfrom threading import Thread\nimport time\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass RadioInterface(Thread, EventHandler):\n\n def __init__(self, radio: Radio):\n Thread.__init__(self, daemon=True)\n self.radio = radio\n RaceStatusEvent.register_handler(self)\n LapCompletedEvent.register_handler(self)\n SendMessageEvent.register_handler(self)\n\n def handle_event(self, event, **kwargs):\n if event == RaceStatusEvent:\n self.send_race_status(**kwargs)\n\n if event == LapCompletedEvent:\n self.send_lap_completed(**kwargs)\n\n if event == SendMessageEvent:\n self.send_driver_message(**kwargs)\n\n def run(self):\n while True:\n item = self.radio.receive_queue.get()\n logger.info(\"received : {}\".format(item.__repr__()))\n self.radio.receive_queue.task_done()\n self.convert_to_event(item)\n RadioReceiveEvent.emit(car=item.sender)\n\n def send_race_status(self, flag=\"\"):\n logger.info(\"race status changed to {}\".format(flag))\n msg = ToCarMessage()\n msg.race_status.flag_status = self.set_flag_status(flag)\n self.radio.send_async(msg)\n\n @classmethod\n def set_flag_status(cls, flag):\n try:\n # this can fail with an empty string, in which case it remains\n # set to UNKNOWN\n return RaceFlagStatus.Value(flag.upper())\n except ValueError:\n pass\n return RaceFlagStatus.UNKNOWN\n\n def send_lap_completed(self, car=\"\", position=0, class_position=0,\n laps=0, ahead=None, gap=\"\", last_lap_time=0, flag=\"\"):\n logger.info(\"car: {} completed lap {} in pos {}({}) last = {}\".\n format(car, laps, position, class_position, last_lap_time))\n msg = ToCarMessage()\n msg.race_position.car_number = car\n msg.race_position.position = position\n msg.race_position.position_in_class = class_position\n msg.race_position.lap_count = laps\n msg.race_position.flag_status = self.set_flag_status(flag)\n if ahead:\n msg.race_position.car_ahead.car_number = ahead\n msg.race_position.car_ahead.gap_text = gap\n delayed_send = Thread(target=self.__delayed_send__, args=(msg, settings.RACE_DATA_SEND_DELAY_SEC))\n if settings.RACE_DATA_SEND_DELAY_SEC > 0:\n delayed_send.start()\n else:\n # run it in foreground for unittests\n delayed_send.run()\n\n def send_driver_message(self, car=\"\", msg=\"\"):\n wrapper = ToCarMessage()\n wrapper.message.text = msg\n wrapper.message.car_number = car\n self.radio.send_async(wrapper)\n\n @classmethod\n def convert_to_event(cls, proto_msg):\n if type(proto_msg) == EnteringPits:\n PittingEvent.emit(car=proto_msg.sender)\n return\n elif type(proto_msg) == CarTelemetry:\n TelemetryEvent.emit(car=proto_msg.sender,\n ts=proto_msg.timestamp,\n coolant_temp=proto_msg.coolant_temp,\n lap_count=proto_msg.lap_count,\n last_lap_time=proto_msg.last_lap_time,\n last_lap_fuel=proto_msg.last_lap_fuel_usage,\n fuel_percent=proto_msg.fuel_remaining_percent)\n elif type(proto_msg) == Ping:\n PingEvent.emit(car=proto_msg.sender, ts=proto_msg.timestamp)\n else:\n logger.info(\"unknown radio message {}\".format(type(proto_msg)))\n\n # sleep for a moment before sending data to the car so it doesn't collide with\n # data coming from the car as it passes the line\n def __delayed_send__(self, pos, delay):\n time.sleep(delay)\n 
self.radio.send_async(pos)\n","sub_path":"lemon_pi/pit/radio_interface.py","file_name":"radio_interface.py","file_ext":"py","file_size_in_byte":4313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"569246725","text":"import sqlite3\n\n# Create an in-memory SQLite3 database\n# Create a table called sales with four attributes\ncon = sqlite3.connect(':memory:')\n# (schema assumed: four illustrative attributes, so the query below has a table to read)\ncon.execute(\"CREATE TABLE sales (customer VARCHAR(20), product VARCHAR(40), amount FLOAT, date DATE)\")\n\n# Query the sales table\ncursor = con.execute(\"SELECT * FROM sales\")\nrows = cursor.fetchall()\n\n# Count the number of rows in the output\nrow_counter = 0\nfor row in rows:\n\tprint(row)\n\trow_counter += 1\nprint('Number of rows: {}'.format(row_counter))\n","sub_path":"03. Data_Science/2. Analysis/database_Chap04/1db_sqllite_test.py","file_name":"1db_sqllite_test.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"231395606","text":"#!/usr/bin/env python\nimport os\ntry:\n    from setuptools import setup\nexcept ImportError:\n    from distutils.core import setup\n\n__author__ = 'Max Arnold '\n__version__ = '0.1.0'\n\nsetup(\n    name='pg-extras',\n    version=__version__,\n\n    # Package dependencies.\n    # install_requires=[''],\n\n    # Metadata for PyPI.\n    author='Max Arnold',\n    author_email='arnold.maxim@gmail.com',\n    license='BSD',\n    url='http://github.com/max-arnold/pg-extras',\n    keywords='',\n    description='',\n    long_description=open(os.path.abspath(os.path.join(os.path.dirname(__file__), 'README.md')), 'rb').read(),\n    classifiers=[\n        'Development Status :: 3 - Alpha',\n        'Intended Audience :: Developers',\n        'License :: OSI Approved :: BSD License',\n        'Operating System :: OS Independent',\n        'Topic :: Internet'\n    ],\n    packages=['pg_extras'],\n    platforms='any',\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"273404344","text":"PARAMS = {'batch_size': 6000,\n          'embed_dim': 100,\n          'epoch': 10,\n          'learning_rate': 1e-2,\n          'regularization': 0,\n          'num_of_neg_samples': 6,\n          'seed': 42}\n\nDATA_CONST = {'work_dir': 'data',\n              'drug_train': \"/polyphar_train.csv\",\n              'drug_val': \"/polyphar_validate.csv\",\n              'drug_test': \"/polyphar_test.csv\",\n              'ent_maps': \"/ent_maps.csv\",\n              'rel_maps': \"/rel_maps.csv\",\n              'ppi': \"/ppi_data.csv\",\n              'targets': \"/targets_data.csv\",\n              'save_path': \"/trivec_saved\"}\n\nKG_CONST = {'column_names': ['from', 'rel', 'to']}\n","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"552999439","text":"import matplotlib.pyplot as plt\nimport os \n\nclass AvgMeter(object):\n\t\"\"\" this class is to record one variable such as loss or acc \"\"\"\n\n\tdef __init__(self):\n\t\tself.avg = 0\n\t\tself.sum = 0\n\t\tself.count = 0\n\t\tself.val = 0\n\t\tself.reset()\n\n\tdef reset(self):\n\t\tself.val = 0\n\t\tself.avg = 0\n\t\tself.sum = 0\n\t\tself.count = 0\n\n\tdef update(self, val, n=1):\n\t\tself.val = val\n\t\tself.sum += val * n\n\t\tself.count += n\n\t\tself.avg = self.sum / self.count\n\n\n\ndef check_dir(path):\n\tif not os.path.exists(path):\n\t\ttry:\n\t\t\tos.mkdir(path)\n\t\texcept:\n\t\t\tos.makedirs(path)\n\n\n\n\ndef plot(data1,data2, output_path, y_label1,y_label2,batch_size,name, color1='r',color2='b', x_label=\"epoch\",pre_train = True):\n    fig = plt.figure()\n    ax = 
fig.add_subplot(111)\n ax.plot(data1, color1, label=y_label1)\n ax.plot(data2, color2, label=y_label2)\n if pre_train:\n ax.set_title(\"Pre-train with batch_size=%d\"%(batch_size))\n else:\n ax.set_title(\"Fine-tuning with batch_size=%d\"%(batch_size))\n ax.set_xlabel(x_label)\n ax.set_ylabel(name)\n plt.grid() # 生成网格\n plt.legend()\n plt.savefig(os.path.join(output_path, \"{}.png\".format(name)))\n plt.close()","sub_path":"pytorch_stack_bi_LSTM/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"423852223","text":"import scrapy\nfrom scrapy.spiders import CrawlSpider, Rule\nfrom scrapy.linkextractors import LinkExtractor\nfrom planetadelibros.items import BookItem\n\n\n# NC: Novela contemporanea\nclass PlanetaCrawlerNC(scrapy.Spider):\n name = \"crawlerPlaneta\"\n start_urls = [\n 'https://www.planetadelibros.com.co/libros/novelas/00038',\n 'https://www.planetadelibros.com.co/libros/novela-historica/00013',\n 'https://www.planetadelibros.com.co/libros/novela-literaria/00012',\n 'https://www.planetadelibros.com.co/libros/novela-negra/00015',\n 'https://www.planetadelibros.com.co/libros/novelas-romanticas/00014',\n 'https://www.planetadelibros.com.co/libros/poesia/00051',\n 'https://www.planetadelibros.com.co/libros/teatro/00052'\n ]\n npag = 0\n\n def parse(self, response):\n for libro in response.xpath('//ul[@class=\"llibres-miniatures llibres-graella\"]/li'):\n tipoLibro = libro.xpath('div[@class=\"soporte\"]/text()').extract()\n if tipoLibro[0] != \"Audiolibro\" and tipoLibro[0] != \"Libro Electrónico\":\n detalle_libro = libro.xpath('div[@class=\"titol\"]/span/@data-link-js').get()\n request = scrapy.Request(detalle_libro, callback=self.parse_libro)\n yield request\n\n next_page = response.xpath('//div[@class=\"paginacio-seguent\"]/a/@href').get()\n if next_page is not None and self.npag < int(self.maxpag):\n print(\"SIGUIENTE \", next_page)\n next_page = response.urljoin(next_page)\n #print(next_page)\n #print(\"********PAGINA \", self.i)\n self.npag += 1\n yield scrapy.Request(next_page, callback=self.parse, dont_filter=True)\n\n def parse_libro(self, response):\n item = BookItem()\n nombre = response.css('div.titol::text').get()\n autor = response.css('div.autors h2 a::text').get()\n if autor is None:\n autor = response.css('div.autors h2::text').get()\n editorial = response.css('div.segell-nom a::text').get()\n nro_paginas = response.css('#num_pagines::text').get()[19:]\n precio = response.css('div.preu::text').get()[2:].replace(\".\", \"\")\n\n item['nombre'] = nombre\n item['autor'] = autor\n item['editorial'] = editorial\n item['nro_paginas'] = int(nro_paginas)\n item['precio'] = float(precio)\n item['url'] = response.url\n yield item\n","sub_path":"planetadelibros/planetadelibros/spiders/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":2429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"391782094","text":"#!/usr/bin/python\n#Pavlos Antoniou\n#Get vaf from illumina vcf file\nimport re \nimport argparse\nimport gzip\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"filename2\")\nparser.add_argument(\"bedfile\")\nargs = parser.parse_args()\n\ninputfile=args.filename2\noutvcf=args.bedfile\ndefault=\"0,0\"\ndefault1=0\nvariantdepths={}\nfilename = gzip.open(inputfile, 
'rt')\n\n\nchromosomeslist=['chr1','chr2','chr3','chr4','chr5','chr6','chr7','chr8','chr9','chr10','chr11','chr12','chr13','chr14','chr15','chr16','chr17','chr18','chr19','chr20','chr21','chr22',]\n\nf = open(outvcf, 'w')\n\nf.write(\"chr\\tstartpos\\tendpos\\tAF_Tumor\\tPN_B\\n\")\ncomment=0\nfor line in filename:\n\tline=line.strip()\n\tif(line.startswith(\"#\")):\n\t\tcomment=comment+1\n\telse:\n\t\tcolumns=str(line).split('\\t')\n\t\tpassed=columns[6]\n\t\tstart=columns[1]\n\t\tstop=columns[1]\n\t\tref=columns[3]\n\t\talt=columns[4]\n\t\tinfovalues=columns[9]\n\t\tinfonames=columns[8]\n\n\t\t#print(infovalues)\n\t\t#GT:DP:FDP:SDP:SUBDP:AU:CU:GU:TU\t\n\n\t\tif (passed==\"PASS\"):\n\n\t\t\tif(len(ref)==len(alt) and len(alt)==1):\n\t\t\t\tvartype=\"SNV\"\n\t\t\t\tvariantdepths={}\n\t\t\t\t\n\t\t\t\tchromosomechr=columns[0]\n\t\t\t\tif(chromosomechr in chromosomeslist):\n\t\t\t\t\tchromosome=chromosomechr.split('chr',1)[1]\n\t\t\t#GT:DP:FDP:SDP:SUBDP:AU:CU:GU:TU\n\t\t\t\t\tinfos=infovalues.split(':')\n\t\t\t\t\tinfoname=infonames.split(':')\n\t\t\t\t\tinfodict = dict(zip(infoname, infos))\n\t\t\t\t\tinfodict = dict(zip(infoname, infos))\n\t\t\t\t\tDP=infodict.get('DP',default1)\n\t\t\t\t\tAU=infodict.get('AU',default)\n\t\t\t\t\tCU=infodict.get('CU',default)\n\t\t\t\t\tGU=infodict.get('GU',default)\n\t\t\t\t\tTU=infodict.get('TU',default)\n\t\t\t\t\tAUS=AU.split(',')\n\t\t\t\t\tCUS=CU.split(',')\n\t\t\t\t\tGUS=GU.split(',')\n\t\t\t\t\tTUS=TU.split(',')\n\t\t\t\t\tvariantdepths['A']=AUS[0]\n\t\t\t\t\tvariantdepths['C']=CUS[0]\n\t\t\t\t\tvariantdepths['G']=GUS[0]\n\t\t\t\t\tvariantdepths['T']=TUS[0]\n\t\t\t\t\tvaf=float(float(variantdepths[alt])/float(DP))\n\t\t\t#print(ref + \" \"+ alt + \" \" + str(variantdepths[alt]) + \" \" + str(DP)+ \" \" +str(vaf))\n\t\t\t\t\tif(chromosome!='X' and chromosome !='Y'):\n\t\t\t\t\t\tf.write(chromosome+\"\\t\"+start+\"\\t\"+stop+\"\\t\"+str(vaf)+\"\\t0\"+\"\\n\")\n\t\t\telse:\n\t\t\t\tinfos=infovalues.split(':')\n\t\t\t\tinfoname=infonames.split(':')\n\t\t\t\tinfodict = dict(zip(infoname, infos))\n\t\t\t\ttir= (infodict.get('TIR', default)).split(',')[0]\n\t\t\t\ttar=(infodict.get('TAR',default)).split(',')[0]\n\t\t\t\tdp = infodict.get('DP',default1)\n\t\t\t\t#if(tir != 0 and tar !=0):\n\t\t\t\t#\tvaf1=float(float(tir)/(float(tar) +float(tir)))\t\n\t\t\t\t#\tif(chromosome!='X' and chromosome !='Y'):\n\t\t\t\t#\t\tf.write(chromosome+\"\\t\"+start+\"\\t\"+stop+str(vaf)+\"0\"+\"\\n\")\n\t\t\n\t\t\t#print(line)\n\n\n","sub_path":"python_scripts/get_snv_info_from_somaticVCF_expands.py","file_name":"get_snv_info_from_somaticVCF_expands.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"41696682","text":"from django.conf.urls import url\nfrom django.db import connection\n\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^listing/((?P[A-Za-z0-9@._+\\-]+)/)??((?P[A-Za-z0-9@._+\\-]+)/)?$', views.listing, name='listing'),\n url(r'^man/'\n r'((?P[A-Za-z0-9@._+\\-]+)/)??'\n r'((?P[A-Za-z0-9@._+\\-]+)/)?'\n r'(?P[A-Za-z0-9@._+\\-:\\[\\]]+?)'\n r'(\\.(?Phtml|txt|raw))?$',\n views.man_page, name='man_page'),\n url(r'^search', views.search, name=\"search\"),\n url(r'^(?P[a-z]+)', views.simple_view, name='simple_view'),\n]\n","sub_path":"archweb_manpages/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"13803362","text":"import sys\nimport time\nimport random\nimport pygame\nfrom pygame.locals import *\nimport Text\nimport Color\nimport Settings\nfrom Screen import Screen\nfrom Board import Board\nfrom Piece import Piece\nfrom LevelManager import LevelManager\n\n\ndef start_game_loop(clock, surface, screen, font, level_manager):\n \"\"\"The game loop\"\"\"\n board = Board(Settings.BOARD_WIDTH, Settings.BOARD_HEIGHT)\n next_piece, piece = Piece(), Piece()\n move_left, move_right = False, False\n last_left, last_right, last_fall = time.time(), time.time(), time.time()\n while True:\n check_quit()\n if piece is None:\n piece = next_piece\n next_piece = Piece()\n if not board.can_move(piece, 0, 0): # game over\n piece.draw(screen)\n return\n for event in pygame.event.get():\n if event.type is KEYUP:\n if event.key is K_p:\n pygame.mixer.music.pause()\n screen.show_text_screen(Text.GAME_PAUSED_TEXT)\n check_key_press(clock)\n pygame.mixer.music.unpause()\n elif event.key in [K_a, K_LEFT]:\n move_left = False\n elif event.key in [K_d, K_RIGHT]:\n move_right = False\n elif event.type is KEYDOWN:\n if event.key in [K_w, K_UP]:\n piece.rotate()\n if not board.can_move(piece, 0, 0):\n piece.rotate(3)\n elif event.key in [K_a, K_LEFT]:\n move_left, move_right = True, False\n if board.can_move(piece, -1, 0):\n piece.board_x -= 1\n last_left = time.time()\n elif event.key in [K_d, K_RIGHT]:\n move_left, move_right = False, True\n if board.can_move(piece, 1, 0):\n piece.board_x += 1\n last_right = time.time()\n elif event.key in [K_s, K_DOWN] and board.can_move(piece, 0, 1):\n piece.board_y += 1\n elif event.key is K_SPACE:\n dy = 1\n while board.can_move(piece, 0, dy):\n dy += 1\n else:\n piece.board_y += dy - 1\n elif event.key is K_l: # level up\n level_manager.level_up()\n current_time = time.time()\n # handle piece falling\n if current_time - last_fall >= level_manager.down_freq:\n if board.can_move(piece, 0, 1):\n last_fall = current_time\n piece.board_y += 1\n # handle piece sliding left/right due to key hold\n if move_left and (current_time - last_left >= Settings.MOVE_SIDEWAYS_FREQ) and board.can_move(piece, -1, 0):\n piece.board_x -= 1\n last_left = time.time()\n if move_right and (current_time - last_right >= Settings.MOVE_SIDEWAYS_FREQ) and board.can_move(piece, 1, 0):\n piece.board_x += 1\n last_right = time.time()\n # piece landing\n if not board.can_move(piece, 0, 1):\n board.add_piece(piece)\n piece = None\n # check for complete lines and update score\n n_complete = board.check_complete_lines()\n level_manager.update_score(n_complete)\n # redraw the screen\n surface.fill(Color.BG_COLOR)\n top, left, vertical_space = Settings.MARGIN_TOP, Settings.MARGIN_LEFT, Settings.VERTICAL_SPACE\n screen.draw_text_top_left('Score: %s' % level_manager.score, font, Color.TEXT_COLOR, left * 7.5, top)\n 
screen.draw_text_top_left('Level: %s' % level_manager.level, font, Color.TEXT_COLOR, left, top)\n screen.draw_text_top_left('Next:', font, Color.TEXT_COLOR, left, top + vertical_space)\n board.draw(screen)\n next_piece.draw(screen, pixel_x=left, pixel_y=top + 2 * vertical_space)\n if piece is not None:\n piece.draw(screen)\n pygame.display.update()\n clock.tick(Settings.FPS)\n\n\ndef quit_game():\n pygame.quit()\n sys.exit()\n\n\ndef check_quit(quit_keys=(K_ESCAPE,)):\n for _ in pygame.event.get(QUIT):\n quit_game()\n for event in pygame.event.get(KEYUP):\n if event.key in quit_keys:\n quit_game()\n pygame.event.post(event)\n\n\ndef check_key_press(fps_clock):\n while True:\n check_quit()\n for event in pygame.event.get([KEYDOWN, KEYUP]):\n if event.type == KEYUP:\n return\n else:\n pygame.display.update()\n fps_clock.tick()\n\n\ndef main():\n \"\"\"The main function of the game\"\"\"\n pygame.init()\n fps_clock = pygame.time.Clock()\n main_surface = pygame.display.set_mode((Settings.WINDOW_WIDTH, Settings.WINDOW_HEIGHT))\n basic_font = pygame.font.Font(Settings.BASIC_FONT, Settings.FONT_SIZE)\n big_font = pygame.font.Font(Settings.BIG_FONT, Settings.BIG_FONT_SIZE)\n pygame.display.set_caption(Text.GAME_NAME)\n screen = Screen(main_surface, big_font, basic_font)\n screen.show_text_screen(Text.SPLASH_TEXT)\n check_key_press(fps_clock)\n while True: # the main loop\n check_quit()\n level_manager = LevelManager()\n sound = random.choice([\"sounds/tetrisb.mid\", \"sounds/tetrisc.mid\"])\n pygame.mixer.music.load(sound)\n pygame.mixer.music.play(-1, 0.0)\n start_game_loop(fps_clock, main_surface, screen, basic_font, level_manager)\n pygame.mixer.music.stop()\n screen.show_text_screen(Text.GAME_OVER_TEXT, fill=False)\n check_key_press(fps_clock)\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"tetris.py","file_name":"tetris.py","file_ext":"py","file_size_in_byte":5690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"560159265","text":"print('Было сгенерировано число от 1 до 15. Давай сыграем в угадайку :)')\nimport random\nimport sys\na = random.randint(1, 15)\ncounter = 3\nprint('У тебя есть', counter, 'попытки.')\nprint('Введите число от 1 до 15')\nwhile counter != 0:\n b = int(input()) \n if b > 15 or b < 0:\n False\n print('Ты ввел не то число. Попробуй сыграть сначала')\n break \n else:\n if a == b:\n print('Поздравляю, ты угадал! Действительно, искомое число было', a)\n break\n sys.exit()\n else:\n counter = counter - 1\n print('Попыток осталось:', counter)\nif counter == 0:\n print('Ты проиграл. 
Загаданное число было', a)\ninput('Нажмите Enter чтобы продолжить')\n","sub_path":"rnd_v3.py","file_name":"rnd_v3.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"348335483","text":"from dash.dependencies import Input, Output\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_table\nimport pandas as pd\n\nfrom components.data_prepare import get_data, get_column_vals, convert_month_to_date, filter_data\nfrom components.constants import BASE_DATA_COLUMNS\nfrom app import app\n\ndf_data = get_data()\npayer_vals = get_column_vals(df_data, 'PAYER')\nserv_cat_vals = get_column_vals(df_data, 'SERVICE_CATEGORY')\ncl_spec_vals = get_column_vals(df_data, 'CLAIM_SPECIALTY')\n\napp.layout = html.Div([\n html.Header([\n html.H2(\n 'Test financial dashboard')\n ],\n style={'background-color': '#88a3d4'},\n className='row gs-header gs-text-header'\n ),\n\n html.Div(\n [\n html.H4('Base filters'),\n html.Div(\n [\n html.H6('Select date range'),\n dcc.DatePickerRange(\n id='date_period_selector',\n start_date_placeholder_text='Start Period',\n end_date_placeholder_text='End Period',\n calendar_orientation='vertical',\n display_format='MMMM Y',\n stay_open_on_select=True,\n start_date=convert_month_to_date(df_data['MONTH'].min()),\n end_date=convert_month_to_date(df_data['MONTH'].max()),\n )\n\n ]\n ),\n html.Div(\n [\n html.H6('Select PAYERS'),\n dcc.Dropdown(\n id='payer-dropdown',\n options=[{'label': item, 'value': item} for item in payer_vals],\n value=payer_vals[:3],\n multi=True,\n ),\n\n ],\n ),\n html.Div(\n [\n html.H6('Select SERVICE CATEGORIES'),\n dcc.Dropdown(\n id='serv-cat-dropdown',\n options=[{'label': item, 'value': item} for item in serv_cat_vals],\n value=serv_cat_vals[:3],\n multi=True\n ),\n\n ],\n ),\n html.Div(\n [\n html.H6('Select CLAIM SPECIALTY'),\n dcc.Dropdown(\n id='cl-spec-dropdown',\n options=[{'label': item, 'value': item} for item in cl_spec_vals],\n value=cl_spec_vals,\n multi=True,\n style={'overflow-y': 'scroll', 'height': '200px'}\n ),\n ],\n ),\n ],\n style={'background-color': '#dbe0ec',\n 'padding': '10px'},\n ),\n\n\n html.Div(\n [\n html.H6('PAID AMOUNT graph by month'),\n dcc.Graph(id='base-graph'),\n ]\n ),\n html.Div(\n className='row',\n children=[\n html.Div(\n [\n html.H6('Data in table format'),\n dash_table.DataTable(\n id='table-paging-with-graph',\n columns=[{'name': i, 'id': i} for i in BASE_DATA_COLUMNS],\n data=df_data[BASE_DATA_COLUMNS].to_dict('records'),\n page_size=20,\n sort_mode='multi',\n filter_action='native',\n sort_action='native',\n ),\n ],\n\n style={'margin': {'l': 10, 'r': 10, 't': 10, 'b': 50}}\n ),\n html.Div(\n id='table-paging-with-graph-container',\n )\n ]\n )\n\n], style={'width': '500'})\n\n\n@app.callback(Output('base-graph', 'figure'), [Input('payer-dropdown', 'value'),\n Input('serv-cat-dropdown', 'value'),\n Input('cl-spec-dropdown', 'value'),\n Input('date_period_selector', 'start_date'),\n Input('date_period_selector', 'end_date')])\ndef update_graph(payer_value, serv_cat_value, cl_spec_value, start_date, end_date):\n filtered_df = filter_data(df_data, payer_value, serv_cat_value, cl_spec_value, start_date, end_date)\n return {\n 'data': [{\n 'x': filtered_df.MONTH_DT,\n 'y': filtered_df.PAID_AMOUNT\n }],\n 'layout': {'margin': {'l': 40, 'r': 0, 't': 20, 'b': 30}}\n }\n\n\n@app.callback(Output('table-paging-with-graph', 'data'), [Input('payer-dropdown', 'value'),\n 
Input('serv-cat-dropdown', 'value'),\n Input('cl-spec-dropdown', 'value'),\n Input('date_period_selector', 'start_date'),\n Input('date_period_selector', 'end_date')])\ndef update_table(payer_value, serv_cat_value, cl_spec_value, start_date, end_date):\n filtered_df = filter_data(df_data, payer_value, serv_cat_value, cl_spec_value, start_date, end_date)\n return filtered_df[BASE_DATA_COLUMNS].to_dict('records')\n\n\n@app.callback(\n Output('table-paging-with-graph-container', 'children'),\n [Input('table-paging-with-graph', 'data')])\ndef update_table_graph(rows):\n dff = pd.DataFrame(rows)\n if not dff.empty:\n return html.Div(\n [\n html.H6('PAID AMOUNT bars group by'),\n html.Div([\n dcc.Graph(\n id=column,\n figure={\n 'data': [\n {\n 'x': dff[column] if column in dff else [],\n 'y': dff['PAID_AMOUNT'],\n 'type': 'bar',\n 'marker': {'color': '#0074D9'},\n }\n ],\n 'layout': {\n 'title': column,\n 'xaxis': {'automargin': True},\n 'yaxis': {'automargin': True},\n 'height': '400px',\n 'margin': {'t': '20px', 'l': '20px', 'r': '20px'},\n },\n },\n )\n for column in ['SERVICE_CATEGORY', 'PAYER']\n ])\n ]\n )\n\n\nif __name__ == '__main__':\n app.run_server(debug=True, host='0.0.0.0', port=8050)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":6838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"146954057","text":"# Copyright (C) 2016 Google Inc.\n# Licensed under http://www.apache.org/licenses/LICENSE-2.0 \n\nfrom datetime import date\n\nimport holidays\n\nclass GoogleHolidays(holidays.UnitedStates):\n def _populate(self, year):\n holidays.UnitedStates._populate(self, year)\n self[date(year, 11, 27)] = \"Thanksgiving Day 2\"\n self[date(year, 12, 23)] = \"Christmas Holiday\"\n self[date(year, 12, 24)] = \"Christmas Eve\"\n self[date(year, 12, 31)] = \"New Year's Eve\"\n","sub_path":"src/ggrc_workflows/services/workflow_cycle_calculator/google_holidays.py","file_name":"google_holidays.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"515700277","text":"import http.client\nimport json\nimport os\nimport sys\nimport datetime\n\n# config = loadConfiguration()\n\ndef loadConfiguration():\n return []\n\ndef getPath():\n conn = http.client.HTTPConnection('69.64.32.172', 3010)\n id = sys.argv[1]\n conn.request('GET', '/blackboxes/' + id + '/getPath')\n resp = conn.getresponse()\n responseStr = resp.read().decode()\n if resp.status == 200:\n data = json.loads(responseStr)\n awsFolder = data['path']\n return awsFolder\n else:\n return ''\n\ndef updateSettingsFile():\n conn = http.client.HTTPConnection('69.64.32.172', 3010)\n id = sys.argv[1]\n conn.request('GET', '/blackboxes/' + id + '/getActiveSettings')\n resp = conn.getresponse()\n responseStr = resp.read().decode()\n if is_json(responseStr):\n jsonDict = json.loads(responseStr)\n now = datetime.datetime.now();\n nowStr = now.strftime(\"%d/%m/%Y %H:%M:%S\")\n jsonDict.update({'lastSyncDate': nowStr})\n jsonDict[\"bbId\"] = id\n responseStr = json.dumps(jsonDict);\n f = open('/home/zurikato/apps/tvz-media-server/settings-bb.json', 'w')\n f.write(responseStr)\n return responseStr\n return ''\n\ndef is_json(str):\n try:\n json_object = json.loads(str)\n except ValueError as e:\n return False\n return True\n\n\n\npath = getPath()\nsettingsStr = updateSettingsFile()\nsettingsDict = json.loads(settingsStr)\nprint(settingsDict)\nif settingsDict['syncData'] and 
settingsDict['allow4gSync'] == True:\n if settingsDict['deleteOnSync'] == True:\n print('se puede borrar en el sync')\n os.system(\"/usr/bin/aws s3 sync s3://\" + path + \" /home/zurikato/apps/tvz-media-server/media --size-only --delete --exclude \\\"*local-*.*\\\"\")\n\n else:\n print('no se puede borrar en el sync')\n os.system(\"/usr/bin/aws s3 sync s3://\" + path + \" /home/zurikato/apps/tvz-media-server/media --size-only --exclude \\\"*local-*.*\\\"\")\nelse:\n print(\"no se sincroniza del servidor\")\n# os.system(\"/usr/bin/aws s3 sync s3://\" + path + \" /home/zurikato/apps/tvz-media-server/media --size-only --delete --exclude \\\"*local-*.*\\\"\")\nos.system(\"cd /home/zurikato/apps/tvz-media-server/media; /usr/bin/find $PWD -regex '.*\\.\\(mkv\\|webm\\|avi\\|mp4\\)$' | while read f; do /usr/bin/ffmpeg -n -i \\\"$f\\\" -ss 00:00:03 -vframes 1 -s 480x320 ${f%/*}/thumb-\\\"${f##*/}\\\".jpg; done\")\n\n\n\n","sub_path":"install_files/scripts/sync-aws.py","file_name":"sync-aws.py","file_ext":"py","file_size_in_byte":2366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"595450919","text":"\"\"\"kvdomingo URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/3.0/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. Import the include() function: from django.urls import include, path\r\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib import admin\r\nfrom django.urls import include, path\r\nfrom django.views.generic.base import TemplateView\r\nfrom django.contrib.sitemaps import GenericSitemap\r\nfrom django.contrib.sitemaps.views import sitemap\r\nfrom svip.models import BlogPost, Course\r\nfrom photography.models import Client\r\nfrom web.sitemap import StaticViewSitemap\r\n\r\n\r\nsitemaps = {\r\n 'static': StaticViewSitemap,\r\n 'photography': GenericSitemap(\r\n {\r\n 'queryset': Client.objects.all(),\r\n },\r\n priority=0.5,\r\n ),\r\n 'courses': GenericSitemap(\r\n {\r\n 'queryset': Course.objects.all(),\r\n },\r\n priority=0.5,\r\n ),\r\n 'blog': GenericSitemap(\r\n {\r\n 'queryset': BlogPost.objects.all(),\r\n 'date_field': 'created',\r\n },\r\n priority=0.6,\r\n ),\r\n}\r\n\r\n\r\nurlpatterns = [\r\n path('admin/', admin.site.urls),\r\n\tpath('robots.txt', TemplateView.as_view(template_name='web/robots.txt', content_type='text/plain')),\r\n path('sitemap.xml', sitemap, {'sitemaps': sitemaps}, name='django.contrib.sitemaps.views.sitemap'),\r\n path('api-auth/', include('rest_framework.urls')),\r\n path('tinymce/', include('tinymce.urls')),\r\n path('photography/', include('photography.urls')),\r\n path('svip/', include('svip.urls')),\r\n path('dev/', include('dev.urls')),\r\n path('', include('web.urls')),\r\n]\r\n","sub_path":"kvdomingo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"88337408","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\n\n\ndef train(model: nn.Module, loader: DataLoader, optimizer: 
optim.Adam, criterion: nn.Module, clip: float) -> float:\n    model.train()\n\n    epoch_loss = 0.0\n    for _, (src, trg) in enumerate(loader):\n        src = src.cuda()\n        trg = trg.cuda()\n\n        optimizer.zero_grad()\n\n        output = model(src, trg)\n        output = output[1:].view(-1, output.shape[-1])\n\n        trg = trg[1:].view(-1)\n\n        loss = criterion(output, trg)\n        loss.backward()\n\n        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)\n        optimizer.step()\n\n        epoch_loss += loss.item()\n\n    return epoch_loss / len(loader)\n\n","sub_path":"nlp_sample/seq2seq_translation_refactor/model_selection/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"427642414","text":"# _*_ coding: utf-8 _*_\nfrom django.conf.urls import url\nfrom django.urls import re_path\n\nfrom users.views import UserinfoView, UploadImageView, UpdatePwdView, SendEmailCodeView, UpdateEmailView, MyCourseView, \\\n    MyFavOrgView, MyFavTeacherView, MyFavCourseView, MymessageView\n\n__author__ = 'hzy'\n__date__ = '2020/2/8 11:23'\n\napp_name = 'users'\nurlpatterns = [\n    #用户信息\n    url('info/$', UserinfoView.as_view(), name=\"user_info\"),\n    #用户头像上传\n    url('image/upload/$', UploadImageView.as_view(), name=\"image_upload\"),\n    #用户个人中心修改密码\n    url('update/pwd/$', UpdatePwdView.as_view(), name=\"update_pwd\"),\n    #发送邮箱验证码\n    url('sendemail_code/$', SendEmailCodeView.as_view(), name=\"sendemail_code\"),\n    #修改邮箱\n    url('update_email/$', UpdateEmailView.as_view(), name=\"update_email\"),\n    #我的课程\n    url('mycourse/$', MyCourseView.as_view(), name=\"mycourse\"),\n    #我收藏的课程机构\n    url('myfav/org/$', MyFavOrgView.as_view(), name=\"myfav_org\"),\n    #我收藏的教师\n    url('myfav/teacher/$', MyFavTeacherView.as_view(), name=\"myfav_teacher\"),\n    #我收藏的课程\n    url('myfav/course/$', MyFavCourseView.as_view(), name=\"myfav_course\"),\n    #我的消息\n    url('mymessage/$', MymessageView.as_view(), name=\"mymessage\"),\n\n\n\n]","sub_path":"online_edu/apps/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"509068847","text":"__all__ = ['customAxisTicks']\n\ndef customAxisTicks(rng, axis=0, uniform=False):\n    \"\"\"Use to set custom axis ticks in the render view\n    \"\"\"\n    from paraview.simple import GetActiveViewOrCreate, RenderAllViews\n    # note that third parameter is the step size\n    # get the active view\n    rv = GetActiveViewOrCreate('RenderView')\n    if axis == 0 or uniform:\n        rv.AxesGrid.XAxisUseCustomLabels = 1\n        rv.AxesGrid.XAxisLabels = rng\n    if axis == 1 or uniform:\n        rv.AxesGrid.YAxisUseCustomLabels = 1\n        rv.AxesGrid.YAxisLabels = rng\n    if axis == 2 or uniform:\n        rv.AxesGrid.ZAxisUseCustomLabels = 1\n        rv.AxesGrid.ZAxisLabels = rng\n    RenderAllViews()\n    return None\n\ncustomAxisTicks.__displayname__ = 'Custom Axis Ticks'\ncustomAxisTicks.__type__ = 'macro'\n","sub_path":"pvmacros/vis/axes.py","file_name":"axes.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"40831222","text":"import sys\nimport socket\nimport pickle\nfrom PyQt5 import QtWidgets\nfrom PyQt5 import QtGui\nfrom PyQt5 import QtCore\nfrom PyQt5 import uic\nfrom PyQt5.QtCore import pyqtSlot\n\n\nclass MainWindow(QtWidgets.QDialog):\n\tdef __init__(self, parent = None):\n\t\tQtWidgets.QDialog.__init__(self, parent)\n\n\t\tself.HOST = socket.gethostname()\n\t\tself.PORT = 9999\n\t\tself.socket = 
socket.socket(socket.AF_INET, socket.SOCK_STREAM, proto = 0)\n\n\t\ttry:\n\t\t\tself.socket.connect((self.HOST, self.PORT))\n\t\texcept ConnectionRefusedError:\n\t\t\tmsgBox = QtWidgets.QMessageBox()\n\t\t\tmsgBox.setIcon(QtWidgets.QMessageBox.Critical)\n\t\t\tmsgBox.setWindowTitle(\"서버 연결 에러\")\n\t\t\tmsgBox.setText(\"서버에 연결할 수 없습니다.\")\n\t\t\tmsgBoxVal = msgBox.exec_()\n\n\t\t\tif msgBoxVal == QtWidgets.QMessageBox.Ok:\n\t\t\t\tsys.exit()\n\n\t\tself.ui = uic.loadUi(\"ui/login.ui\", self)\n\t\tself.ui.show()\n\n\t@pyqtSlot()\n\tdef loginClick(self):\n\t\tlogin_info = []\n\n\t\tmsg_box = QtWidgets.QMessageBox()\n\t\tmsg_box.setIcon(QtWidgets.QMessageBox.Information)\n\n\t\tlogin_info.append(self.idLineEdit.text())\n\t\tif login_info[0] == \"\":\n\t\t\tmsg_box.setText(\"아이디를 입력하세요.\")\n\t\t\tmsg_box.exec_()\n\t\t\treturn\n\n\t\tlogin_info.append(self.passwordLineEdit.text())\n\t\tif login_info[1] == \"\":\n\t\t\tmsg_box.setText(\"비밀번호를 입력하세요.\")\n\t\t\tmsg_box.exec_()\n\t\t\treturn\n\n\t\tpic = pickle.dumps(login_info)\n\t\tself.socket.send(pic)\n\t\tflag = self.socket.recv(1024)\n\t\tprint(flag.decode())\n\n\t@pyqtSlot()\n\tdef registClick(self):\n\t\tuser_info = []\n\t\tself.close()\n\t\tself.dialog = uic.loadUi(\"ui/regist.ui\", self)\n\t\tself.dialog.show()\n\nif __name__ == \"__main__\":\n\tapp = QtWidgets.QApplication(sys.argv)\n\tsecure_ween = MainWindow()\n\tsys.exit(app.exec())","sub_path":"Client.py","file_name":"Client.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"433513178","text":"import os\n\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nSECRET_KEY = 'pvh9d)+7aui4=evh$yv!qgbr3oyz-4=^oj_%6g8+v57b=de5)7'\nSETTINGS_ROOT = os.path.abspath(os.path.dirname(__file__))\nPROJECT_ROOT = os.path.abspath(os.path.dirname(SETTINGS_ROOT))\nWSGI_APPLICATION = 'openbudget.wsgi.application'\n\nROOT_URLCONF = 'openbudget.ui.urls'\nSUBDOMAIN_URLCONFS = {\n '': 'openbudget.ui.urls',\n 'www': 'openbudget.ui.urls',\n 'he': 'openbudget.ui.urls',\n 'en': 'openbudget.ui.urls',\n 'ru': 'openbudget.ui.urls',\n 'ar': 'openbudget.ui.urls',\n 'api': 'openbudget.api.urls',\n}\n\nSITE_ID = 1\nTIME_ZONE = 'UTC'\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.abspath(os.path.join(os.path.dirname(PROJECT_ROOT), 'local.db')),\n 'USER': '',\n 'PASSWORD': '',\n 'HOST': '',\n 'PORT': '',\n }\n}\n\nMEDIA_ROOT = os.path.abspath(\n os.path.join(os.path.dirname(PROJECT_ROOT),\n 'static',\n 'media'\n )\n)\n\nMEDIA_URL = '/static/media/'\n\nSTATIC_ROOT = os.path.abspath(\n os.path.join(os.path.dirname(PROJECT_ROOT),\n 'static'\n )\n)\n\nSTATIC_URL = '/static/'\n\nSTATICFILES_DIRS = (\n os.path.abspath(\n os.path.join(PROJECT_ROOT, 'commons', 'static')\n ),\n)\n\nTEMPLATE_DIRS = (\n os.path.abspath(\n os.path.join(PROJECT_ROOT, 'commons', 'templates')\n ),\n)\n\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'subdomains.middleware.SubdomainURLRoutingMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 
'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'openbudget.apps.international.middleware.InterfaceLanguage',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.comments',\n 'grappelli.dashboard',\n 'grappelli',\n 'grappelli_modeltranslation',\n 'django.contrib.admin',\n 'django.contrib.sitemaps',\n 'gunicorn',\n 'south',\n 'haystack',\n 'djcelery',\n 'subdomains',\n 'registration',\n 'rest_framework',\n 'rosetta_grappelli',\n 'rosetta',\n 'modeltranslation',\n 'taggit',\n 'openbudget.apps.accounts',\n 'openbudget.apps.budgets',\n 'openbudget.apps.contexts',\n 'openbudget.apps.entities',\n 'openbudget.apps.interactions',\n 'openbudget.apps.international',\n 'openbudget.apps.pages',\n 'openbudget.apps.sources',\n 'openbudget.apps.taxonomies',\n 'openbudget.apps.transport',\n 'openbudget.api',\n 'openbudget.commons',\n)\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.debug',\n 'django.core.context_processors.i18n',\n 'django.core.context_processors.media',\n 'django.core.context_processors.static',\n 'django.core.context_processors.tz',\n 'django.contrib.messages.context_processors.messages',\n 'django.core.context_processors.request',\n 'openbudget.commons.context_processors.get_site',\n)\n\n# FIXTURE CONF\nFIXTURE_DIRS = (\n os.path.abspath(\n os.path.join(PROJECT_ROOT, 'fixtures')\n ),\n)\n\n# LANGUAGE CONF\nLANGUAGE_CODE = 'en'\nMODELTRANSLATION_DEFAULT_LANGUAGE = LANGUAGE_CODE\ngettext = lambda s: s\nLANGUAGES = (\n ('en', gettext('English')),\n ('he', gettext('Hebrew')),\n ('ar', gettext('Arabic')),\n ('ru', gettext('Russian')),\n)\n\n# UNICODE SLUG CONF\nAUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'\n\n# USER ACCOUNT CONF\nACCOUNT_ACTIVATION_DAYS = 7\nAUTH_PROFILE_MODULE = 'accounts.UserProfile'\nLOGIN_URL = '/accounts/login/'\nLOGIN_REDIRECT_URL = '/'\nLOGOUT_URL = '/accounts/logout/'\nABSOLUTE_URL_OVERRIDES = {\n 'auth.user': lambda u: \"/accounts/%s/\" % u.get_profile.uuid,\n}\n\n# GRAPPELLI CONF\nGRAPPELLI_ADMIN_TITLE = 'Open Budget'\nGRAPPELLI_INDEX_DASHBOARD = 'openbudget.dashboard.OpenBudgetDashboard'\n\n# DJANGO REST FRAMEWORK CONF\nREST_FRAMEWORK = {\n 'DEFAULT_PERMISSION_CLASSES': (\n 'rest_framework.permissions.AllowAny',\n ),\n 'PAGINATE_BY': 10\n}\n\n# HAYSTACK CONF\n\nHAYSTACK_CONNECTIONS = {\n 'default': {\n 'ENGINE': 'haystack.backends.whoosh_backend.WhooshEngine',\n 'PATH': os.path.join(PROJECT_ROOT, 'commons', 'search', 'index'),\n },\n}\n\n# CELERY CONF\nfrom celery.schedules import crontab\nimport djcelery\ndjcelery.setup_loader()\nBROKER_URL = 'redis://127.0.0.1:6379/'\nCELERYBEAT_SCHEDULE = {\n \"update_index\": {\n \"task\": \"tasks.update_index\",\n \"schedule\": crontab(\n minute=0,\n hour=0\n ),\n },\n \"rebuild_index\": {\n \"task\": \"tasks.rebuild_index\",\n 
\"schedule\": crontab(\n day_of_week='saturday',\n minute=0,\n hour=0\n ),\n }\n}\n\n# EMAIL CONF\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.gmail.com'\nEMAIL_PORT = 587\nEMAIL_HOST_USER = ''\nEMAIL_HOST_PASSWORD = ''\n\n# SENTRY CONF\nSENTRY_DSN = ''\n\n# DEVELOPER ADMINS CONF\nADMINS = (\n ('', ''),\n ('', ''),\n)\n\n# OPEN BUDGET CUSTOM CONF\nTEMP_FILES_DIR = os.path.abspath(os.path.join(os.path.dirname(PROJECT_ROOT), 'tmp'))\n","sub_path":"openbudget/settings/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":6226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"586550925","text":"import sys\nimport os.path\nimport system\nimport dirutils\nimport tempfile\n\n\ntemp_path = os.path.abspath(sys.argv[1])\ndirectory = os.path.abspath(sys.argv[2])\ncsv = os.path.abspath(sys.argv[3])\nexe = sys.argv[4]\nif (len(sys.argv) > 5):\n opts = sys.argv[5]\nelse:\n opts = \"\"\n\nprint(\"======Running frama-c=======\")\nprint(\"Working dir:\", directory)\nprint(\"CSV file:\", csv)\nprint(\"Excutable:\", exe)\nprint(\"Executable options:\", opts)\n\nc_files = dirutils.list_files(directory, '.c') # + dirutils.list_files(directory, '.cpp')\nc_files = [x for x in c_files if not ('invalid_extern' in x)]\nprint(c_files)\n(output, err, exit, time) = system.system_call(exe + \" -val \" + \" \".join(c_files), directory)\ntemp_file = open(temp_path, 'w')\ntemp_file.write(output.decode(\"utf-8\"))\ntemp_file.close()\n\nsys.stdout = open(csv, \"w\")\nprint(\"File, Line, Error\")\nwith open(temp_path) as f:\n for line in f.readlines():\n a = line.strip().split(\":\")\n if (len(a) >= 3 and (a[0].endswith(\".c\") or a[0].endswith(\".cpp\"))):\n message = a[2]\n i = 3\n while (i < len(a)):\n message = message + \":\" + a[i]\n i = i + 1\n print(os.path.basename(a[0]), \",\", a[1], \",\", message)\nsys.stdout = sys.__stdout__ \nprint(\"======Done with frama-c=======\")\n","sub_path":"python/framac.py","file_name":"framac.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"311243690","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDownload Calibration files for a given night\n\n@author: Tom Williams\n\"\"\"\n\nimport numpy as np\nimport os\nimport wget\nfrom astropy.table import Table\nimport urllib\nimport datetime\nimport jdcal\nimport tarfile\nimport glob\nimport gzip\nimport shutil\nimport requests\n\n#N.B DOESN'T WORK FOR PROPRIETARY DATA\n\n#Read in parameters we need\n\ndl_folder = '/home/universe/c1625914/calibrators/850'\n\nfmt = '%Y%m%d'\n\n#Get the dates we need calibrations from \n\nos.chdir('/home/universe/c1625914/M51/850')\n\nfiles = glob.glob('*.sdf')\n\ndates = []\n\nfor file in files:\n \n date = file[3:11]\n \n if date not in dates:\n \n dates.append(date)\n\n#Check we haven't downloaded files from that night already\n\nos.chdir(dl_folder)\n\ndl_files = glob.glob('*.sdf')\n\ndl_dates = []\n\nfor dl_file in dl_files:\n \n dl_date = dl_file[3:11]\n \n if dl_date not in dl_dates:\n \n dl_dates.append(dl_date)\n \nidx_to_delete = []\n \nfor i in range(len(dates)):\n \n if dates[i] in dl_dates:\n \n idx_to_delete.append(i)\n \n#Step backwards through the indices so things don't shuffle around\n \nfor idx in idx_to_delete[::-1]:\n \n del dates[idx]\n\nfor date in dates:\n \n print('Downloading '+date)\n \n time = datetime.datetime.strptime(date, fmt)\n time_tuple = time.timetuple()\n \n #Convert to a Julian Calendar date\n \n j_time = 
sum(jdcal.gcal2jd(time.year, \n time.month, \n time.day))\n j_time = np.floor(j_time)\n \n #Build up a URL to request SCUBA-2 Calibration data\n \n #Generic CADC stuff...not sure what this does honestly\n \n url = 'http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/'\n url += 'tap/sync?REQUEST=downloads-only&USEMAQ=true&LANG=ADQL&'\n \n url += 'QUERY=SELECT * FROM caom2.Plane AS Plane '\n url += 'JOIN caom2.Observation AS Observation ON Plane.obsID = Observation.obsID ' \n \n #Select SCUBA-2 850 observations\n \n url += \"WHERE Observation.collection = 'JCMT' \"\n url += \"AND Observation.instrument_name = 'SCUBA-2' \"\n url += \"AND Plane.energy_bandpassName = 'SCUBA-2-850um' \"\n \n #Find certain observations on a certain night -- COMMENT THIS OUT IF DOWNLOADING A WHOLE PROPOSAL \n \n url += \"AND INTERSECTS( INTERVAL( \"+str(j_time)[2:]+\", \"+str(j_time+1)[2:]+\" ), Plane.time_bounds_samples ) = 1 \"\n \n #Select calibration observations\n \n url += \"AND lower(Observation.proposal_id) LIKE 'jcmtcal' \"\n url += \"AND Observation.type = 'pointing' \" \n \n #Alternatively, select actual science observations\n \n# url += \"AND lower(Observation.proposal_id) LIKE 'm17bp003' \"\n \n #Select raw data\n \n url += \"AND Plane.calibrationLevel = '0' \"\n \n #Don't select anything that's marked by CADC as junk or by SCUBA-2 as failed\n \n url += \"AND (Plane.quality_flag IS NULL OR Plane.quality_flag != 'junk' )\"\n url += \"AND (Observation.requirements_flag IS NULL OR Observation.requirements_flag != 'fail') \"\n \n #Download and read in this VOTable\n \n votable = requests.get(url,\n stream=True)\n \n with open('votable.xml', 'wb') as f:\n shutil.copyfileobj(votable.raw, f)\n \n table = Table.read('votable.xml')\n \n #For each URI, download the file\n \n for uri in table['caomPlaneURI']:\n \n final_url = 'http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/caom2ops/pkg?ID='+uri\n \n urllib.request.urlretrieve(final_url, 'archive.tar')\n \n #Unzip the file\n \n output_dir = \".\"\n tar = tarfile.open('archive.tar')\n for member in tar.getmembers():\n if member.isreg(): \n member.name = os.path.basename(member.name)\n tar.extract(member,output_dir) \n \n #Remove the readme file\n \n os.remove('README')\n \n #Unzip any pesky .gz we have\n \n gz_files = glob.glob('*.sdf.gz')\n \n for gz_file in gz_files:\n with gzip.open(gz_file, 'rb') as f_in:\n with open(gz_file[:-3], 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n \n os.remove(gz_file)\n \n os.remove('archive.tar')\n \n os.remove('votable.xml')\n \n print(date+' complete!')\n\nprint('All done!')","sub_path":"gmc_catalogue/s2_cal/download_calib.py","file_name":"download_calib.py","file_ext":"py","file_size_in_byte":4507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"371444055","text":"altura=int(input())\nlargura=int(input())\ncomprimento=int(input())\nArea_piso= (largura * comprimento)\nVolume_sala=(largura * comprimento * altura)\nArea_Paredes=(2 * altura)*(largura + comprimento)  # area of the four walls: 2*h*(w+l)\n \nprint(f'{Area_piso}')\nprint(f'{Volume_sala}')\nprint(f'{Area_Paredes}')","sub_path":"atividade04.py","file_name":"atividade04.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"474609724","text":"from unittest import TestCase\nfrom parameterized import parameterized\nfrom cloudrail.dev_tools.rule_test_utils import create_empty_entity\nfrom cloudrail.knowledge.context.gcp.gcp_environment_context import 
GcpEnvironmentContext\nfrom cloudrail.knowledge.context.gcp.resources.compute.gcp_compute_instance import GcpComputeInstance, GcpComputeInstanceShieldInstCfg\nfrom cloudrail.knowledge.rules.base_rule import RuleResultType\nfrom cloudrail.knowledge.rules.gcp.non_context_aware.compute_instance_launch_with_vm_shield_rule import ComputeInstanceLaunchWithVmShieldRule\n\n\nclass TestComputeInstanceLaunchWithVmShieldRule(TestCase):\n def setUp(self):\n self.rule = ComputeInstanceLaunchWithVmShieldRule()\n\n @parameterized.expand(\n [\n [\"shield_vm_config_no_integrity\", True, False, True, True],\n [\"shield_vm_config_no_vtpm\", True, True, False, True],\n [\"shield_vm_config_secure_boot_enabled_no_integrity_no_vtpm\", True, False, False, True],\n [\"shield_vm_config_all_attributes_enabled\", True, True, True, False],\n [\"shield_vm_config_both_integrity_and_vtpm_enabled_secure_boot_disabled\", False, True, True, True]\n ]\n )\n\n def test_compute_instance_shield_vm_config(self, unused_name: str, secure_boot: bool,\n enable_integrity_monitoring: bool, enabled_vtpm: bool, should_alert: bool):\n # Arrange\n compute_instance = create_empty_entity(GcpComputeInstance)\n shielded_instance_config = create_empty_entity(GcpComputeInstanceShieldInstCfg)\n shielded_instance_config.enable_integrity_monitoring = enable_integrity_monitoring\n shielded_instance_config.enable_secure_boot = secure_boot\n shielded_instance_config.enable_vtpm = enabled_vtpm\n compute_instance.shielded_instance_config = shielded_instance_config\n context = GcpEnvironmentContext(compute_instances=[compute_instance])\n # Act\n result = self.rule.run(context, {})\n # Assert\n if should_alert:\n self.assertEqual(RuleResultType.FAILED, result.status)\n self.assertEqual(1, len(result.issues))\n else:\n self.assertEqual(RuleResultType.SUCCESS, result.status)\n self.assertEqual(0, len(result.issues))\n","sub_path":"tests/knowledge/rules/gcp/non_context_aware/test_compute_instance_launch_with_vm_shield_rule.py","file_name":"test_compute_instance_launch_with_vm_shield_rule.py","file_ext":"py","file_size_in_byte":2309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"81829730","text":"# Detect if a number is divisible by 3\ndef check3(n):\n n = abs(n)\n if n == 0:\n return 1\n if n == 1:\n return 0\n ec = 0\n oc = 0\n while (n):\n if (n & 1):\n oc += 1\n n = n >> 1\n if (n & 1):\n ec += 1\n n = n >> 1\n return check3(oc - ec)\n\n\nn = int(input())\nif check3(n):\n print('%d is Multiple of 3' % n)\nelse:\n print('%d is not a multiple of 3' % n)\n","sub_path":"mathematical/multiplicity3.py","file_name":"multiplicity3.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"103558600","text":"\"\"\"\nDescription: Current algorithm replicates manual swing trades made from November 2014 - February 2015.\nResults:\nTotal Returns = 12.8%\nSPY Returns = 2.4%\nSharpe Ratio = 1.28\nVolatility = 0.10\nMax Drawdown 1.2%\n\"\"\"\nimport talib # Imports TA-Lib \n\ndef initialize(context):\n context.security = symbol('SPY') # Initializing the security to be traded\n\ndef handle_data(context, data):\n currentprice = data[context.security].price # Variable for current price of security\n cash = context.portfolio.cash # Amount of cash available in the portfolio\n shares = int(cash / currentprice) # The number of shares that can be purchased in the portfolio\n if get_open_orders(): # Checks to see if there are any 
open orders before placing an order\n return \n if currentprice < 201 and currentprice > 199:\n order(symbol('SPY'),+shares,style=LimitOrder(200.00))\n elif currentprice > 206:\n order_target(context.security,0) # Sells all securities when price target reached\n\"\"\"\nTo Do: \nFigure out a way to replace hard keyed prices with resistance and support\n\"\"\"","sub_path":"SPYAlgo.py","file_name":"SPYAlgo.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"276003445","text":"from django.shortcuts import render\nfrom KorpaZaKupovinu.korpa import Korpa\nfrom .models import StavkaPorudzbine\nfrom .forms import FormaZaPorudzbinu\n\n\ndef KreiranjePorudzbine(request):\n korpa = Korpa(request)\n if request.method == 'POST':\n forma = FormaZaPorudzbinu(request.POST)\n if forma.is_valid():\n porudzbina = forma.save()\n for stavka in korpa:\n StavkaPorudzbine.objects.create(porudzbina=porudzbina,\n ploca=stavka['ploca'], cena=stavka['price'], kolicina=stavka['kolicina'])\n korpa.ObrisiJeIzSesije()\n return render(request, 'Porudzbina/Porudzbina/created.html', {'porudzbina': porudzbina})\n else:\n forma = FormaZaPorudzbinu()\n return render(request, 'Porudzbina/Porudzbina/create.html', {'korpa': korpa, 'forma': forma})\n","sub_path":"Porudzbina/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"444732046","text":"# This is a sample Python script.\nimport json\nimport argparse\nfrom collections import OrderedDict\nimport pandas as pd\nimport math\nfrom ast import literal_eval\nimport os\nimport itertools\nfrom pathlib import Path\nimport numpy as np\n# Press Shift+F10 to execute it or replace it with your code.\n# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.\n\n\ndef compare_commit_and_analysis_dates(save_file_path, analysis_file, commit_file, project_name):\n# # The analysis of the given project in /sonar_data/analysis/ directory\n analysis_df = pd.read_csv(save_file_path + \"/analysis/\" + \"{0}.csv\".format(\n analysis_file.replace(' ', '_').replace(':', '_')))\n#\n# # The commit hash file of the given project in /sonar_data/Git_Logs/ directory\n# # For this file, I have used Savanna's script which generates the commit log history of a project and\n# # saved it in /sonar_data/Git_logs/ directory manually\n commits_df = pd.read_csv(save_file_path + \"/Git_Logs/\" + \"{0}.csv\".format(\n commit_file.replace(' ', '_').replace(':', '_')))\n#\n analysis_df = analysis_df.assign(DATE_MATCH=analysis_df.date.isin(commits_df.AUTHOR_DATE).astype(int))\n analysis_df['COMMIT_DATE'] = analysis_df['date']\n compared = json.loads(analysis_df['DATE_MATCH'].value_counts().to_json())\n not_matched = 0 if '0' not in compared else compared['0']\n matched = 0 if '1' not in compared else compared['1']\n print('PROJECT: {0}, NOT MATCHED: {1}, MATCHED: {2}, MATCHED%: {3}'.format(project_name, not_matched, matched, (matched/(len(analysis_df.index))*100)))\n print('-'*100)\n return 1\n# analysis_df.loc[analysis_df['DATE_MATCH'] == 0, 'COMMIT_DATE'] = None\n# # print(analysis_df)\n# # compare_date_path = Path(save_file_path).joinpath(\"compare_date\")\n# # compare_date_path.mkdir(parents=True, exist_ok=True)\n# # compare_date_path = compare_date_path.joinpath(commit_file)\n# # del analysis_df['project_version']\n# # del analysis_df['revision']\n# # 
analysis_df.to_csv(compare_date_path, index=False, header=True)\n# # exit(1)\n# #\n# # # The issues of the given project in /sonar_data/issues/ directory\n# issues_df = pd.read_csv(save_file_path + \"/issues/\"+ \"{0}.csv\".format(\n# analysis_file.replace(' ', '_').replace(':', '_')))\n# print(len(issues_df))\n# headers = OrderedDict({\n# \"PROJECT\": \"object\",\n# \"ANALYSIS_KEY\": \"object\",\n# \"DATE\": \"object\",\n# \"HASH\": \"object\",\n# })\n# #\n# matched_commits = commits_df[commits_df.AUTHOR_DATE.isin(analysis_df.date)].drop_duplicates(subset=['AUTHOR_DATE'],\n# keep='last')\n# print(\"matched_commits_date_with_analysis_date: {0}\".format(len(matched_commits)))\n# print(project_name)\n# print(len(analysis_df))\n# matched_analysis = analysis_df[analysis_df.date.isin(commits_df.AUTHOR_DATE)]\n# print(len(matched_analysis))\n# matched_commits['MODIFIED_FILES'] = matched_commits.MODIFIED_FILES.apply(literal_eval)\n# print(len(matched_commits))\n#\n# creation_date = issues_df['creation_date'].tolist()\n# update_date = issues_df['update_date'].tolist()\n# close_date = issues_df['close_date'].tolist()\n# date_list = creation_date + update_date + close_date\n# unique_issues_date = list(set(date_list))\n# issues_df_dates = pd.DataFrame({\"dates\": unique_issues_date})\n# matched_issues = analysis_df['date'].isin(issues_df_dates['dates']).value_counts()\n# print(matched_issues)\n#\n# rows = []\n# not_found = False\n# count = 0\n# for index, item in matched_commits.iterrows():\n# modified_files = sorted(item.loc['MODIFIED_FILES'])\n# issues_of_create_date = issues_df[(issues_df['creation_date'] == item.loc['AUTHOR_DATE']) | (issues_df['close_date'] == item.loc['AUTHOR_DATE'])].drop_duplicates(\n# subset=['component'], keep='last')['component'].array\n#\n# if issues_of_create_date:\n# print(item.loc['AUTHOR_DATE'])\n# component_files = sorted([os.path.basename(component) for component in issues_of_create_date])\n# for i, _ in enumerate(component_files):\n# if component_files[i] == \"org.apache:felix:pom.xml\":\n# component_files[i] = 'pom.xml'\n#\n# if all(item in modified_files for item in component_files):\n# count += 1\n# # analysis_row = matched_analysis[(matched_analysis['date'] == item.loc['AUTHOR_DATE'])]\n# # line = (analysis_row['project'].values[0], analysis_row['analysis_key'].values[0],\n# # analysis_row['date'].values[0], item.loc['HASH'])\n# # # rows.append(line)\n# # not_found = False\n# else:\n# not_found = True\n# else:\n# not_found = True\n# print(count)\n#\n# if not_found:\n# issues_of_create_update_date = issues_df[\n# (issues_df['creation_date'] == item.loc['AUTHOR_DATE']) |\n# (issues_df['update_date'] == item.loc['AUTHOR_DATE'])].drop_duplicates(\n# subset=['component'], keep='last')['component'].array\n#\n# if issues_of_create_update_date:\n# component_files = sorted([os.path.basename(component)\n# for component in issues_of_create_update_date])\n#\n# for i, _ in enumerate(component_files):\n# if component_files[i] == \"org.apache:felix:pom.xml\":\n# component_files[i] = 'pom.xml'\n#\n# if all(item in modified_files for item in component_files):\n# analysis_row = matched_analysis[(matched_analysis['date'] == item.loc['AUTHOR_DATE'])]\n# line = (analysis_row['project'].values[0], analysis_row['analysis_key'].values[0],\n# analysis_row['date'].values[0], item.loc['HASH'])\n# rows.append(line)\n# not_found = False\n# else:\n# not_found = True\n# else:\n# not_found = True\n#\n # if not_found:\n # issues_of_create_update_close_date = issues_df[(\n # 
(issues_df['creation_date'] == item.loc['AUTHOR_DATE']) |\n # (issues_df['close_date'] == item.loc['AUTHOR_DATE'])\n # )].drop_duplicates(subset=['component'], keep='last')['component'].array\n #\n # if issues_of_create_update_close_date:\n # component_files = sorted([os.path.basename(component)\n # for component in issues_of_create_update_close_date])\n #\n # for i, _ in enumerate(component_files):\n # if component_files[i] == \"org.apache:felix:pom.xml\":\n # component_files[i] = 'pom.xml'\n #\n # if all(item in modified_files for item in component_files):\n # analysis_row = matched_analysis[(matched_analysis['date'] == item.loc['AUTHOR_DATE'])]\n # line = (analysis_row['project'].values[0], analysis_row['analysis_key'].values[0],\n # analysis_row['date'].values[0], item.loc['HASH'])\n # rows.append(line)\n # not_found = False\n # else:\n # not_found = True\n # else:\n # not_found = True\n#\n# print(len(rows))\n# two_dates = list(zip(rows, rows[1:]))\n# #\n# # analysis_df['date'] = pd.to_datetime(analysis_df['date'])\n# # analysis_df = analysis_df.sort_values(by=['date'])\n# #\n# print(not_found)\n# # save_file_path = Path(save_file_path).joinpath(\"analysis_commit\")\n# # save_file_path.mkdir(parents=True, exist_ok=True)\n# # file_path = save_file_path.joinpath(\"sonar_analysis_commits_{0}.csv\".format(project_name))\n# # df = pd.DataFrame(data=rows, columns=headers)\n # df.to_csv(file_path, index=False, header=True)\n\n\ndef get_percentage(total_issues, count):\n frac, whole = math.modf(count / total_issues * 100)\n percentage = whole if whole > 95 else math.ceil(count / total_issues * 100)\n return percentage\n\n\ndef get_sonar_issues_match_info(file_path, file_name, project_name):\n analysis_commit_df = pd.read_csv(file_path + \"/analysis_with_revision_value/\" + \"{0}.csv\".format(\n file_name.replace(' ', '_').replace(':', '_')))\n\n issues_df = pd.read_csv(\n file_path + \"/issues/\" + \"{0}.csv\".format(file_name.replace(' ', '_').replace(':', '_')))\n\n indexes = set()\n issues_df['not_found'] = 0\n for index, row in issues_df.iterrows():\n if index not in indexes:\n if pd.isnull(row['close_date']):\n check = analysis_commit_df[analysis_commit_df['date'] == row['creation_date']]\n\n if check.empty:\n issues_df.at[index, 'not_found'] = 1\n indexes.add(index)\n else:\n check_close_date = analysis_commit_df[analysis_commit_df['date'] == row['close_date']]\n check_open_date = analysis_commit_df[analysis_commit_df['date'] == row['creation_date']]\n\n if check_close_date.empty:\n if not check_open_date.empty:\n sliced_issues = issues_df.iloc[index:]\n component = row['component']\n found = False\n for _index, _row in sliced_issues.iterrows():\n if _row['component'] == component:\n if not pd.isnull(_row['close_date']):\n create_date_found = analysis_commit_df[analysis_commit_df['date'] == _row['creation_date']]\n close_date_found = analysis_commit_df[analysis_commit_df['date'] == _row['close_date']]\n\n if not create_date_found.empty and not close_date_found.empty:\n found = True\n issues_df.at[_index, 'creation_date'] = row['creation_date']\n break\n else:\n issues_df.at[_index, 'not_found'] = 1\n indexes.add(_index)\n if not found:\n issues_df.at[index, 'close_date'] = \"\"\n else:\n issues_df.at[index, 'not_found'] = 1\n indexes.add(index)\n else:\n if check_open_date.empty:\n issues_df.at[index, 'not_found'] = 1\n indexes.add(index)\n\n indexes_to_keep = set(range(issues_df.shape[0])) - set(indexes)\n issues_df = issues_df.take(list(indexes_to_keep))\n new_issue_path = 
Path(file_path).joinpath(\"updated_issues\")\n new_issue_path.mkdir(parents=True, exist_ok=True)\n new_issue_path = new_issue_path.joinpath(\"{0}.csv\".format(\n file_name.replace(' ', '_').replace(':', '_')))\n issues_df.to_csv(new_issue_path, index=False, header=True)\n\n num_of_lines_in_issues = len(issues_df.index)\n count_create_date_missing = 0\n count_create_close_date_missing = 0\n count_close_date_missing = 0\n missing_creation_date = set()\n missing_creation_close_date = set()\n missing_close_date = set()\n\n for index, row in issues_df.iterrows():\n if pd.notnull(row['creation_date']):\n check = analysis_commit_df[analysis_commit_df['date'] == row['creation_date']]\n\n if check.empty:\n missing_creation_date.add(row['creation_date'])\n count_create_date_missing += 1\n\n if row['creation_date'] and pd.notnull(row['close_date']):\n check = analysis_commit_df[(analysis_commit_df['date'] == row['creation_date']) |\n (analysis_commit_df['date'] == row['close_date'])]\n\n if check.empty:\n missing_creation_close_date.add(row['creation_date'])\n missing_creation_close_date.add(row['close_date'])\n count_create_close_date_missing += 1\n\n if pd.notnull(row['close_date']):\n check = analysis_commit_df[analysis_commit_df['date'] == row['close_date']]\n\n if check.empty:\n missing_close_date.add(row['close_date'])\n count_close_date_missing += 1\n\n return (project_name, num_of_lines_in_issues,\n count_create_date_missing,\n get_percentage(total_issues=num_of_lines_in_issues, count=count_create_date_missing),\n count_create_close_date_missing,\n get_percentage(total_issues=num_of_lines_in_issues, count=count_create_close_date_missing),\n count_close_date_missing,\n get_percentage(total_issues=num_of_lines_in_issues, count=count_close_date_missing))\n\n\nif __name__ == '__main__':\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-o\", \"--output-path\", default='./sonar_data', help=\"Path to output file directory.\")\n args = vars(ap.parse_args())\n output_path = args['output_path']\n projects = pd.read_csv(output_path + \"/projects_list.csv\")\n data = []\n for pos, row in projects.iterrows():\n result = get_sonar_issues_match_info(file_path=output_path, file_name=row.sonarProjectKey,\n project_name=row.projectID)\n data.append(result)\n\n result_df = pd.DataFrame(data=data, columns={\n \"project\": \"object\",\n \"LINES_IN_ISSUES_FILE\": \"object\",\n \"MISSING_CREATION_ONLY_LINES\": \"object\",\n \"%MISSING_CREATION_ONLY_LINES\": \"object\",\n \"MISSING_CREATION_CLOSE_LINES\": \"object\",\n \"%MISSING_CREATION_CLOSE_LINES\": \"object\",\n \"MISSING_CLOSE_LINES\": \"object\",\n \"%MISSING_CLOSE_LINES\": \"object\"\n })\n\n output_path = Path(output_path)\n file_path = output_path.joinpath(\"Missing-sonar-issues-revision-information.csv\")\n result_df.to_csv(file_path, index=False, header=True)\n","sub_path":"old_elaborations/compare_dates.py","file_name":"compare_dates.py","file_ext":"py","file_size_in_byte":14361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"406752938","text":"import datetime\r\nfrom sanic import Sanic\r\nfrom sanic_cors import CORS\r\nfrom sanic_redis import SanicRedis\r\nfrom apps import db, settings, import_data\r\nfrom apps.api import crud_bp, ShanghaiPersonInfo\r\nfrom sanic.exceptions import NotFound, InvalidUsage\r\nfrom sanic.response import json\r\nfrom apps.auth.views import authenticate, store_refresh_token, retrieve_refresh_token\r\nfrom sanic_jwt import initialize\r\n\r\nfrom sanic_limiter import 
Limiter, get_remote_address\r\n\r\ndef create_app():\r\n    app = Sanic(__name__)\r\n    app.config.update(settings.SANIC_REDIS_CONFIG)\r\n    redis = SanicRedis(app)\r\n\r\n    CORS(app, automatic_options=True)\r\n\r\n    app.config.LOGO = settings.LOGO.format(', Y.c')\r\n    # app.config.LOGO = None\r\n\r\n    app.blueprint(crud_bp)\r\n    app.config.from_object(settings)\r\n\r\n    # JWT configuration\r\n    # Key name under which the JWT access token is returned; this overrides the default key 'access_token'\r\n    app.config.SANIC_JWT_ACCESS_TOKEN_NAME = 'sanic-token'\r\n    # app.config.SANIC_JWT_ACCESS_TOKEN_NAME = 'jwt'\r\n\r\n    # Set the expiration time; the default is 30 minutes\r\n    # app.config['JWT_EXPIRATION_DELTA'] = datetime.timedelta(days=10)\r\n    app.config['JWT_EXPIRATION_DELTA'] = datetime.timedelta(seconds=60)\r\n\r\n    # limiter = Limiter(app, global_limits=['10 per hour', '100 per day'], key_func=get_remote_address)\r\n\r\n    initialize(\r\n        app,\r\n        authenticate=authenticate,\r\n        # refresh_token_enabled=True,\r\n        # store_refresh_token=store_refresh_token,\r\n        # retrieve_refresh_token=retrieve_refresh_token,\r\n        url_prefix='/v1/api/authentication',\r\n        secret='fgkjhfkhgkfhkghfjdkgher5545458fjighui',\r\n        # verify_exp = False,\r\n\r\n        ## Header token\r\n        authorization_header='Authorization',\r\n        authorization_header_prefix='Bearer',\r\n        # Cookie token\r\n        # cookie_domain - changes the domain associated with the cookie (defaults to '')\r\n        # cookie_httponly - whether to set the httponly flag on the cookie (defaults to True). If you pass JWTs via cookies, it is recommended not to disable cookie_httponly; disabling it means any javascript running on the client can read the token.\r\n        # cookie_access_token_name - name of the cookie that stores the access token\r\n        # cookie_refresh_token_name - name of the cookie that stores the refresh token\r\n        cookie_set=True,\r\n        # cookie_domain='mydomain.com',\r\n        cookie_httponly=False,\r\n        cookie_access_token_name='some-token',\r\n        cookie_strict=False,\r\n\r\n\r\n        # Set the expiration time; the default is 30 minutes\r\n        # How long an access token stays valid. Since access tokens cannot be revoked, keep this period short and enable refresh tokens (which can be revoked) to obtain new access tokens.\r\n        # Set to one minute\r\n        expiration_delta=60,\r\n    )\r\n\r\n    return app\r\n\r\napp = create_app()\r\n\r\n@app.middleware('response')\r\nasync def response_json(request, response):\r\n    response.headers[\"content-type\"] = \"application/json\"\r\n\r\n@app.exception(NotFound)\r\ndef not_found(request, exception):\r\n    return json({\r\n        'error_code': 'not_found',\r\n        'message': exception.args[0] },\r\n        status=exception.status_code,\r\n    )\r\n\r\n@app.exception(InvalidUsage)\r\ndef method_not_allow(request, exception):\r\n    return json({\r\n        'error_code': 'method_not_allow',\r\n        'message': exception.args[0]},\r\n        status=exception.status_code,\r\n    )\r\n\r\ndb.create_tables([ShanghaiPersonInfo], safe=True)\r\n# import_data.generate_data()\r\n\r\nif __name__ == \"__main__\":\r\n    app.run(host=\"0.0.0.0\", port=6070, debug=True, workers=4)\r\n","sub_path":"sanic_api/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"489060115","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\ndef compound_year(principal, year, annualized_rate, investment):\n    \"\"\"\n    Compute compound interest (annual)\n    :param principal: initial principal\n    :param year: number of years\n    :param annualized_rate: annualized rate of return (percent)\n    :param investment: fixed amount invested each year\n    :return: total value\n    \"\"\"\n    money = principal\n    for i in range(year):\n        if i != 0:\n            money += investment\n        money *= (annualized_rate / 100 + 1)\n    print('Principal:\\t\\t%d\\nYears:\\t\\t%d\\nAnnualized rate:\\t%d\\nRecurring investment:\\t%d\\n' % (principal, year, annualized_rate, investment))\n    print('----------------------')\n    return money\n\n\ndef compound_half_profit(principal, day, day_rate):\n    profit = 0\n    for i in range(day):\n        day_profit = principal * day_rate/2\n        profit += day_profit\n        principal += day_profit\n 
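# Editor's note (hedged, not in the original): apart from the per-day rounding\n # below, this loop is plain geometric growth, so with start = the principal\n # passed in, a closed-form cross-check is:\n #   expected = start * (1 + day_rate / 2) ** day   # matches up to rounding drift\n 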
principal=round(principal,2)\n profit=round(profit,2)\n print('Position:', principal, \" Profit:\", profit)\n\ndef compound_day(principal, day, day_rate):\n for i in range(day):\n day_profit = principal * day_rate\n principal += day_profit\n principal=round(principal,2)\n print('Sum:',principal)\n\nif __name__ == '__main__':\n compound_day(100000,20,0.01)","sub_path":"bin/calculator/CompoundInterest.py","file_name":"CompoundInterest.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"500526822","text":"from ee.kafka.topics import KAFKA_PERSON, KAFKA_PERSON_UNIQUE_ID\n\nfrom .clickhouse import STORAGE_POLICY, kafka_engine, table_engine\n\nDROP_PERSON_TABLE_SQL = \"\"\"\nDROP TABLE person\n\"\"\"\n\nDROP_PERSON_DISTINCT_ID_TABLE_SQL = \"\"\"\nDROP TABLE person_distinct_id\n\"\"\"\n\nPERSONS_TABLE = \"person\"\n\nPERSONS_TABLE_BASE_SQL = \"\"\"\nCREATE TABLE {table_name} \n(\n id UUID,\n created_at datetime,\n team_id Int64,\n properties VARCHAR,\n is_identified Boolean,\n _timestamp UInt64,\n _offset UInt64\n) ENGINE = {engine} \n\"\"\"\n\nPERSONS_TABLE_SQL = (\n PERSONS_TABLE_BASE_SQL\n + \"\"\"Order By (team_id, id)\n{storage_policy}\n\"\"\"\n).format(table_name=PERSONS_TABLE, engine=table_engine(PERSONS_TABLE, \"_timestamp\"), storage_policy=STORAGE_POLICY)\n\nKAFKA_PERSONS_TABLE_SQL = PERSONS_TABLE_BASE_SQL.format(\n table_name=\"kafka_\" + PERSONS_TABLE, engine=kafka_engine(KAFKA_PERSON)\n)\n\nPERSONS_TABLE_MV_SQL = \"\"\"\nCREATE MATERIALIZED VIEW {table_name}_mv \nTO {table_name} \nAS SELECT\nid,\ncreated_at,\nteam_id,\nproperties,\nis_identified,\n_timestamp,\n_offset\nFROM kafka_{table_name} \n\"\"\".format(\n table_name=PERSONS_TABLE\n)\n\nGET_PERSON_SQL = \"\"\"\nSELECT * FROM person WHERE team_id = %(team_id)s\n\"\"\"\n\nPERSONS_DISTINCT_ID_TABLE = \"person_distinct_id\"\n\nPERSONS_DISTINCT_ID_TABLE_BASE_SQL = \"\"\"\nCREATE TABLE {table_name} \n(\n id VARCHAR,\n distinct_id VARCHAR,\n person_id UUID,\n team_id Int64,\n _timestamp UInt64,\n _offset UInt64\n) ENGINE = {engine} \n\"\"\"\n\nPERSONS_DISTINCT_ID_TABLE_SQL = (\n PERSONS_DISTINCT_ID_TABLE_BASE_SQL\n + \"\"\"Order By (team_id, distinct_id, id)\n{storage_policy}\n\"\"\"\n).format(\n table_name=PERSONS_DISTINCT_ID_TABLE,\n engine=table_engine(PERSONS_DISTINCT_ID_TABLE, \"_timestamp\"),\n storage_policy=STORAGE_POLICY,\n)\n\nKAFKA_PERSONS_DISTINCT_ID_TABLE_SQL = PERSONS_DISTINCT_ID_TABLE_BASE_SQL.format(\n table_name=\"kafka_\" + PERSONS_DISTINCT_ID_TABLE, engine=kafka_engine(KAFKA_PERSON_UNIQUE_ID)\n)\n\nPERSONS_DISTINCT_ID_TABLE_MV_SQL = \"\"\"\nCREATE MATERIALIZED VIEW {table_name}_mv \nTO {table_name} \nAS SELECT\nid,\ndistinct_id,\nperson_id,\nteam_id,\n_timestamp,\n_offset\nFROM kafka_{table_name} \n\"\"\".format(\n table_name=PERSONS_DISTINCT_ID_TABLE\n)\n\nGET_DISTINCT_IDS_SQL = \"\"\"\nSELECT * FROM person_distinct_id WHERE team_id = %(team_id)s\n\"\"\"\n\nGET_DISTINCT_IDS_SQL_BY_ID = \"\"\"\nSELECT * FROM person_distinct_id WHERE team_id = %(team_id)s AND person_id = %(person_id)s\n\"\"\"\n\nGET_PERSON_BY_DISTINCT_ID = \"\"\"\nSELECT p.* FROM person as p inner join person_distinct_id as pid on p.id = pid.person_id where team_id = %(team_id)s AND distinct_id = %(distinct_id)s\n\"\"\"\n\nPERSON_DISTINCT_ID_EXISTS_SQL = \"\"\"\nSELECT count(*) FROM person_distinct_id\ninner join (\n SELECT arrayJoin({}) as distinct_id\n ) as id_params ON id_params.distinct_id = person_distinct_id.distinct_id\nwhere 
person_distinct_id.team_id = %(team_id)s\n\"\"\"\n\nPERSON_EXISTS_SQL = \"\"\"\nSELECT count(*) FROM person where id = %(id)s\n\"\"\"\n\nINSERT_PERSON_SQL = \"\"\"\nINSERT INTO person SELECT %(id)s, now(), %(team_id)s, %(properties)s, 0\n\"\"\"\n\nINSERT_PERSON_DISTINCT_ID = \"\"\"\nINSERT INTO person_distinct_id SELECT generateUUIDv4(), %(distinct_id)s, %(person_id)s, %(team_id)s VALUES\n\"\"\"\n\nUPDATE_PERSON_PROPERTIES = \"\"\"\nALTER TABLE person UPDATE properties = %(properties)s where id = %(id)s\n\"\"\"\n\nUPDATE_PERSON_ATTACHED_DISTINCT_ID = \"\"\"\nALTER TABLE person_distinct_id UPDATE person_id = %(person_id)s where distinct_id = %(distinct_id)s\n\"\"\"\n\nDELETE_PERSON_BY_ID = \"\"\"\nALTER TABLE person DELETE where id = %(id)s\n\"\"\"\n\nUPDATE_PERSON_IS_IDENTIFIED = \"\"\"\nALTER TABLE person UPDATE is_identified = %(is_identified)s where id = %(id)s\n\"\"\"\n","sub_path":"ee/clickhouse/sql/person.py","file_name":"person.py","file_ext":"py","file_size_in_byte":3609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"16464583","text":"import re\n\n# Dud predictor class to replace NLP_Predictor for WeChat.\n# Only returns \"inform\"\n\nclass Regex_Predictor:\n def predict(self, raw_msg):\n pred_dict = {}\n intent = self.get_intent(raw_msg)\n pred_dict = {\n \"prediction\":intent,\n \"breakdown\":\"breakdown text\",\n \"numbers\":[]\n }\n return pred_dict\n \n def get_intent(self, msg):\n intent = \"inform\"\n return intent","sub_path":"chatbot/regex_predictor.py","file_name":"regex_predictor.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"243677868","text":"#!/usr/bin/env python\n\n###################################################################################################\n# Run time is roughly linear in the input size, but each record also recomputes its group median  #\n# from scratch, so very large groups add extra cost; large input files are still handled fine.    #\n###################################################################################################\n\n# First we will import the libraries which are used in the code\nimport numpy as np\nimport pandas as pd\nimport sys\n\n\n# This function will take in the path to the input file, convert it into a dataframe, and return it.\ndef load_file(path):\n ip_file = pd.read_table(path, header=None, sep='|', dtype='str')\n return ip_file\n\n\n# This function does the major job in creating the medianvals_by_zip.txt output file.\n# It takes in the input dataframe returned by the load_file() method above and two dictionaries which will get filled up\n# by this method according to the constraints. 
It returns the medianvals_by_zip dataframe.\ndef parse_input(inp, by_zip_dict, by_date_dict):\n medianvals_by_zip = pd.DataFrame()\n zip_update_list = []\n for i in xrange(len(inp)):\n # Entering input data line by line into two dictionaries\n CMTE_ID = inp.iloc[i][0]\n TRANSACTION_DT = inp.iloc[i][13]\n TRANSACTION_AMT = float(inp.iloc[i][14])\n OTHER_ID = inp.iloc[i][15]\n zip_c = 0\n date = 0\n # First we will check if the required fields are correct\n if (len(CMTE_ID) == 9) and (CMTE_ID.isalnum()) and (not isinstance(OTHER_ID, str)) and \\\n (TRANSACTION_AMT is not None) and (TRANSACTION_AMT >= 0):\n # Now we will check which fileds among zip and date are correct\n if (inp.iloc[i][10] is not None) and (len(inp.iloc[i][10]) >= 5) and (len(inp.iloc[i][10]) <= 9):\n ZIP_CODE = inp.iloc[i][10][:5]\n zip_c = 1\n\n if (TRANSACTION_DT is not None) and (len(TRANSACTION_DT) == 8):\n date = 1\n\n if zip_c and date:\n if CMTE_ID not in by_zip_dict:\n by_zip_dict[CMTE_ID] = {ZIP_CODE:\n {'transaction': [TRANSACTION_AMT],\n 'num_transaction': 1,\n 'median_transaction': int(round(TRANSACTION_AMT)),\n 'total_amt': TRANSACTION_AMT\n }\n }\n\n else:\n if ZIP_CODE not in by_zip_dict[CMTE_ID]:\n by_zip_dict[CMTE_ID][ZIP_CODE] = {'transaction': [TRANSACTION_AMT],\n 'num_transaction': 1,\n 'median_transaction': int(round(TRANSACTION_AMT)),\n 'total_amt': TRANSACTION_AMT\n }\n\n else:\n by_zip_dict[CMTE_ID][ZIP_CODE]['transaction'].append(TRANSACTION_AMT)\n by_zip_dict[CMTE_ID][ZIP_CODE]['num_transaction'] += 1\n by_zip_dict[CMTE_ID][ZIP_CODE]['median_transaction'] = \\\n int(round(np.median(by_zip_dict[CMTE_ID][ZIP_CODE]['transaction'])))\n by_zip_dict[CMTE_ID][ZIP_CODE]['total_amt'] += TRANSACTION_AMT\n\n if CMTE_ID not in by_date_dict:\n by_date_dict[CMTE_ID] = {TRANSACTION_DT:\n {'transaction': [TRANSACTION_AMT],\n 'num_transaction': 1,\n 'median_transaction': int(round(TRANSACTION_AMT)),\n 'total_amt': TRANSACTION_AMT\n }\n }\n\n else:\n if TRANSACTION_DT not in by_date_dict[CMTE_ID]:\n by_date_dict[CMTE_ID][TRANSACTION_DT] = {'transaction': [TRANSACTION_AMT],\n 'num_transaction': 1,\n 'median_transaction': int(round(TRANSACTION_AMT)),\n 'total_amt': TRANSACTION_AMT\n }\n\n else:\n by_date_dict[CMTE_ID][TRANSACTION_DT]['transaction'].append(TRANSACTION_AMT)\n by_date_dict[CMTE_ID][TRANSACTION_DT]['num_transaction'] += 1\n by_date_dict[CMTE_ID][TRANSACTION_DT]['median_transaction'] = \\\n int(round(np.median(by_date_dict[CMTE_ID][TRANSACTION_DT]['transaction'])))\n by_date_dict[CMTE_ID][TRANSACTION_DT]['total_amt'] += TRANSACTION_AMT\n\n # Appending line to output dataframe medianvals_by_zip so that order remains intact\n new_entry = []\n new_entry.append(CMTE_ID)\n new_entry.append(ZIP_CODE)\n new_entry.append(by_zip_dict[CMTE_ID][ZIP_CODE]['median_transaction'])\n new_entry.append(by_zip_dict[CMTE_ID][ZIP_CODE]['num_transaction'])\n new_entry.append(int(by_zip_dict[CMTE_ID][ZIP_CODE]['total_amt']))\n zip_update_list.append(new_entry)\n\n elif zip_c and not date:\n if CMTE_ID not in by_zip_dict:\n by_zip_dict[CMTE_ID] = {ZIP_CODE:\n {'transaction': [TRANSACTION_AMT],\n 'num_transaction': 1,\n 'median_transaction': int(round(TRANSACTION_AMT)),\n 'total_amt': TRANSACTION_AMT\n }\n }\n\n else:\n if ZIP_CODE not in by_zip_dict[CMTE_ID]:\n by_zip_dict[CMTE_ID][ZIP_CODE] = {'transaction': [TRANSACTION_AMT],\n 'num_transaction': 1,\n 'median_transaction': int(round(TRANSACTION_AMT)),\n 'total_amt': TRANSACTION_AMT\n }\n\n else:\n by_zip_dict[CMTE_ID][ZIP_CODE]['transaction'].append(TRANSACTION_AMT)\n 
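# Editor's note (hedged, illustrative only; hypothetical helper, not used by this\n # script): recomputing np.median over the whole per-group list on every row costs\n # O(k log k) per update. A two-heap running median keeps each update at O(log k):\n #   import heapq\n #   class RunningMedian:\n #       def __init__(self):\n #           self.lo, self.hi = [], []   # max-heap (stored negated) / min-heap\n #       def add(self, x):\n #           heapq.heappush(self.lo, -x)\n #           heapq.heappush(self.hi, -heapq.heappop(self.lo))\n #           if len(self.hi) > len(self.lo):\n #               heapq.heappush(self.lo, -heapq.heappop(self.hi))\n #       def median(self):\n #           if len(self.lo) > len(self.hi):\n #               return -self.lo[0]\n #           return (-self.lo[0] + self.hi[0]) / 2.0\n 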
by_zip_dict[CMTE_ID][ZIP_CODE]['num_transaction'] += 1\n by_zip_dict[CMTE_ID][ZIP_CODE]['median_transaction'] = \\\n int(round(\n np.median(by_zip_dict[CMTE_ID][ZIP_CODE]['transaction'])))\n by_zip_dict[CMTE_ID][ZIP_CODE]['total_amt'] += TRANSACTION_AMT\n\n # Appending line to output dataframe medianvals_by_zip so that order remains intact\n new_entry = []\n new_entry.append(CMTE_ID)\n new_entry.append(ZIP_CODE)\n new_entry.append(by_zip_dict[CMTE_ID][ZIP_CODE]['median_transaction'])\n new_entry.append(by_zip_dict[CMTE_ID][ZIP_CODE]['num_transaction'])\n new_entry.append(int(by_zip_dict[CMTE_ID][ZIP_CODE]['total_amt']))\n zip_update_list.append(new_entry)\n\n elif not zip_c and date:\n if CMTE_ID not in by_date_dict:\n by_date_dict[CMTE_ID] = {TRANSACTION_DT:\n {'transaction': [TRANSACTION_AMT],\n 'num_transaction': 1,\n 'median_transaction': int(round(TRANSACTION_AMT)),\n 'total_amt': TRANSACTION_AMT\n }\n }\n\n else:\n if TRANSACTION_DT not in by_date_dict[CMTE_ID]:\n by_date_dict[CMTE_ID][TRANSACTION_DT] = {'transaction': [TRANSACTION_AMT],\n 'num_transaction': 1,\n 'median_transaction': int(round(TRANSACTION_AMT)),\n 'total_amt': TRANSACTION_AMT\n }\n\n else:\n by_date_dict[CMTE_ID][TRANSACTION_DT]['transaction'].append(TRANSACTION_AMT)\n by_date_dict[CMTE_ID][TRANSACTION_DT]['num_transaction'] += 1\n by_date_dict[CMTE_ID][TRANSACTION_DT]['median_transaction'] = \\\n int(round(\n np.median(by_date_dict[CMTE_ID][TRANSACTION_DT]['transaction'])))\n by_date_dict[CMTE_ID][TRANSACTION_DT]['total_amt'] += TRANSACTION_AMT\n\n medianvals_by_zip = medianvals_by_zip.append(zip_update_list)\n return medianvals_by_zip\n\n\n# This method takes in the by_date_dict modified by the \"parse_input\" method, and makes and returns a dataframe from it\n# which contains the fields sorted by recipient ID and date.\ndef make_date_df(by_date_dict):\n date_update_list = []\n medianvals_by_date = pd.DataFrame()\n for id in by_date_dict:\n for date in by_date_dict[id]:\n temp = [id, date]\n temp.append(by_date_dict[id][date]['median_transaction'])\n temp.append(by_date_dict[id][date]['num_transaction'])\n temp.append(int(by_date_dict[id][date]['total_amt']))\n date_update_list.append(temp)\n medianvals_by_date = medianvals_by_date.append(date_update_list)\n medianvals_by_date.sort([0,1], ascending=True, inplace=True)\n return medianvals_by_date\n\n\n# This method takes in the two final dataframes for medianvals by zip and dict respectively and makes the required\n# output text files from them.\ndef make_text_files(by_zip_df, by_date_df):\n by_zip_df.to_csv('..\\output\\medianvals_by_zip.txt', header=None, index=None, sep='|', mode='w')\n by_date_df.to_csv('..\\output\\medianvals_by_date.txt', header=None, index=None, sep='|', mode='w')\n\n\n# This is the main method which will be called when this program is run. 
It will call all the required methods.\n# Note that input file's name will be given by the user as an argument to the program when we will run it, and it\n# MUST be present in the input directory.\ndef main():\n by_zip_dict = dict()\n by_date_dict = dict()\n base_input_dir = \"../input/\"\n user_entered_file = str(sys.argv[1])\n inp = load_file(base_input_dir + user_entered_file)\n medianvals_by_zip = parse_input(inp, by_zip_dict, by_date_dict)\n medianvals_by_date = make_date_df(by_date_dict)\n make_text_files(medianvals_by_zip, medianvals_by_date)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"insight_testsuite/temp/src/find_political_donors.py","file_name":"find_political_donors.py","file_ext":"py","file_size_in_byte":11329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"215125570","text":"import sys\n\nfrom xml.dom.ext.reader import PyExpat\nfrom xml.xpath import Evaluate\n\nsubtype = \"//conclusion/assigned/name\"\n\nsubtype_file = open(sys.argv[2],'w');\n\ndom = PyExpat.Reader().fromUri(sys.argv[1])\n\nelements = Evaluate(subtype, dom.documentElement)\n\nfor element in elements:\n \tsubtype_file.write(element.childNodes[0].data)\n\nsubtype_file.close()\n","sub_path":"regadb-wts-services/regadb-subtype/parse_subtype.py","file_name":"parse_subtype.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"589858338","text":"# Copyright 2013 New Dream Network, LLC (DreamHost)\n# Copyright 2015 Rackspace\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom neutron.agent import rpc as agent_rpc\nfrom neutron import context as ncontext\nfrom neutron.plugins.common import constants\nfrom neutron.services import provider_configuration as provconfig\nfrom neutron_lib import exceptions as n_exc\nfrom oslo_config import cfg\nfrom oslo_log import log as logging\nimport oslo_messaging\nfrom oslo_service import loopingcall\nfrom oslo_service import periodic_task\nfrom oslo_utils import importutils\n\nfrom neutron_lbaas._i18n import _, _LE, _LI\nfrom neutron_lbaas.agent import agent_api\nfrom neutron_lbaas.drivers.common import agent_driver_base\nfrom neutron_lbaas.services.loadbalancer import constants as lb_const\nfrom neutron_lbaas.services.loadbalancer import data_models\n\nLOG = logging.getLogger(__name__)\n\nDEVICE_DRIVERS = 'device_drivers'\n\nOPTS = [\n cfg.MultiStrOpt(\n 'device_driver',\n default=['neutron_lbaas.drivers.haproxy.'\n 'namespace_driver.HaproxyNSDriver'],\n help=_('Drivers used to manage loadbalancing devices'),\n ),\n]\n\n\nclass DeviceNotFoundOnAgent(n_exc.NotFound):\n message = _('Unknown device with loadbalancer_id %(loadbalancer_id)s')\n\n\nclass LbaasAgentManager(periodic_task.PeriodicTasks):\n\n # history\n # 1.0 Initial version\n target = oslo_messaging.Target(version='1.0')\n\n def __init__(self, conf):\n super(LbaasAgentManager, self).__init__(conf)\n self.conf = conf\n self.context = ncontext.get_admin_context_without_session()\n self.serializer = agent_driver_base.DataModelSerializer()\n self.plugin_rpc = agent_api.LbaasAgentApi(\n lb_const.LOADBALANCER_PLUGINV2,\n self.context,\n self.conf.host\n )\n self._load_drivers()\n\n self.agent_state = {\n 'binary': 'neutron-lbaasv2-agent',\n 'host': conf.host,\n 'topic': lb_const.LOADBALANCER_AGENTV2,\n 'configurations': {'device_drivers': self.device_drivers.keys()},\n 'agent_type': lb_const.AGENT_TYPE_LOADBALANCERV2,\n 'start_flag': True}\n self.admin_state_up = True\n\n self._setup_state_rpc()\n self.needs_resync = False\n # pool_id->device_driver_name mapping used to store known instances\n self.instance_mapping = {}\n\n def _load_drivers(self):\n self.device_drivers = {}\n for driver in self.conf.device_driver:\n driver = provconfig.get_provider_driver_class(driver,\n DEVICE_DRIVERS)\n try:\n driver_inst = importutils.import_object(\n driver,\n self.conf,\n self.plugin_rpc\n )\n except ImportError:\n msg = _('Error importing loadbalancer device driver: %s')\n raise SystemExit(msg % driver)\n\n driver_name = driver_inst.get_name()\n if driver_name not in self.device_drivers:\n self.device_drivers[driver_name] = driver_inst\n else:\n msg = _('Multiple device drivers with the same name found: %s')\n raise SystemExit(msg % driver_name)\n\n def _setup_state_rpc(self):\n self.state_rpc = agent_rpc.PluginReportStateAPI(\n lb_const.LOADBALANCER_PLUGINV2)\n report_interval = self.conf.AGENT.report_interval\n if report_interval:\n heartbeat = loopingcall.FixedIntervalLoopingCall(\n self._report_state)\n heartbeat.start(interval=report_interval)\n\n def _report_state(self):\n try:\n instance_count = len(self.instance_mapping)\n self.agent_state['configurations']['instances'] = instance_count\n self.state_rpc.report_state(self.context, self.agent_state)\n self.agent_state.pop('start_flag', None)\n except Exception:\n LOG.exception(_LE(\"Failed reporting state!\"))\n\n def initialize_service_hook(self, started_by):\n self.sync_state()\n\n @periodic_task.periodic_task\n 
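# Editor's note (hedged): with oslo.service, a bare @periodic_task.periodic_task runs\n # at the library's default spacing, while the parameterized form used on collect_stats\n # below, @periodic_task.periodic_task(spacing=6), runs every 6 seconds; for example, a\n # once-a-minute task (hypothetical) would be declared as:\n #   @periodic_task.periodic_task(spacing=60)\n #   def my_minutely_task(self, context):\n #       ...\n 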
def periodic_resync(self, context):\n if self.needs_resync:\n self.needs_resync = False\n self.sync_state()\n\n @periodic_task.periodic_task(spacing=6)\n def collect_stats(self, context):\n for loadbalancer_id, driver_name in self.instance_mapping.items():\n driver = self.device_drivers[driver_name]\n try:\n stats = driver.loadbalancer.get_stats(loadbalancer_id)\n if stats:\n self.plugin_rpc.update_loadbalancer_stats(\n loadbalancer_id, stats)\n except Exception:\n LOG.exception(_LE('Error updating statistics on loadbalancer'\n ' %s'),\n loadbalancer_id)\n self.needs_resync = True\n\n def sync_state(self):\n known_instances = set(self.instance_mapping.keys())\n try:\n ready_instances = set(self.plugin_rpc.get_ready_devices())\n\n for deleted_id in known_instances - ready_instances:\n self._destroy_loadbalancer(deleted_id)\n\n for loadbalancer_id in ready_instances:\n self._reload_loadbalancer(loadbalancer_id)\n\n except Exception:\n LOG.exception(_LE('Unable to retrieve ready devices'))\n self.needs_resync = True\n\n self.remove_orphans()\n\n def _get_driver(self, loadbalancer_id):\n if loadbalancer_id not in self.instance_mapping:\n raise DeviceNotFoundOnAgent(loadbalancer_id=loadbalancer_id)\n\n driver_name = self.instance_mapping[loadbalancer_id]\n return self.device_drivers[driver_name]\n\n def _reload_loadbalancer(self, loadbalancer_id):\n try:\n loadbalancer_dict = self.plugin_rpc.get_loadbalancer(\n loadbalancer_id)\n loadbalancer = data_models.LoadBalancer.from_dict(\n loadbalancer_dict)\n driver_name = loadbalancer.provider.device_driver\n if driver_name not in self.device_drivers:\n LOG.error(_LE('No device driver on agent: %s.'), driver_name)\n self.plugin_rpc.update_status(\n 'loadbalancer', loadbalancer_id, constants.ERROR)\n return\n\n self.device_drivers[driver_name].deploy_instance(loadbalancer)\n self.instance_mapping[loadbalancer_id] = driver_name\n self.plugin_rpc.loadbalancer_deployed(loadbalancer_id)\n except Exception:\n LOG.exception(_LE('Unable to deploy instance for '\n 'loadbalancer: %s'),\n loadbalancer_id)\n self.needs_resync = True\n\n def _destroy_loadbalancer(self, lb_id):\n driver = self._get_driver(lb_id)\n try:\n driver.undeploy_instance(lb_id, delete_namespace=True)\n del self.instance_mapping[lb_id]\n self.plugin_rpc.loadbalancer_destroyed(lb_id)\n except Exception:\n LOG.exception(_LE('Unable to destroy device for loadbalancer: %s'),\n lb_id)\n self.needs_resync = True\n\n def remove_orphans(self):\n for driver_name in self.device_drivers:\n lb_ids = [lb_id for lb_id in self.instance_mapping\n if self.instance_mapping[lb_id] == driver_name]\n try:\n self.device_drivers[driver_name].remove_orphans(lb_ids)\n except NotImplementedError:\n pass # Not all drivers will support this\n\n def _handle_failed_driver_call(self, operation, obj, driver):\n obj_type = obj.__class__.__name__.lower()\n LOG.exception(_LE('%(operation)s %(obj)s %(id)s failed on device '\n 'driver %(driver)s'),\n {'operation': operation.capitalize(), 'obj': obj_type,\n 'id': obj.id, 'driver': driver})\n self._update_statuses(obj, error=True)\n\n def agent_updated(self, context, payload):\n \"\"\"Handle the agent_updated notification event.\"\"\"\n if payload['admin_state_up'] != self.admin_state_up:\n self.admin_state_up = payload['admin_state_up']\n if self.admin_state_up:\n self.needs_resync = True\n else:\n # Copy keys since the dictionary is modified in the loop body\n for loadbalancer_id in list(self.instance_mapping.keys()):\n LOG.info(_LI(\"Destroying loadbalancer %s due to agent 
\"\n \"disabling\"), loadbalancer_id)\n self._destroy_loadbalancer(loadbalancer_id)\n LOG.info(_LI(\"Agent_updated by server side %s!\"), payload)\n\n def _update_statuses(self, obj, error=False):\n lb_p_status = constants.ACTIVE\n lb_o_status = None\n obj_type = obj.__class__.__name__.lower()\n obj_p_status = constants.ACTIVE\n obj_o_status = lb_const.ONLINE\n if error:\n obj_p_status = constants.ERROR\n obj_o_status = lb_const.OFFLINE\n if isinstance(obj, data_models.HealthMonitor):\n obj_o_status = None\n if isinstance(obj, data_models.LoadBalancer):\n lb_o_status = lb_const.ONLINE\n if error:\n lb_p_status = constants.ERROR\n lb_o_status = lb_const.OFFLINE\n lb = obj\n else:\n lb = obj.root_loadbalancer\n self.plugin_rpc.update_status(obj_type, obj.id,\n provisioning_status=obj_p_status,\n operating_status=obj_o_status)\n self.plugin_rpc.update_status('loadbalancer', lb.id,\n provisioning_status=lb_p_status,\n operating_status=lb_o_status)\n\n def create_loadbalancer(self, context, loadbalancer, driver_name):\n loadbalancer = data_models.LoadBalancer.from_dict(loadbalancer)\n if driver_name not in self.device_drivers:\n LOG.error(_LE('No device driver on agent: %s.'), driver_name)\n self.plugin_rpc.update_status('loadbalancer', loadbalancer.id,\n provisioning_status=constants.ERROR)\n return\n driver = self.device_drivers[driver_name]\n try:\n driver.loadbalancer.create(loadbalancer)\n except Exception:\n self._handle_failed_driver_call('create', loadbalancer,\n driver.get_name())\n else:\n self.instance_mapping[loadbalancer.id] = driver_name\n self._update_statuses(loadbalancer)\n\n def update_loadbalancer(self, context, old_loadbalancer, loadbalancer):\n loadbalancer = data_models.LoadBalancer.from_dict(loadbalancer)\n old_loadbalancer = data_models.LoadBalancer.from_dict(old_loadbalancer)\n driver = self._get_driver(loadbalancer.id)\n try:\n driver.loadbalancer.update(old_loadbalancer, loadbalancer)\n except Exception:\n self._handle_failed_driver_call('update', loadbalancer,\n driver.get_name())\n else:\n self._update_statuses(loadbalancer)\n\n def delete_loadbalancer(self, context, loadbalancer):\n loadbalancer = data_models.LoadBalancer.from_dict(loadbalancer)\n driver = self._get_driver(loadbalancer.id)\n driver.loadbalancer.delete(loadbalancer)\n del self.instance_mapping[loadbalancer.id]\n\n def create_listener(self, context, listener):\n listener = data_models.Listener.from_dict(listener)\n driver = self._get_driver(listener.loadbalancer.id)\n try:\n driver.listener.create(listener)\n except Exception:\n self._handle_failed_driver_call('create', listener,\n driver.get_name())\n else:\n self._update_statuses(listener)\n\n def update_listener(self, context, old_listener, listener):\n listener = data_models.Listener.from_dict(listener)\n old_listener = data_models.Listener.from_dict(old_listener)\n driver = self._get_driver(listener.loadbalancer.id)\n try:\n driver.listener.update(old_listener, listener)\n except Exception:\n self._handle_failed_driver_call('update', listener,\n driver.get_name())\n else:\n self._update_statuses(listener)\n\n def delete_listener(self, context, listener):\n listener = data_models.Listener.from_dict(listener)\n driver = self._get_driver(listener.loadbalancer.id)\n driver.listener.delete(listener)\n\n def create_pool(self, context, pool):\n pool = data_models.Pool.from_dict(pool)\n driver = self._get_driver(pool.loadbalancer.id)\n try:\n driver.pool.create(pool)\n except Exception:\n self._handle_failed_driver_call('create', pool, 
driver.get_name())\n else:\n self._update_statuses(pool)\n\n def update_pool(self, context, old_pool, pool):\n pool = data_models.Pool.from_dict(pool)\n old_pool = data_models.Pool.from_dict(old_pool)\n driver = self._get_driver(pool.loadbalancer.id)\n try:\n driver.pool.update(old_pool, pool)\n except Exception:\n self._handle_failed_driver_call('update', pool, driver.get_name())\n else:\n self._update_statuses(pool)\n\n def delete_pool(self, context, pool):\n pool = data_models.Pool.from_dict(pool)\n driver = self._get_driver(pool.loadbalancer.id)\n driver.pool.delete(pool)\n\n def create_member(self, context, member):\n member = data_models.Member.from_dict(member)\n driver = self._get_driver(member.pool.loadbalancer.id)\n try:\n driver.member.create(member)\n except Exception:\n self._handle_failed_driver_call('create', member,\n driver.get_name())\n else:\n self._update_statuses(member)\n\n def update_member(self, context, old_member, member):\n member = data_models.Member.from_dict(member)\n old_member = data_models.Member.from_dict(old_member)\n driver = self._get_driver(member.pool.loadbalancer.id)\n try:\n driver.member.update(old_member, member)\n except Exception:\n self._handle_failed_driver_call('update', member,\n driver.get_name())\n else:\n self._update_statuses(member)\n\n def delete_member(self, context, member):\n member = data_models.Member.from_dict(member)\n driver = self._get_driver(member.pool.loadbalancer.id)\n driver.member.delete(member)\n\n def create_healthmonitor(self, context, healthmonitor):\n healthmonitor = data_models.HealthMonitor.from_dict(healthmonitor)\n driver = self._get_driver(healthmonitor.pool.loadbalancer.id)\n try:\n driver.healthmonitor.create(healthmonitor)\n except Exception:\n self._handle_failed_driver_call('create', healthmonitor,\n driver.get_name())\n else:\n self._update_statuses(healthmonitor)\n\n def update_healthmonitor(self, context, old_healthmonitor,\n healthmonitor):\n healthmonitor = data_models.HealthMonitor.from_dict(healthmonitor)\n old_healthmonitor = data_models.HealthMonitor.from_dict(\n old_healthmonitor)\n driver = self._get_driver(healthmonitor.pool.loadbalancer.id)\n try:\n driver.healthmonitor.update(old_healthmonitor, healthmonitor)\n except Exception:\n self._handle_failed_driver_call('update', healthmonitor,\n driver.get_name())\n else:\n self._update_statuses(healthmonitor)\n\n def delete_healthmonitor(self, context, healthmonitor):\n healthmonitor = data_models.HealthMonitor.from_dict(healthmonitor)\n driver = self._get_driver(healthmonitor.pool.loadbalancer.id)\n driver.healthmonitor.delete(healthmonitor)\n","sub_path":"neutron_lbaas/agent/agent_manager.py","file_name":"agent_manager.py","file_ext":"py","file_size_in_byte":16940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"534890918","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#Recursive way to define a basis spline\ndef recBasisSpline(i,k,t):\n if k < 2: #trivial case\n if t > i and t < (i+1):\n return 1\n else:\n return 0\n else: #Tree recursion as described by slide 260\n return ((t-i)/(i+k-1-i)) * recBasisSpline(i,k-1,t) + ((i+k-t)/(i+k-(i+1))) * recBasisSpline(i+1,k-1,t)\n\n#Create plot\nfor k in range(1,5): #order 1 to 4\n x = np.linspace(0,5) #time values from 0 to 5 given by numpy\n #Here, lambda is used to build a new one-argument function from recBasisSpline:\n #it accepts only a single time value, while the remaining arguments (i and k)\n #are fixed to the current 
needs\n #then the function is applied to all time values in the list via map\n line, = plt.plot(x, list(map(lambda x: recBasisSpline(0,k,x),x)), '-', linewidth=1.5, label='order '+str(k)) #creation of line in the plot for bspline of order k\n \nplt.legend() #Add the legend\nplt.xlabel('time t') #Add the xlabel\nplt.ylabel('function value') #Add the ylabel\n#plt.show() #in case we directly want to see the plot\nplt.savefig('Task_5_1.png') #save the plot to .png\n","sub_path":"Assignment5/Task_5_1.py","file_name":"Task_5_1.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"506007701","text":"import numpy as np\nimport pandas as pd\nimport torch\nimport torch.utils.data\nimport torchvision.transforms as tfms\nimport imageio\nfrom texar.torch.hyperparams import HParams\nfrom texar.torch.data.data import DatasetBase\nfrom texar.torch.data.data import DataSource\n\n\nclass MIMICCXR_DataSource(DataSource):\n \"\"\"\n Dataset website here:\n https://physionet.org/content/mimic-cxr-jpg/2.0.0/\n \"\"\"\n\n def __init__(self, hparams):\n self._hparams = HParams(hparams, self.default_hparams())\n self.mode = self._hparams[\"mode\"]\n self.csvpath = self._hparams[\"processed_csv\"]\n self.csv = pd.read_csv(self.csvpath)\n self.transforms = self.build_transform(self._hparams['transforms'])\n\n def __len__(self):\n return len(self.csv)\n\n def __iter__(self):\n for index, row in self.csv.iterrows():\n yield row\n\n def __getitem__(self, index):\n index = int(index)\n def get_entries(index):\n df = self.csv.iloc[index]\n paths = [x for x in df[0].split(',')]\n label = df[1:].tolist()\n return paths, label\n\n if self.mode == \"PER_IMAGE\":\n img_paths, label = get_entries(index)\n image_tensor = self.get_image(img_paths[0], self.transforms)\n target_tensor = torch.FloatTensor(label)\n channels = 3\n else: # PER_STUDY\n img_paths, label = get_entries(index)\n image_tensor = self.get_study(img_paths, self.transforms)\n target_tensor = torch.FloatTensor(label)\n channels = len(img_paths)\n\n return image_tensor, target_tensor, channels\n\n @staticmethod\n def build_transform(tsfm_list):\n t = []\n for func, args in tsfm_list:\n t.append(getattr(tfms, func)(**args))\n return tfms.Compose(t)\n\n def get_study(self, img_paths, shuffle=False):\n if shuffle:\n img_paths = np.random.permutation(img_paths).tolist()\n ret = []\n for i, img_path in enumerate(img_paths):\n image = imageio.imread(img_path, as_gray=True)\n ret.append(self.transforms(image))\n return ret\n\n def get_image(self, img_path, transforms):\n if self._hparams[\"input_channel\"] == \"GRAY\":\n image = imageio.imread(img_path, as_gray=True)\n else:\n image = imageio.imread(img_path, as_gray=False, pilmode=\"RGB\")\n image_tensor = transforms(image)\n return image_tensor\n\n @staticmethod\n def default_hparams():\n r\"\"\"Returns a dictionary of default hyperparameters.\n See the specific subclasses for the details.\n \"\"\"\n hparams = DatasetBase.default_hparams()\n hparams.update({\n \"transforms\": None,\n \"processed_csv\": None,\n \"mode\": None,\n \"batch_size\": 1,\n \"input_channel\": \"RGB\"\n })\n return hparams\n\n\n\nif __name__ == \"__main__\":\n hparams = config.dataset\n dataset = MIMICCXR_DataSource(hparams)\n # Dataloader\n train_size = int(0.8 * len(dataset))\n valid_size = len(dataset) - train_size\n torch.manual_seed(0)\n train_dataset, valid_dataset = torch.utils.data.random_split(dataset, [train_size, valid_size])\n\n # disable 
data aug\n valid_dataset.data_aug = None\n\n train_dataset.csv = dataset.csv.iloc[train_dataset.indices]\n valid_dataset.csv = dataset.csv.iloc[valid_dataset.indices]\n train_loader = torch.utils.data.DataLoader(train_dataset,\n batch_size=10,\n shuffle=True,\n num_workers=0,\n pin_memory=True,\n drop_last=True)\n\n for batch in train_loader:\n print(batch)\n","sub_path":"mimic_cxr.py","file_name":"mimic_cxr.py","file_ext":"py","file_size_in_byte":3859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"326024102","text":"#!/usr/bin/env python\n\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.kernel_ridge import KernelRidge\nfrom numpy.lib.recfunctions import append_fields\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom numpy import *\nfrom sklearn.metrics import r2_score\nfrom astropy.table import Table\nfrom sklearn.utils import shuffle\nimport time\nimport argparse\n\nfrom sklearn.externals import joblib\n\ndef import_data(true_path, tar_path):\n \n BGS_true = Table.read(true_path)\n BGS_tar = Table.read(tar_path)\n \n alpha = array(BGS_true['TRUEZ']/BGS_tar['Z'])\n alpha_sign = alpha>0\n \n fluxes = array(BGS_tar['TARGETID','FLUX_G', 'FLUX_R', 'FLUX_Z', \\\n 'FLUX_W1', 'FLUX_W2', 'MW_TRANSMISSION_G', \\\n 'MW_TRANSMISSION_R', 'MW_TRANSMISSION_Z', \\\n 'MW_TRANSMISSION_W1', 'MW_TRANSMISSION_W2','Z'])\n \n fluxes = append_fields(fluxes, 'TRUEZ', array(BGS_true['TRUEZ']), usemask=False)\n fluxes = append_fields(fluxes, 'alpha', alpha, usemask=False)\n \n #Se eliminan los alphas negativos. \n fluxes = fluxes[alpha_sign]\n return fluxes\n\ndef transform_to_array(rec_array):\n \n dim_1 = len(rec_array)\n dim_2 = len(rec_array[0])\n temp = zeros((dim_1, dim_2))\n\n for i in range(dim_1):\n for j in range(dim_2):\n temp[i, j] = rec_array[i][j]\n \n dict_of_names = {name:i for i, name in enumerate(rec_array.dtype.names)} #index of the names in the new array\n return temp, dict_of_names\n\ndef omit_columns_of_array(columns_to_omit, x_array):\n \n dim_1 = x_array.shape[0] #rows\n dim_2 = x_array.shape[1] #columns\n \n n = arange(dim_2)\n \n for i in columns_to_omit:\n msk = n != i\n n = n[msk]\n return x_array[:, n]\n\n\ndef train_test_data(test_size, fluxes_array, n_datos, r_state=0):\n \n fluxes_array = shuffle(fluxes_array, random_state=r_state)\n \n X_entrada, dict_of_names = transform_to_array(fluxes_array[0:n_datos][['FLUX_G', 'FLUX_R', 'FLUX_Z', 'FLUX_W1', 'FLUX_W2', 'Z']])\n y_entrada = fluxes_array[0:n_datos]['TRUEZ']\n\n z_col_idx = dict_of_names['Z']\n\n X_train, X_test, y_train, y_test = train_test_split(X_entrada, y_entrada , test_size = test_size, random_state=r_state)\n\n Z_tar_train = reshape(X_train[:, [z_col_idx]], (len(X_train)))\n Z_tar_test = reshape(X_test[:, [z_col_idx]], (len(X_test)))\n \n return X_train, X_test, y_train, y_test, Z_tar_train, Z_tar_test\n\ndef Standard_scale_data(X_train, X_test):\n sc_X = StandardScaler()\n X_train = sc_X.fit_transform(X_train)\n X_test = sc_X.transform(X_test)\n\n return X_train, X_test\n\n\ndef main():\n '''\n '''\n \n parser = argparse.ArgumentParser(description='n_jobs to fit the model')\n parser.add_argument('--n_jobs', help=\"number of procesors to fit the model\", type= int, default=1)\n arguments = parser.parse_args()\n n_jobs = arguments.n_jobs\n \n total_time = time.time()\n print()\n print('Importing BGS data, generating the fluxes array')\n print()\n true_data_path = 
'/hpcfs/home/sd.lobo251/Redshift_ML/BGS_files/BGS_true_file.fits'\n tar_data_path = '/hpcfs/home/sd.lobo251/Redshift_ML/BGS_files/BGS_tar_file.fits'\n fluxes = import_data(true_data_path, tar_data_path)\n fluxes = fluxes[0:600000]\n flux_names = ['FLUX_G', 'FLUX_R', 'FLUX_Z', 'FLUX_W1', 'FLUX_W2']\n\n #changing flux por log(flux)\n print()\n print('computing Log Flux, Log Alpha')\n print()\n for name in flux_names:\n fluxes[name] = log10(fluxes[name])\n fluxes['alpha'] = log10(fluxes['alpha'])\n\n n_data_points = [50000]\n fit_time_per_n = [[] for i in range(len(n_data_points))]\n predict_time_per_n = [[] for i in range(len(n_data_points))]\n n_repetitions = 4\n for i in range(n_repetitions):\n for j, n_data in enumerate(n_data_points):\n\n print('Generating train-test set for {} data points \\n'.format(n_data))\n X_train, X_test, y_train, y_test, Z_tar_train, Z_tar_test = train_test_data(test_size=0.2, fluxes_array=fluxes, n_datos=n_data, r_state= i)\n\n print('Scaling data \\n')\n\n X_train, X_test = Standard_scale_data(X_train, X_test)\n\n score = 'r2'#, 'neg_mean_squared_error']\n best_parameters = {'r2':0, 'neg_mean_squared_error':0, 'explained_variance':0}\n print(\"# Tuning hyper-parameters for %s\" % score)\n print()\n clf = GridSearchCV(KernelRidge(kernel='rbf', gamma=0.1), cv=3, n_jobs=n_jobs,\n param_grid={\"alpha\": [0.001, 0.0001],\n \"gamma\": [0.1, 1], 'kernel':['rbf']}, refit=True)\n\n print('Fitting model with n_jobs = {}'.format(n_jobs))\n print()\n t0 = time.time()\n clf.fit(X_train, y_train)\n fit_time = time.time() - t0\n fit_time_per_n[j].append(fit_time)\n print()\n print(\"Best parameters set found on development set:\")\n print()\n print(clf.best_params_)\n print()\n best_parameters[score] = clf.best_params_\n print(\"Grid scores on development set:\")\n print()\n means = clf.cv_results_['mean_test_score']\n stds = clf.cv_results_['std_test_score']\n for mean, std, params in zip(means, stds, clf.cv_results_['params']):\n print(\"%0.3f (+/-%0.03f) for %r\"\n % (mean, std * 2, params))\n print()\n print(\"%s score computed on the full evaluation set:\" % score)\n print()\n\n t0 = time.time()\n y_true, y_pred = y_test, clf.predict(X_test)\n predict_time = time.time() - t0\n predict_time_per_n[j].append(predict_time)\n model_name = 'krr_model_{}'.format(i)\n best_model = clf.best_estimator_\n print(r2_score(y_true, y_pred))\n print()\n print('Saving Model to {}'.format(model_name))\n joblib.dump(best_model, '{}.pkl'.format(model_name), compress=1)\n y_train_pred = best_model.predict(X_train)\n savetxt('{}.txt'.format(model_name), (y_train_pred, y_train, Z_tar_train))\n savetxt('{}.txt'.format(model_name), (y_pred, y_test, Z_tar_test))\n print('-----Done for n = {}-----'.format(n_data))\n \n #best_model = clf.best_estimator_\n #joblib.dump(best_model, 'best_krr.pkl', compress=1)\n total_run_time = time.time() - total_time\n #y_pred = best_model.predict(X_test)\n #y_train_pred = best_model.predict(X_train)\n #savetxt('krr_train.out', (y_train_pred, y_train, Z_tar_train))\n #savetxt('krr_test.out', (y_pred, y_test, Z_tar_test))\n set_printoptions(precision=4)\n print()\n print('-----------------------------------Results--------------------------------------------')\n print('Time results after {} repetitions, n_jobs = {}'.format(n_repetitions, n_jobs))\n print('n_datos: ', n_data_points)\n print()\n print('fit_time mean: ', array(fit_time_per_n).mean(axis=1))\n print('fit_time error: ', array(fit_time_per_n).std(axis=1)/sqrt(n_repetitions), '\\n')\n\n print('predict_time 
mean: ', array(predict_time_per_n).mean(axis=1))\n print('predict_time error: ', array(predict_time_per_n).std(axis=1)/sqrt(n_repetitions), '\\n')\n print('Total run time: {:.2f} s'.format(total_run_time))\n print('------------------------------------ Done --------------------------------------------')\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"Python/results_modelo/krr_model-50000.480138.clustermaster.uniandes.edu.co/krr_grid_search_modelo.py","file_name":"krr_grid_search_modelo.py","file_ext":"py","file_size_in_byte":7616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"27716115","text":"\"\"\"\r\nRANDOMIZED-SELECT(A, p, r, i)\r\n\tif p==r\r\n\t return A[p]\r\n\tq = RANDOMIZED-PARTITION(A, p, r)\r\n\tk = q-p+1\r\n\tif i == k\r\n\t return A[q]\r\n\telseif i < k\r\n\t return RANDOMIZED-SELECT(A, p, q-1, i)\r\n\telse\r\n\t return RANDOMIZED-SELECT(A, q+1, r, i-k)\r\n\r\nk is the number of elements in the subarray A[p..q]. If i == k, the pivot A[q] is the answer; if i < k, the i-th smallest element lies in the subarray A[p..q-1]; if i > k, the i-th smallest element lies in the subarray A[q+1..r],\r\nand it is the (i-k)-th smallest element of that subarray.\r\n\"\"\"\r\nimport random\r\n\r\nimport numpy as np\r\n\r\n#RandomizedPartition is called below but was missing from the original file;\r\n#this is the standard Lomuto partition around a uniformly random pivot.\r\ndef RandomizedPartition(a, p, r):\r\n #move a random pivot to the end\r\n i = random.randint(p, r)\r\n a[i], a[r] = a[r], a[i]\r\n x = a[r]\r\n i = p - 1\r\n for j in range(p, r):\r\n if a[j] <= x:\r\n i += 1\r\n a[i], a[j] = a[j], a[i]\r\n a[i + 1], a[r] = a[r], a[i + 1]\r\n return i + 1\r\n\r\ndef RandomizedSelect(a, p, r, i):\r\n if p == r:\r\n return a[p]\r\n #q is the partition position\r\n q = RandomizedPartition(a, p, r)\r\n #k is the number of elements in the subarray a[p..q]\r\n k = q - p +1\r\n #i means the i-th smallest\r\n if i == k:\r\n return a[q]\r\n elif i < k:\r\n return RandomizedSelect(a, p, q - 1, i)\r\n else:\r\n return RandomizedSelect(a, q + 1, r, i - k)\r\n\r\ndef main():\r\n #read a sequence of numbers from one input line\r\n array = np.array(input().split(),dtype = int)\r\n i = int(input())\r\n r = len(array) - 1\r\n p = 0\r\n print(RandomizedSelect(array, p, r,i))\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"算法分析/随机策略线性时间内寻找元素(递归与分治).py","file_name":"随机策略线性时间内寻找元素(递归与分治).py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"627462278","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@Time : 2019-12-24 22:03\n@Author : Wang Xin\n@Email : wangxin_buaa@163.com\n@File : config.py\n\"\"\"\n\nfrom ruamel import yaml\nimport collections\n\n\ndef load_config(path):\n with open(path) as f:\n config = yaml.load(f, Loader=yaml.RoundTripLoader)\n return config\n\n### https://stackoverflow.com/questions/7204805/how-to-merge-dictionaries-of-dictionaries\n### In case we have more than one config file, use this function to deep merge two nested dictionaries. 
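### Illustrative example (added): merge_config({'a': {'x': 1}}, {'a': {'y': 2}}) returns {'a': {'x': 1, 'y': 2}}; b is merged into a in place, and a conflict at the same leaf (e.g. {'a': 1} vs {'a': 2}) raises an Exception.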
\ndef merge_config(a, b, path=None):\n \"merges b into a\"\n if path is None: path = []\n for key in b:\n if key in a:\n if isinstance(a[key], dict) and isinstance(b[key], dict):\n merge_config(a[key], b[key], path + [str(key)])\n elif a[key] == b[key]:\n pass # same leaf value\n else:\n raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))\n else:\n a[key] = b[key]\n return a\n\ndef save_config(path, config):\n with open(path, 'w') as nf:\n yaml.dump(config, nf, Dumper=yaml.RoundTripDumper)\n\n\ndef print_config(config, step=''):\n for k, v in config.items():\n if isinstance(v, collections.OrderedDict):\n new_step = step + ' '\n print(step + k + ':')\n print_config(v, new_step)\n else:\n print(step + k + ':', v)\n\nclass Config:\n\n def __init__(self, defualt_path='./config/default.yaml'):\n with open(defualt_path) as f:\n self.config = yaml.load(f, Loader=yaml.RoundTripLoader)\n\n def load(self, path):\n with open(path) as f:\n self.config = yaml.load(f, Loader=yaml.RoundTripLoader)\n\n def save(self, path):\n with open(path, 'w') as nf:\n yaml.dump(self.config, nf, Dumper=yaml.RoundTripDumper)\n\n def get(self):\n return self.config\n\n def set(self, config):\n self.config = config\n","sub_path":"dp/utils/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"86195407","text":"# -*- coding: utf-8 -*-\n\ndef closest_power(base, num):\n '''\n base: base of the exponential, integer > 1\n Find the integer exponent such that base**exponent\nis closest to num.\n Note that the base**exponent may be either greater or\nsmaller than num.\n In case of a tie, return the smaller value.\n Returns the exponent.\n '''\n # using bisection search to guess\n # add the exponent and the corresponding absolute value of difference to a dictionary\n low = 0\n high = int(num)\n exponent = (low + high) // 2\n diff = abs(base**exponent - num)\n power_dict = {exponent: diff}\n\n # find all the exponent and its difference\n while high - low > 1:\n if base**exponent > num:\n high = exponent\n else:\n low = exponent\n exponent = (low + high) // 2\n diff = abs(base**exponent - num)\n power_dict[exponent] = diff\n\n # find the minimum difference\n min_diff = min(power_dict.values())\n closest_power = []\n for key in power_dict.keys():\n if power_dict[key] == min_diff:\n closest_power.append(key)\n\n return min(closest_power)\n\nprint(closest_power(11, 66.0))\nprint(closest_power(2, 384.0))\n# print(closest_power(155, 135532121)) # this method fails\n","sub_path":"mid_ex/p4.py","file_name":"p4.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"397622325","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 19 15:26:42 2019\n\n@author: wuzhiqiang\n\"\"\"\nimport sys,os\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))#存放c.py所在的绝对路径\nlib_dir = \"libs\"\nlevel = 2\nwhile level > 0:\n BASE_DIR = os.path.dirname(BASE_DIR)\n level -= 1\nlib_dir = os.path.join(BASE_DIR, lib_dir)\nif lib_dir not in sys.path:\n sys.path.append(lib_dir)","sub_path":"lib_path.py","file_name":"lib_path.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"299339909","text":"#!/usr/bin/env python\nfrom reddit import RedditWrapper\nimport random\nimport logging\nfrom enum import Enum\n\n# 
Game components\nclass Game(object):\n \"\"\"docstring for Game\"\"\"\n def __init__(self, api):\n super(Game, self).__init__()\n self.api = api\n self.players = self.init_players()\n self.timer = self.init_timer()\n self.current_round = Round()\n\n def init_players(self):\n player1 = Player(\"HUMAN\", \"bread\", \"man\")\n player2 = Player(\"AI1\", \"goat\", \"brain\")\n player3 = Player(\"AI2\", \"coffee\", \"cat\")\n return [player1, player2, player3]\n\n def init_timer(self):\n pass\n\n def trigger_words(self):\n return [player.trigger_word for player in self.players]\n\n def start(self):\n chosen_subreddit = random.choice(self.api.default_subreddits)\n seed = self.api.subreddit(backing_object=chosen_subreddit)\n choices = seed.top_posts\n \n for player in self.players:\n turn = Turn(self.api, choices, player)\n turn.start()\n choices = turn.new_choices\n\nclass Round(object):\n \"\"\"docstring for Round\"\"\"\n def __init__(self):\n super(Round, self).__init__()\n self.current_course = DinnerCourse.DRINKS\n\nclass DinnerCourse(Enum):\n DRINKS = 1\n APPETIZERS = 2\n MAIN = 3\n DESSERT = 4 \n\nclass Turn(object):\n \"\"\"docstring for Turn\"\"\"\n def __init__(self, api, choices, player):\n super(Turn, self).__init__()\n self.api = api\n self.choices = choices\n self.player = player\n self.seed_post = None\n self.new_choices = None\n\n def start(self):\n print (\"====PLAYER {}'s TURN====\".format(self.player.name))\n print(\"Choices:\")\n self.api.print_submission_list(self.choices)\n self.seed_post = random.choice(self.choices)\n\n print(u\"Chosen post:\\n\\t\\\"{}\\\"\".format(self.seed_post.title))\n linking_word = self.choose_linking_word()\n search_results = self.perform_search(seed_word=linking_word, trigger_word=self.player.trigger_word)\n self.new_choices = search_results\n\n def choose_linking_word(self):\n nouns = self.seed_post.nouns\n print (\"Nouns:\\n\\t{}\\n\".format(nouns))\n chosen_noun = random.choice(nouns)\n print (\"Chosen linking word:\\n\\t{}\\n\".format(chosen_noun))\n return chosen_noun\n\n def perform_search(self, seed_word, trigger_word, subreddit=None):\n if subreddit == None:\n subreddit = self.api.subreddit('all')\n return list(subreddit.search([seed_word, trigger_word]))\n\nclass Room(object):\n \"\"\"docstring for Room\"\"\"\n def __init__(self):\n super(Room, self).__init__()\n self.anger_meter = self.init_meter()\n def init_meter(self):\n pass\n \n\nclass Player(object):\n \"\"\"docstring for Player\"\"\"\n def __init__(self, name, trigger_word, olive_word):\n super(Player, self).__init__()\n self.name = name\n self.trigger_word = trigger_word\n self.olive_word = olive_word\n self.anger_meter = self.init_meter()\n\n def init_meter(self):\n pass\n\ndef main():\n api = RedditWrapper()\n game = Game(api)\n game.start()\n\nif __name__ == '__main__':\n main()\n\n ","sub_path":"src/Server/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"454864450","text":"from django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponse\nfrom django.http import JsonResponse\nimport datetime\n\n@csrf_exempt\ndef index(request):\n\n if request.method == 'POST':\n return postrequest()\n else:\n return postrequest()\n\n\ndef postrequest():\n response = {\n 'name': 'Vitor',\n 'location': 'Finland',\n 'is_active': True,\n 'count': 28\n }\n return JsonResponse(response)\n\n\ndef httprequest():\n now = datetime.datetime.now()\n 
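# interpolate the current timestamp into a plain-text message (the %s placeholder uses datetime's default str() formatting)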
response = \"It is now %s.\" % now\n return HttpResponse(response)\n\n","sub_path":"app/firsttab/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"395121280","text":"from urllib.request import Request, urlopen\nimport json\nimport fn\nimport tdx\nimport sys\nimport re\nimport sqllite_zxg as stock_zxg\nimport sqllite_gn as stock_gn\nimport gn\n\n# 实例化 通达信 行业类\ntdx_hy = tdx.hy()\n\n\nclass Zxg():\n gn_Sbider = ''\n\n def __init__(self, param): # 构造函数带参数\n\n self.gn_Sbider = gn.Spider()\n\n #生成概念\n self.gn_Sbider.get_code_from_gn()\n\n if param != 'n':\n self.add(param)\n\n def list(self):\n print (1)\n\n def add(self, stock_code):\n data = {}\n data['code'] = stock_code\n res = stock_zxg.save(data)\n\n if res == 1:\n\n self.gn_Sbider.gn_op(stock_code)\n\n def go(self):\n\n lists = stock_zxg.list()\n # fn.pt(lists)\n\n # 获取in的数据id\n code_rows = []\n for row in lists:\n code_rows.append(\"'\" + row['code'] + \"'\")\n code_ids = ','.join(code_rows) # '000537','002089'\n\n # 获取概念列表\n gn_rows = stock_gn.find_all(code_ids)\n stock_key_value = {}\n for row in gn_rows:\n code = row.get('code')\n stock_key_value[code] = row['info']\n\n # 输出列表\n for row in lists:\n code = row['code']\n info = stock_key_value.get(code)\n hy = fn.fill(tdx_hy.get_hy(code), 8)\n stock_name = fn.fill(tdx_hy.get_hy(code,1), 8)\n lt = fn.fill(tdx_hy.get_hy(code,2), 8)\n print (\"%s %s %s %s %s\" % (code,stock_name, hy,lt,info))\n\n# 举例\n# zxg 000537 //添加自选股\n# zxg //查看\n\nzxg = Zxg(sys.argv[1])\n# zxg = Zxg('n')\n\n# zxg = Zxg('000537')\nzxg.go()\n","sub_path":"linux/shell/stock/zxg.py","file_name":"zxg.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"632300164","text":"import httplib2\nimport json\nimport random\nimport string\n\n# Importing the database classes\nfrom db import Base, Category, Items, User\n\n# Importing necessary sqlalchemy modules\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import and_\n\n# Importing Google Sign IN OAuth Libs\nfrom oauth2client.client import flow_from_clientsecrets, FlowExchangeError\n\n# Importing neccessary flask modules\nfrom flask import Flask, render_template, url_for, request, jsonify, flash, redirect\nfrom flask import session as login_session\nfrom flask import make_response\n\n# Creating instance of Flask\napp = Flask(__name__, template_folder='templates')\n\n# Connect to the database\nengine = create_engine('sqlite:///catalog.db')\n\n# Bind engine to Base\nBase.metadata.bind = engine\n\n# Creating instance of sessionmaker\nDBSession = sessionmaker(bind=engine)\n\n# Instatiate the DBSession Class\nsession = DBSession()\n\n# Fetching Client_id from client_secrets.json\nCLIENT_ID = json.loads(\n open('Client_Secrets/client_secrets.json', 'r').read())['web']['client_id']\n\n\n# Route for showing all categories\n@app.route('/')\n@app.route('/categories')\ndef showCategories():\n categories = session.query(Category).all()\n if 'username' in login_session:\n return render_template('categories.html', categories=categories)\n else:\n return render_template('public_categories.html', categories=categories)\n\n\n# Route to show specific category items\n@app.route('/categories//items')\ndef showItems(category_id):\n category = session.query(Category).filter_by(id=category_id).one()\n items = 
session.query(Items).filter_by(category_id=category_id).all()\n if 'username' in login_session:\n return render_template(\n 'items.html',\n items=items,\n category_id=category_id)\n else:\n return render_template(\n 'public_items.html',\n items=items,\n category_id=category_id, category_name=category.name)\n\n\n# Route to create a new item for a category_id\n@app.route('/categories/<int:category_id>/items/new', methods=['GET', 'POST'])\ndef newItem(category_id):\n if 'username' in login_session:\n if request.method == 'GET':\n category = session.query(Category).filter_by(id=category_id).one()\n return render_template(\n 'newItem.html',\n category_id=category_id,\n category_name=category.name)\n elif request.method == 'POST':\n name = request.form['name']\n price = request.form['price']\n desc = request.form['desc']\n item = Items(\n name=name,\n price=price,\n desc=desc,\n category_id=category_id,\n user_id=login_session['user_id'])\n session.add(item)\n session.commit()\n flash('A New Item Added')\n return redirect(url_for('showItems', category_id=item.category_id))\n else:\n response = make_response(json.dumps('You are not Authorized'), 403)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n\n# Route to edit a specific item of a category_id\n@app.route(\n '/categories/<int:category_id>/items/<int:item_id>/edit',\n methods=[\n 'GET',\n 'POST'])\ndef editItem(category_id, item_id):\n if 'username' in login_session:\n item = session.query(Items).filter(\n Items.id == item_id,\n Items.category_id == category_id,\n Items.user_id == login_session['user_id']).one_or_none()\n\n # If no item matches the current user_id, return an error (one_or_none avoids an exception here)\n if not item:\n return jsonify({\"error\": \"You can edit your items only\"})\n\n if request.method == 'GET':\n return render_template('edit_item.html', item=item)\n elif request.method == 'POST':\n item.name = request.form['name']\n item.price = request.form['price']\n item.desc = request.form['desc']\n session.add(item)\n session.commit()\n flash('Item Edited Successfully')\n return redirect(url_for('showItems', category_id=item.category_id))\n else:\n response = make_response(json.dumps('You are not Authorized'), 403)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n\n# Route to delete a specific item of a category_id\n@app.route(\n '/categories/<int:category_id>/items/<int:item_id>/delete',\n methods=[\n 'GET',\n 'POST'])\ndef delItem(category_id, item_id):\n if 'username' in login_session:\n item = session.query(Items).filter(\n Items.id == item_id,\n Items.category_id == category_id,\n Items.user_id == login_session['user_id']).one_or_none()\n\n # If no item matches the current user_id, return an error (one_or_none avoids an exception here)\n if not item:\n return jsonify({\"error\": \"You can delete your items only\"})\n\n if request.method == 'GET':\n return render_template('delete_item.html', item=item)\n elif request.method == 'POST':\n session.delete(item)\n session.commit()\n flash('Item Deleted Successfully')\n return redirect(url_for('showItems', category_id=item.category_id))\n else:\n response = make_response(json.dumps('You are not Authorized'), 403)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n\n# API Endpoint for All Categories\n@app.route('/categories/json')\ndef categoriesEndpoint():\n categories = session.query(Category).all()\n if categories:\n return jsonify(category=[i.serialize for i in categories])\n else:\n return jsonify({\"error\": \"%s Not Found\" % '404'})\n\n\n# API Endpoint for Items of a Specific category\n@app.route('/categories/<int:category_id>/json')\ndef category_Id_Endpoint(category_id):\n items = 
session.query(Items).filter_by(category_id=category_id).all()\n if items:\n return jsonify(category=[i.serialize for i in items])\n else:\n return jsonify({\"error\": \"%s Not Found\" % '404'})\n\n\n# API Endpoint for a specific item\n@app.route('/items/<int:item_id>/json')\ndef category_item_Endpoint(item_id):\n item = session.query(Items).filter_by(id=item_id).one_or_none()\n if not item:\n return jsonify({\"error\": \"%s Not Found\" % '404'})\n else:\n return jsonify(item=item.serialize)\n\n\n# Create an anti-forgery state token\n@app.route('/login')\ndef showLogin():\n state = ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in xrange(32))\n login_session['state'] = state\n return render_template('login.html', STATE=state)\n\n\n# Route for Google Sign in Authentication\n@app.route('/gconnect', methods=['POST'])\ndef gconnect():\n # Validate state token\n if request.args.get('state') != login_session['state']:\n response = make_response(json.dumps('Invalid state parameter.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Obtain authorization code\n code = request.data\n\n try:\n # Upgrade the authorization code into a credentials object\n oauth_flow = flow_from_clientsecrets(\n 'Client_Secrets/client_secrets.json', scope='')\n oauth_flow.redirect_uri = 'postmessage'\n credentials = oauth_flow.step2_exchange(code)\n except FlowExchangeError:\n response = make_response(\n json.dumps('Failed to upgrade the authorization code.'), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Check that the access token is valid.\n access_token = credentials.access_token\n url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=%s'\n % access_token)\n h = httplib2.Http()\n result = json.loads(h.request(url, 'GET')[1])\n # If there was an error in the access token info, abort.\n if result.get('error') is not None:\n response = make_response(json.dumps(result.get('error')), 500)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is used for the intended user.\n gplus_id = credentials.id_token['sub']\n if result['user_id'] != gplus_id:\n response = make_response(\n json.dumps(\"Token's user ID doesn't match given user ID.\"), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Verify that the access token is valid for this app.\n if result['issued_to'] != CLIENT_ID:\n response = make_response(\n json.dumps(\"Token's client ID does not match app's.\"), 401)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n stored_access_token = login_session.get('access_token')\n stored_gplus_id = login_session.get('gplus_id')\n\n # Check whether this user is already logged in\n if stored_access_token is not None and gplus_id == stored_gplus_id:\n response = make_response(\n json.dumps('Current user is already connected.'), 200)\n response.headers['Content-Type'] = 'application/json'\n return response\n\n # Store the access token in the session for later use.\n login_session['access_token'] = credentials.access_token\n login_session['gplus_id'] = gplus_id\n\n # Get user info\n url1 = (\n 'https://www.googleapis.com/oauth2/v1/userinfo?access_token=%s&alt=%s' %\n (access_token, 'json'))\n data = json.loads(h.request(url1, 'GET')[1])\n\n login_session['username'] = data['name']\n login_session['picture'] = data['picture']\n login_session['email'] = data['email']\n\n user_id = getUserID(login_session['email'])\n\n # If user not 
exists create one\n if not user_id:\n user_id = createUser(login_session)\n\n login_session['user_id'] = user_id\n\n output = ''\n output += ' 1]\n return tokens\n\n# add doc to vocab after loading\ndef add_doc_to_vocab(filename, vocab):\n # load doc\n doc = load_doc(filename)\n # clean doc\n for sentence in doc:\n tmp = clean_doc(sentence)\n vocab.append(tmp)\n print(vocab)\n# define vocab\nvocab = []\n# add doc to vocab\nfilename = '/Users/anishmeka/Desktop/CS196/Team_21/Project6500.csv'\nadd_doc_to_vocab(filename, vocab)\nprint(len(vocab))\n","sub_path":"datapreprocessing.py","file_name":"datapreprocessing.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"214044460","text":"import numpy as np\nimport pandas as pd\nimport ast\nimport statistics as st\n\nprotocolosLargosFisico = [\"02\",\"03\",\"06\",\"07\",\"10\",\"11\",\"14\",\"15\",\"18\",\"19\",\"22\",\"23\",\"26\",\"27\",\"30\",\"31\"]\nprotocolosLargosDigital = [\"01\",\"04\",\"05\",\"08\",\"09\",\"12\",\"13\",\"16\",\"17\",\"20\",\"21\",\"24\",\"25\",\"28\",\"29\"]\n\nprotocolosCortosDigital = protocolosLargosFisico\nprotocolosCortosFisico = protocolosLargosDigital\n\n#Diccionarios para almacenar tiempos\ntiemposLargoFisico = {}\ntiemposLargoDigital = {}\ntiemposCortoFisico = {}\ntiemposCortoDigital = {}\n\n#Muestra las secuencias correctas y los tiempos para la version fisica\ndef mostrarCorrectasFisico(csvFisico):\n\t#Me quedo con todo los casos en que contestaron correctamente\n\tsecusCorrectasFisico = csvFisico[csvFisico['Tipo Evento'] == \"True\"]\n\n\tprint(secusCorrectasFisico.loc[:, ['Valor Evento', 'Tiempo Desde Inicio']])\n\n\n\n#Muestra las secuencias correctas y los tiempos para la version digital\ndef mostrarCorrectasDigital(csvDigital):\n\t#Me quedo con todo los casos en que contestaron correctamente\n\tsecusCorrectasDigital = csvDigital[csvDigital['col1'] == \"CORRECT\"]\n\n\t#Lista de filas con respuestas correctas\n\tindicesSecusCorrectas = list(csvDigital[csvDigital['col1'] == \"CORRECT\"].index)\n\n\tfor i in indicesSecusCorrectas:\n\t\tprint(csvDigital.iloc[i,9],csvDigital.iloc[i-1,9])\n\n\ndef recorroCSVsFisico():\n\t#Recorro todos los archivos del Fisico\n\tfor nroUsr in protocolosLargosFisico:\n\t\t#Conformo cada nombre segun el array\t\n\t\tnombreCSV = path+\"/CSVsFisico/Usr\"+nroUsr+\"-PL-Fisico.csv\"\n\n\t\t#Levanto el csv, definiendo como header la linea 29\n\t\tcsvFisico = pd.read_csv(nombreCSV, header = 29) \n\n\t\t#Llamo a la funcion que procesa el fisico\n\t\tmostrarCorrectasFisico(csvFisico)\n\n\ndef recorroCSVsDigital():\n\t#Recorro todos los archivos del Digital\n\tfor nroUsr in protocolosLargosDigital:\n\t\t#Conformo cada nombre segun el array\n\t\tnombreCSV = path+\"/CSVsDigital/Usr\"+nroUsr+\"-PL-Digital.csv\"\n\t\tprint(nombreCSV)\n\n\t\tcsvDigital = pd.read_csv(nombreCSV, delimiter = \";\")\n\n\t\t#Llamo a la funcion que procesa el Digital\n\t\tmostrarCorrectasDigital(csvDigital)\n\n\ndef registrarTiemposCorrectosFisico(listaCSVs):\n\n\tfor nombreCSV in listaCSVs:\n\n\t\t#Levanto el csv, definiendo como header la linea 29\n\t\tcsvFisico = pd.read_csv(nombreCSV, header = 29)\n\n\t\t#Me quedo con todo los casos en que contestaron correctamente\n\t\tsecusCorrectasFisico = csvFisico[csvFisico['Tipo Evento'] == \"True\"]\n\n\t\t#Lista de filas con respuestas correctas\n\t\tindicesSecusCorrectas = list(csvFisico[csvFisico['Tipo Evento'] == \"True\"].index)\n\n\t\tfor i in 
indicesSecusCorrectas:\n\t\t\t#Convert the ['A','B','C'] list into the string ABC\n\t\t\tstringSecu = \"\".join(ast.literal_eval(csvFisico.iloc[i,6]))\n\t\t\ttiempoTotal = float(csvFisico.iloc[i,8])\n\n\t\t\t#Assign the time to the times dictionary, initialising the entry as empty if it is not there yet\n\t\t\ttiemposLargoFisico.setdefault(stringSecu,[]).append(tiempoTotal)\n\n\treturn tiemposLargoFisico\n\n\ndef registrarTiemposCorrectosDigital(listaCSVs):\n\n\tfor nombreCSV in listaCSVs:\n\n\t\tcsvDigital = pd.read_csv(nombreCSV, delimiter = \";\")\n\n\t\t#List of rows with correct answers\n\t\tindicesSecusCorrectas = list(csvDigital[csvDigital['col1'] == \"CORRECT\"].index)\n\n\t\tfor i in indicesSecusCorrectas:\n\n\t\t\t#Convert the ['A','B','C'] list into the string ABC\n\t\t\tstringSecu = csvDigital.iloc[i,9]\n\t\t\t#Keep the time of each correct answer\n\t\t\ttiempoTotal = float(csvDigital.iloc[i-1,9])\n\n\t\t\t#Assign the time to the times dictionary, initialising the entry as empty if it is not there yet\n\t\t\ttiemposLargoDigital.setdefault(stringSecu,[]).append(tiempoTotal)\n\n\treturn tiemposLargoDigital\n\n\npath = \"/home/maxi/git/corsi/results\"\n\n######TESTING LONG PROTOCOL###########\n\nlistaCSVs = []\n#Iterate over all the Physical-version files\nfor nroUsr in protocolosLargosFisico:\n\t#Build each file name from the array\t\n\tnombreCSV = path+\"/CSVsFisico/Usr\"+nroUsr+\"-PL-Fisico.csv\"\n\n\tlistaCSVs.append(nombreCSV)\n\ntiemposLargoFisico = registrarTiemposCorrectosFisico(listaCSVs)\n\nlistaCSVs = []\n#Iterate over all the Digital-version files\nfor nroUsr in protocolosLargosDigital:\n\t#Build each file name from the array\t\n\tnombreCSV = path+\"/CSVsDigital/Usr\"+nroUsr+\"-PL-Digital.csv\"\n\n\tlistaCSVs.append(nombreCSV)\n\ntiemposLargoDigital = registrarTiemposCorrectosDigital(listaCSVs)\n\n\ndiferencia = 0\n#Iterate over the times, take the median per sequence and compute the differences\nfor secu in tiemposLargoDigital.keys():\n\tparcial = 0\n\ttry: \n\t\tpromedioTiemposSecuFisico = st.median(tiemposLargoFisico[secu])\n\t\tparcial = promedioTiemposSecuFisico\n\t\tprint(\"Fisico : \"+secu, promedioTiemposSecuFisico)\n\texcept:\n\t\tprint(\"No hay secus correctas en el Fisico para: \", secu)\n\n\ttry:\n\t\tpromedioTiemposSecuDigital = st.median(tiemposLargoDigital[secu]) \n\t\tprint(\"Digital: \"+secu, promedioTiemposSecuDigital)\n\t\tparcial -= promedioTiemposSecuDigital\n\texcept:\n\t\tprint(\"No hay secus correctas en el Digital para: \", secu)\n\n\tdiferencia+=parcial\n\nprint(\"Diferencia total Largo: \", diferencia)\n\n######TESTING SHORT PROTOCOL###########\n\nlistaCSVs = []\n#Iterate over all the Physical-version files\nfor nroUsr in protocolosCortosFisico:\n\t#Build each file name from the array\t\n\tnombreCSV = path+\"/CSVsFisico/Usr\"+nroUsr+\"-PC-Fisico.csv\"\n\n\tlistaCSVs.append(nombreCSV)\n\ntiemposCortoFisico = registrarTiemposCorrectosFisico(listaCSVs)\n\nlistaCSVs = []\n#Iterate over all the Digital-version files\nfor nroUsr in protocolosCortosDigital:\n\t#Build each file name from the array\t\n\tnombreCSV = path+\"/CSVsDigital/Usr\"+nroUsr+\"-PC-Digital.csv\"\n\n\tlistaCSVs.append(nombreCSV)\n\ntiemposCortoDigital = registrarTiemposCorrectosDigital(listaCSVs)\n\n\ndiferencia = 0\n#Iterate over the times, take the median per sequence and compute the differences\nfor secu in tiemposCortoDigital.keys():\n\tparcial = 0\n\ttry: \n\t\tpromedioTiemposSecuFisico = st.median(tiemposCortoFisico[secu])\n\t\tparcial = promedioTiemposSecuFisico\n\t\tprint(\"Fisico : \"+secu, promedioTiemposSecuFisico)\n\texcept:\n\t\tprint(\"No hay secus correctas en el Fisico para: \", 
secu)\n\n\ttry:\n\t\tpromedioTiemposSecuDigital = st.median(tiemposCortoDigital[secu]) \n\t\tprint(\"Digital: \"+secu, promedioTiemposSecuDigital)\n\t\tparcial -= promedioTiemposSecuDigital\n\texcept:\n\t\tprint(\"No hay secus correctas en el Digital para: \", secu)\n\n\tdiferencia+=parcial\n\t#print(parcial)\n\t#print(diferencia)\n\nprint(\"Diferencia total Corto: \", diferencia)\n\n\n","sub_path":"results/levantoCSVs.py","file_name":"levantoCSVs.py","file_ext":"py","file_size_in_byte":6184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"300718371","text":"import numpy as np\nimport nnpy\nimport json\nimport os\n\nclass GanglionDataReceiver:\n def __init__(self):\n self.ENDPOINT = 'tcp://0.0.0.0:8080'\n self.cs = nnpy.Socket(nnpy.AF_SP, nnpy.SUB)\n self.cs.connect(self.ENDPOINT)\n self.cs.setsockopt(nnpy.SUB, nnpy.SUB_SUBSCRIBE, '')\n\n self.duration = 0.3\n self.freq = 440\n\n def get_signal(self):\n alpha_arr = np.zeros((2,1604))\n for i in range(alpha_arr.shape[1]):\n json_data = self.cs.recv()\n data_ls = json.loads(json_data)\n alpha_arr[0][i] = data_ls[0]\n alpha_arr[1][i] = data_ls[1]\n timestamp = data_ls[2]\n \n if i in {0, 401, 802, 1203, 1603}:\n os.system('play --no-show-progress --null --channels 1 synth %s sine %f' % (self.duration, self.freq))\n # print(timestamp)\n\n self.cs.close()\n return alpha_arr\n\n# if __name__ == '__main__':\n # main = GanglionDataReceiver()\n # main.get_signal()","sub_path":"data_receive.py","file_name":"data_receive.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"523540741","text":"# Fit & predict for regression\n# Now, you will fit a linear regression and predict life expectancy using just one feature. You saw Andy do this\n# earlier using the 'RM' feature of the Boston housing dataset. In this exercise, you will use the 'fertility' feature\n# of the Gapminder dataset. Since the goal is to predict life expectancy, the target variable here is 'life'. The\n# array for the target variable has been pre-loaded as y and the array for 'fertility' has been pre-loaded as\n# X_fertility.\n#\n# A scatter plot with 'fertility' on the x-axis and 'life' on the y-axis has been generated. As you can see, there is\n# a strongly negative correlation, so a linear regression should be able to capture this trend. Your job is to fit a\n# linear regression and then predict the life expectancy, overlaying these predicted values on the plot to generate a\n# regression line. 
You will also compute and print the score using sckit-learn's .score() method.\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom preload import X_fertility, y\n\nplt.scatter(X_fertility, y)\nplt.xlabel('Fertility')\nplt.ylabel('Life Expectancy')\n\n\n# Import LinearRegression\nfrom sklearn.linear_model import LinearRegression\n\n# Create the regressor: reg\nreg = LinearRegression()\n\n# Create the prediction space\nprediction_space = np.linspace(min(X_fertility), max(X_fertility)).reshape(-1, 1)\n\n# Fit the model to the data\nreg.fit(X_fertility, y)\n\n# Compute predictions over the prediction space: y_pred\ny_pred = reg.predict(prediction_space)\n\n# Print R^2\nprint(reg.score(X_fertility, y))\n\n# Plot regression line\nplt.plot(prediction_space, y_pred, color='black', linewidth=3)\nplt.show()\n","sub_path":"Supervised_Learning_with_scikit-learn/02_Regression/exercise3.py","file_name":"exercise3.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"408845051","text":"# Made by Riggyman\n# Discord: Riggyman#8281\n# Epic Games: KING ازگر\n# Steam: Phantom_Riggyman\n# ROBLOX: Fake_Riggyman\n\n# UPLOADED TO GITHUB ON 22/08/19\n\nimport random\nfrom os import system\nfrom sys import exit\n\nops = ['addition', 'multiplication', 'subtraction', 'division']\n\nprint(\"Enter a number, or type 'random' to get a random one generated between 1 and 100.\")\nnum1 = input()\nif (num1.upper() == \"RANDOM\"):\n num1 = random.randint(1, 101)\nelse:\n num1 = int(num1)\nsystem(\"cls\")\nprint(\"Enter another number, or type 'random' to get a random one generated between 1 and 100.\")\nnum2 = input()\nif (num2.upper() == \"RANDOM\"):\n num2 = random.randint(1, 101)\nelse:\n num2 = int(num2)\nsystem(\"cls\")\nprint(\"Now, please select a mathematical operation (e.g., addition, multiplication, subtraction and division), or, type 'random' to have one auto-selected for you.\")\noper = input()\nif (oper.upper() == \"RANDOM\"):\n fin = random.choice(ops)\n if (fin == \"addition\"):\n finint = num1 + num2\n if (fin == \"multiplication\"):\n finint = num1 * num2\n if (fin == \"subtraction\"):\n finint = num1 - num2\n if (fin == \"division\"):\n finint = num1 / num2\nelif (oper.upper() == \"ADDITION\"):\n finint = num1 + num2\nelif (oper.upper() == \"MULTIPLICATION\"):\n finint = num1 * num2\nelif (oper.upper() == \"SUBTRACTION\"):\n finint = num1 - num2\nelif (oper.upper() == \"DIVISION\"):\n finint = num1 / num2\nelse:\n print(oper + \"is not valid. 
Please re-launch the application if you would like to do this again.\")\n input(\"...\")\n exit()\nprint(\"Your final number is: \", finint)\ninput(\"...\")\n","sub_path":"calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"127458807","text":"import copy\nimport random\nfrom players.scripts.Script import Script\n\nclass DSL:\n \n def __init__(self):\n \n self.start = 'S'\n \n self._grammar = {}\n self._grammar[self.start] = ['if B S', '']\n self._grammar['B'] = ['B1', 'B1 and B1']\n self._grammar['B1'] = ['DSL.isDoubles(a)', 'DSL.containsNumber(a, NUMBER )', 'DSL.actionWinsColumn(state,a)', 'DSL.hasWonColumn(state,a)', \n 'DSL.numberPositionsProgressedThisRoundColumn(state, NUMBER ) > SMALL_NUMBER and DSL.isStopAction(a)', 'DSL.isStopAction(a)',\n 'DSL.numberPositionsConquered(state, NUMBER ) > SMALL_NUMBER and DSL.containsNumber(a, NUMBER )',\n 'DSL.hasIncentive(state, a, SMALL_NUMBER, SMALL_NUMBER)']\n self._grammar['NUMBER'] = ['2', '3', '4', '5', '6']\n self._grammar['SMALL_NUMBER'] = ['0', '1', '2'] \n \n @staticmethod\n def isDoubles(action):\n \"\"\"\n Returns true if the action is a double. \n \n Examples of doubles: (2, 2), (3, 3), (4, ,4)\n \"\"\"\n if len(action) > 1 and action[0] == action[1]:\n return True\n else:\n return False\n \n @staticmethod\n def containsNumber(action, number):\n \"\"\"\n Returns true if the action contains the number\n \n Example: returns true for action (2, 3) and number 3\n returns true for action (2, 6) and number 4\n \"\"\"\n if not isinstance(action, str):\n if number in action:\n return True\n return False\n \n @staticmethod\n def actionWinsColumn(state, action):\n \"\"\"\n Returns true if the action completes a column for the player\n \"\"\"\n copy_state = copy.deepcopy(state)\n copy_state.play(action)\n columns_won = copy_state.columns_won_current_round()\n columns_won_previously = state.columns_won_current_round()\n if len(columns_won) > 0 and columns_won != columns_won_previously:\n return True\n return False\n \n @staticmethod\n def numberPositionsProgressedThisRoundColumn(state, column):\n \"\"\"\n Returns the number of positions progressed in a given column in the current round.\n A round finishes once the player chooses to stop, which is action n in this implementation. \n \"\"\"\n return state.number_positions_conquered_this_round(column)\n \n @staticmethod\n def numberPositionsConquered(state, column):\n \"\"\"\n Returns the number of positions conquered in a given column. A position is\n conquered once the player progresses in the column and decides to stop. By stopping, the\n temporary markers are replaced by permanent markers and the positions are conquered. \n \"\"\"\n return state.number_positions_conquered(column)\n\n @staticmethod\n def hasWonColumn(state, action):\n \"\"\"\n Returns true if the player has won a column, i.e., if the player progressed all the way\n to the top of a given column. 
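(Note: the action argument is not inspected here; only the columns already won in the current round of the given state are checked.)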
\n \"\"\"\n return len(state.columns_won_current_round()) > 0\n \n @staticmethod\n def isStopAction(action):\n \"\"\"\n Returns true if the action is a stop action, i.e., action n in this implementation.\n \"\"\"\n if isinstance(action, str) and action == 'n':\n return True\n return False\n\n @staticmethod\n def hasIncentive(state, action, small_number_1, small_number_2):\n \"\"\"\n Returns true with probability proportional to the action's gathered incentive points when incentives are\n to catch up to the opponent at most small_number_1 steps away or\n to reach the top of the column if it is at most small_number_2 steps away.\n \"\"\"\n copy_state = state.clone()\n\n player_column_positions = {}\n\n if isinstance(action, str): # Not sure what to do with these other than selecting randomly\n return random.randint(0, 100) < 50\n \n copy_state.play(action)\n\n who_won, is_over = copy_state.is_finished()\n\n if is_over == True and who_won != copy_state.player_turn: # Select action if it wins the game\n return True\n\n for column_index in range(copy_state.board_game.column_range[0], copy_state.board_game.column_range[1] + 1): # Current player's marker positions after this action\n player_column_positions[column_index] = copy_state.number_positions_conquered(column_index) + copy_state.number_positions_conquered_this_round(column_index)\n \n copy_state.play(\"n\") # End turn so that the other player becomes active and we can get the positions of its markers\n\n incentive_points = 0 # Gather up all the different incentives\n\n for column_index, _ in enumerate(copy_state.board_game.board):\n if column_index in action:\n column_size = len(copy_state.board_game.board[column_index])\n opponent_column_position = copy_state.number_positions_conquered(column_index)\n\n distance_from_opponent = opponent_column_position - player_column_positions[column_index]\n distance_from_top = column_size - player_column_positions[column_index]\n\n if opponent_column_position >= 0 and abs(distance_from_opponent) <= small_number_1: # If opponent is within reach\n incentive_points += 1 # Could be modified to have varying points depending on the distance\n \n if distance_from_top <= small_number_2: # If the top of the column is within reach\n incentive_points += 1\n \n if random.randint(0, 100) <= incentive_points * 10 + 50: # Higher the total incentive, higher the probability of selecting the action\n return True\n else:\n return False","sub_path":"Assignment 1/players/scripts/DSL.py","file_name":"DSL.py","file_ext":"py","file_size_in_byte":5913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"220936585","text":"import time\nimport torch\nimport numpy as np\nimport networkx as nx\nfrom pathlib import Path\n\nfrom dataset import load_dataset\nfrom .experiment import load_experiment\n\nfrom utils import evaluation\nfrom utils.constants import DATASET_NAMES\nfrom utils.serializer import save_yaml, load_yaml\n\n\ndef pad_and_add(v1, v2):\n if v1 is None:\n return v2\n\n maxdim = max(len(v1), len(v2))\n new_v1 = np.zeros((maxdim,))\n new_v2 = np.zeros((maxdim,))\n new_v1[:len(v1)] = v1\n new_v2[:len(v2)] = v2\n return new_v1 + new_v2\n\n\nclass Metric:\n @classmethod\n def load(cls, datadict):\n metric = cls()\n metric.scores = datadict['scores']\n metric.mean = datadict['mean']\n metric.std = datadict['std']\n metric.data_hist = datadict['data_hist']\n metric.samples_hist = datadict['samples_hist']\n return metric\n\n def __init__(self):\n self.scores = []\n self.mean 
= None\n self.std = None\n self.data_hist = None\n self.samples_hist = None\n\n @property\n def is_computed(self):\n return self.scores != []\n\n def get_score_func(self):\n raise NotImplementedError\n\n def update(self, test_data, samples):\n kld, data_hist, samples_hist = evaluation.kl_divergence(test_data, samples, self.name)\n self.scores.append(float(kld))\n self.data_hist = pad_and_add(self.data_hist, data_hist)\n self.samples_hist = pad_and_add(self.samples_hist, samples_hist)\n\n def finalize(self, num_trials):\n self.mean = float(np.mean(self.scores))\n self.std = float(np.std(self.scores))\n self.data_hist = [float(x) for x in self.data_hist / num_trials]\n self.samples_hist = [float(x) for x in self.samples_hist / num_trials]\n\n def asdict(self):\n return self.__dict__\n\n\nclass DegreeDistribution(Metric):\n name = \"degree\"\n\n\nclass ClusteringCoefficient(Metric):\n name = \"clustering\"\n\n\nclass OrbitCount(Metric):\n name = \"orbit\"\n\n\nclass BetweennessCentrality(Metric):\n name = \"betweenness\"\n\n\nclass Result:\n @classmethod\n def load(cls, model_name, dataset_name, path):\n resultdict = load_yaml(path)\n result = cls(model_name, dataset_name)\n result.degree = DegreeDistribution.load(resultdict.pop('degree'))\n result.clustering = ClusteringCoefficient.load(resultdict.pop('clustering'))\n result.orbit = OrbitCount.load(resultdict.pop('orbit'))\n result.betweenness = OrbitCount.load(resultdict.pop('betweenness'))\n for key in resultdict:\n setattr(result, key, resultdict[key])\n return result\n\n def __init__(self, model_name, dataset_name):\n self.model_name = model_name\n self.dataset_name = dataset_name\n self.degree = DegreeDistribution()\n self.clustering = ClusteringCoefficient()\n self.orbit = OrbitCount()\n self.betweenness = BetweennessCentrality()\n\n @property\n def uniqueness_not_calculated(self):\n return not hasattr(self, 'uniqueness1000')\n\n @property\n def novelty_not_calculated(self):\n return not hasattr(self, 'novelty1000')\n\n def asdict(self):\n data = self.__dict__\n data['degree'] = self.degree.asdict()\n data['clustering'] = self.clustering.asdict()\n data['orbit'] = self.orbit.asdict()\n data['betweenness'] = self.betweenness.asdict()\n return data\n\n def save(self, path):\n save_yaml(self.asdict(), path / f\"{self.dataset_name}.yaml\")\n\n def update(self, name, value):\n setattr(self, name, value)\n\n def update_time(self, num_samples, time_elapsed):\n if time_elapsed is not None:\n setattr(self, f\"time{num_samples}\", time_elapsed)\n\n def update_metric(self, name, test_data, samples):\n metric = getattr(self, name)\n metric.update(test_data, samples)\n\n def finalize_metric(self, name, num_trials):\n metric = getattr(self, name)\n metric.finalize(num_trials)\n\n def clean_orbit(self):\n self.orbit.scores = []\n self.orbit.data_hist = None\n self.orbit.samples_hist = None\n self.orbit.mean = None\n self.orbit.std = None\n\n def clean_degree(self):\n self.degree.scores = []\n self.degree.data_hist = None\n self.degree.samples_hist = None\n self.degree.mean = None\n self.degree.std = None\n\n def clean_clustering(self):\n self.clustering.scores = []\n self.clustering.data_hist = None\n self.clustering.samples_hist = None\n self.clustering.mean = None\n self.clustering.std = None\n\n def clean_betweenness(self):\n self.betweenness.scores = []\n self.betweenness.data_hist = None\n self.betweenness.samples_hist = None\n self.betweenness.mean = None\n self.betweenness.std = None\n\n\nclass EvaluatorBase:\n def __init__(self, 
model_name):\n self.model_name = model_name\n self.num_samples = [1000, 5000]\n self.num_trials = 10\n self.fast = model_name == \"GRAPHER\"\n\n def novelty_not_calculated(self, result):\n return result.novelty_not_calculated\n\n def uniqueness_not_calculated(self, result):\n return result.uniqueness_not_calculated\n\n def evaluate(self):\n for dataset_name in DATASET_NAMES:\n if self.model_name == \"smiles\" and dataset_name not in [\"PROTEINS_full\", \"ENZYMES\"]:\n continue\n print(dataset_name)\n exp = load_experiment(self.root, self.model_name, dataset_name)\n dataset = load_dataset(dataset_name, self.model_name, exp)\n\n path = exp.root / \"results\" / f\"{dataset_name}.yaml\"\n if not path.exists():\n result = Result(self.model_name, dataset_name)\n else:\n result = Result.load(self.model_name, dataset_name, path)\n\n if self.novelty_not_calculated(result):\n self.evaluate_novelty(result, exp, dataset)\n\n if self.uniqueness_not_calculated(result):\n self.evaluate_uniqueness(result, exp, dataset)\n\n if not result.degree.is_computed:\n self.evaluate_kl(result, exp, dataset, 'degree')\n\n if not result.clustering.is_computed:\n self.evaluate_kl(result, exp, dataset, 'clustering')\n\n if not result.orbit.is_computed:\n self.evaluate_kl(result, exp, dataset, 'orbit')\n\n if not result.betweenness.is_computed:\n self.evaluate_kl(result, exp, dataset, 'betweenness')\n\n result.save(exp.root / \"results\")\n\n def _sample_or_get_samples_kl(self, result, exp, num_samples, trial):\n filename = f\"samples_{num_samples}_{trial}.pt\"\n\n if not (exp.root / \"samples\" / filename).exists():\n samples = exp.sample(num_samples=num_samples)\n torch.save(samples, exp.root / \"samples\" /filename)\n samples = torch.load(exp.root / \"samples\" / filename)\n return [G for G in samples if G.number_of_nodes() > 0]\n\n def _sample_or_get_samples_metric(self, result, exp, num_samples, trial=None):\n time_elapsed = None\n filename = f\"samples_{num_samples}.pt\"\n\n if not (exp.root / \"samples\" /filename).exists():\n start = time.time()\n samples = exp.sample(num_samples=num_samples)\n time_elapsed = time.time() - start\n torch.save(samples, exp.root / \"samples\" /filename)\n\n samples = torch.load(exp.root / \"samples\" / filename)\n return time_elapsed, [G for G in samples if G.number_of_nodes() > 0]\n\n def evaluate_novelty(self, result, exp, dataset):\n train_data = dataset.get_data('train')\n for num_samples in self.num_samples:\n time_elapsed, samples = self._sample_or_get_samples_metric(result, exp, num_samples)\n novelty, _ = evaluation.novelty(train_data, samples, self.fast)\n result.update(f'novelty{num_samples}', novelty)\n result.update_time(num_samples, time_elapsed)\n\n def evaluate_uniqueness(self, result, exp, dataset):\n for num_samples in self.num_samples:\n time_elapsed, samples = self._sample_or_get_samples_metric(result, exp, num_samples)\n uniqueness, _ = evaluation.uniqueness(samples, self.fast)\n result.update(f'uniqueness{num_samples}', uniqueness)\n result.update_time(num_samples, time_elapsed)\n\n def evaluate_kl(self, result, exp, dataset, metric_name):\n test_data = dataset.get_data('test')\n\n for trial in range(self.num_trials):\n samples = self._sample_or_get_samples_kl(result, exp, len(test_data), trial)\n result.update_metric(metric_name, test_data, samples)\n result.finalize_metric(metric_name, self.num_trials)\n\n\nclass Evaluator(EvaluatorBase):\n root = Path(\"RUNS\")\n\n\nclass OrderEvaluator(EvaluatorBase):\n root = Path(\"RUNS\") / \"ORDER\"\n\n def 
__init__(self, model_name):\n super().__init__(model_name)\n self.fast = True\n\n def novelty_not_calculated(self, result):\n return False\n\n def uniqueness_not_calculated(self, result):\n return False","sub_path":"experiment/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":9163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"104783998","text":"from __future__ import annotations\n\nimport datetime\nimport re\nfrom typing import Union\n\nfrom plexapi import X_PLEX_CONTAINER_SIZE\nfrom plexapi.exceptions import BadRequest, NotFound, Unauthorized\nfrom plexapi.library import LibrarySection, MovieSection, ShowSection\nfrom plexapi.server import PlexServer, SystemAccount, SystemDevice\nfrom trakt.utils import timestamp\n\nfrom plex_trakt_sync.decorators.deprecated import deprecated\nfrom plex_trakt_sync.decorators.memoize import memoize\nfrom plex_trakt_sync.decorators.nocache import nocache\nfrom plex_trakt_sync.decorators.rate_limit import rate_limit\nfrom plex_trakt_sync.factory import factory\nfrom plex_trakt_sync.logging import logger\n\nAUDIO_CODECS = {\n 'lpcm': 'pcm',\n 'mp3': None,\n 'aac': None,\n 'ogg': 'vorbis',\n 'wma': None,\n\n 'dts': '(dca|dta)',\n 'dts_ma': 'dtsma',\n\n 'dolby_prologic': 'dolby.?pro',\n 'dolby_digital': 'ac.?3',\n 'dolby_digital_plus': 'eac.?3',\n 'dolby_truehd': 'truehd'\n}\n\n# compile patterns in `AUDIO_CODECS`\nfor k, v in AUDIO_CODECS.items():\n if v is None:\n continue\n\n try:\n AUDIO_CODECS[k] = re.compile(v, re.IGNORECASE)\n except Exception:\n logger.warn('Unable to compile regex pattern: %r', v, exc_info=True)\n\n\nclass PlexGuid:\n def __init__(self, guid: str, type: str, pm: PlexLibraryItem):\n self.guid = guid\n self.type = type\n self.pm = pm\n\n @property\n @memoize\n def media_type(self):\n return f\"{self.type}s\"\n\n @property\n @memoize\n def provider(self):\n if self.guid_is_imdb_legacy:\n return \"imdb\"\n x = self.guid.split(\"://\")[0]\n x = x.replace(\"com.plexapp.agents.\", \"\")\n x = x.replace(\"tv.plex.agents.\", \"\")\n x = x.replace(\"themoviedb\", \"tmdb\")\n x = x.replace(\"thetvdb\", \"tvdb\")\n if x == \"xbmcnfo\":\n CONFIG = factory.config()\n x = CONFIG[\"xbmc-providers\"][self.media_type]\n if x == \"xbmcnfotv\":\n CONFIG = factory.config()\n x = CONFIG[\"xbmc-providers\"][\"shows\"]\n\n return x\n\n @property\n @memoize\n def id(self):\n if self.guid_is_imdb_legacy:\n return self.guid\n x = self.guid.split(\"://\")[1]\n x = x.split(\"?\")[0]\n return x\n\n @property\n @memoize\n def is_episode(self):\n \"\"\"\n Return true of the id is in form of //\n \"\"\"\n parts = self.id.split(\"/\")\n if len(parts) == 3 and all(x.isnumeric() for x in parts):\n return True\n\n return False\n\n @property\n @memoize\n def show_id(self):\n if not self.is_episode:\n raise ValueError(\"show_id is not valid for non-episodes\")\n\n show = self.id.split(\"/\", 1)[0]\n if not show.isnumeric():\n raise ValueError(f\"show_id is not numeric: {show}\")\n\n return int(show)\n\n @property\n @memoize\n def guid_is_imdb_legacy(self):\n guid = self.guid\n\n # old item, like imdb 'tt0112253'\n return guid[0:2] == \"tt\" and guid[2:].isnumeric()\n\n def __str__(self):\n return self.guid\n\n\nclass PlexLibraryItem:\n def __init__(self, item):\n self.item = item\n\n @property\n @memoize\n def is_legacy_agent(self):\n return not self.item.guid.startswith('plex://')\n\n @property\n @memoize\n @deprecated(\"Use .guids directly\")\n def guid(self):\n return self.guids[0]\n\n 
@nocache\n @rate_limit()\n def get_guids(self):\n return self.item.guids\n\n @property\n @memoize\n def guids(self):\n # return early if legacy agent\n # accessing .guids for legacy agent\n # will make another round-trip to plex server\n # and the result is always empty.\n if self.is_legacy_agent:\n return [PlexGuid(self.item.guid, self.type, self)]\n\n guids = [PlexGuid(guid.id, self.type, self) for guid in self.get_guids()]\n\n # take guid in this order:\n # - tmdb, tvdb, then imdb\n # https://github.com/Taxel/PlexTraktSync/issues/313#issuecomment-838447631\n sort_order = {\n \"tmdb\": 1,\n \"tvdb\": 2,\n \"imdb\": 3,\n \"local\": 100,\n }\n ordered = sorted(guids, key=lambda guid: sort_order[guid.provider])\n return ordered\n\n @property\n @memoize\n def media_type(self):\n return f\"{self.type}s\"\n\n @property\n @memoize\n def type(self):\n return self.item.type\n\n @property\n @memoize\n @deprecated(\"Use .guid.provider directly\")\n def provider(self):\n return self.guid.provider\n\n @property\n @memoize\n @deprecated(\"Use .guid.id directly\")\n def id(self):\n return self.guid.id\n\n @property\n @memoize\n @deprecated(\"Use .guid.is_episode directly\")\n def is_episode(self):\n return self.guid.is_episode\n\n @property\n @memoize\n @deprecated(\"Use .guid.show_id directly\")\n def show_id(self):\n return self.guid.show_id\n\n @property\n @memoize\n def rating(self):\n return int(self.item.userRating) if self.item.userRating is not None else None\n\n @property\n @memoize\n def seen_date(self):\n return self.date_value(self.item.lastViewedAt)\n\n @property\n @memoize\n def collected_at(self):\n return self.date_value(self.item.addedAt)\n\n @property\n @memoize\n def audio_channels(self):\n \"\"\"\n Set to 1.0, 2.0, 2.1, 3.0, 3.1, 4.1, 5.1, 6.1, 7.1, 9.1, or 10.1\n \"\"\"\n\n try:\n media = self.item.media[0]\n channels = media.audioChannels\n assert channels is not None\n except (AttributeError, IndexError, TypeError, AssertionError):\n return None\n\n if channels < 3:\n return '%.01f' % channels\n\n return '%.01f' % (channels - 0.9)\n\n @property\n @memoize\n def audio_codec(self):\n\n try:\n media = self.item.media[0]\n codec = media.audioCodec\n assert codec is not None\n except (AttributeError, IndexError, TypeError, AssertionError):\n return None\n\n for key, regex in AUDIO_CODECS.items():\n if key == codec:\n return key\n\n if regex and regex.match(codec):\n return key\n\n return None\n\n @property\n @memoize\n def resolution(self):\n \"\"\"\n Set to uhd_4k, hd_1080p, hd_1080i, hd_720p, sd_480p, sd_480i, sd_576p, or sd_576i.\n \"\"\"\n try:\n media = self.item.media[0]\n width = media.width\n assert width is not None\n except (AttributeError, IndexError, TypeError, AssertionError):\n return None\n # 4k\n if width >= 3840:\n return 'uhd_4k'\n\n # 1080\n if width >= 1920:\n return 'hd_1080p'\n\n # 720\n if width >= 1280:\n return 'hd_720p'\n\n # 576\n if width >= 768:\n return 'sd_576p'\n\n # 480\n return 'sd_480p'\n\n @property\n @memoize\n def hdr(self):\n \"\"\"\n Set to dolby_vision, hdr10, hdr10_plus, or hlg\n \"\"\"\n try:\n stream = self.item.media[0].parts[0].streams[0]\n colorTrc = stream.colorTrc\n except (AttributeError, IndexError, TypeError):\n return None\n\n if colorTrc == 'smpte2084':\n return 'hdr10'\n elif colorTrc == 'arib-std-b67':\n return 'hlg'\n\n try:\n dovi = stream.DOVIPresent\n except AttributeError:\n return None\n\n if dovi:\n return 'dolby_vision'\n\n return None\n\n def watch_progress(self, view_offset):\n percent = view_offset / self.item.duration 
* 100\n return percent\n\n def episodes(self):\n for ep in self._get_episodes():\n yield PlexLibraryItem(ep)\n\n @nocache\n def _get_episodes(self):\n return self.item.episodes()\n\n @property\n @memoize\n def season_number(self):\n return self.item.seasonNumber\n\n @property\n @memoize\n def episode_number(self):\n return self.item.index\n\n @staticmethod\n def date_value(date):\n if not date:\n raise ValueError(\"Value can't be None\")\n\n return date.astimezone(datetime.timezone.utc)\n\n def __repr__(self):\n try:\n guid = self.guids[0]\n return f\"<{guid.provider}:{guid.id}:{self.item}>\"\n except IndexError:\n return f\"<{self.item}>\"\n\n def to_json(self):\n\n metadata = {\n \"collected_at\": timestamp(self.collected_at),\n \"media_type\": \"digital\",\n \"resolution\": self.resolution,\n \"hdr\": self.hdr,\n \"audio\": self.audio_codec,\n \"audio_channels\": self.audio_channels,\n }\n\n return {k: v for k, v in metadata.items() if v is not None}\n\n\nclass PlexLibrarySection:\n def __init__(self, section: LibrarySection):\n self.section = section\n\n @nocache\n def __len__(self):\n return self.section.totalSize\n\n @property\n def title(self):\n return self.section.title\n\n def all(self, max_items: int):\n libtype = self.section.TYPE\n key = self.section._buildSearchKey(libtype=libtype, returnKwargs=False)\n start = 0\n size = X_PLEX_CONTAINER_SIZE\n\n while True:\n items = self.fetch_items(key, size, start)\n if not len(items):\n break\n\n yield from items\n\n start += size\n if start > max_items:\n break\n\n @nocache\n def fetch_items(self, key: str, size: int, start: int):\n return self.section.fetchItems(key, container_start=start, container_size=size)\n\n def items(self, max_items: int):\n for item in self.all(max_items):\n yield PlexLibraryItem(item)\n\n\nclass PlexApi:\n \"\"\"\n Plex API class abstracting common data access and dealing with requests cache.\n \"\"\"\n\n def __init__(self, plex: PlexServer):\n self.plex = plex\n\n @property\n @memoize\n def plex_base_url(self):\n return f\"https://app.plex.tv/desktop/#!/server/{self.plex.machineIdentifier}\"\n\n def movie_sections(self, library=None):\n result = []\n for section in self.library_sections:\n if not type(section) is MovieSection:\n continue\n if library and section.title != library:\n continue\n result.append(PlexLibrarySection(section))\n\n return result\n\n def show_sections(self, library=None):\n result = []\n for section in self.library_sections:\n if not type(section) is ShowSection:\n continue\n if library and section.title != library:\n continue\n result.append(PlexLibrarySection(section))\n\n return result\n\n @memoize\n def fetch_item(self, key: Union[int, str]):\n media = self.plex.library.fetchItem(key)\n return PlexLibraryItem(media)\n\n def reload_item(self, pm):\n self.fetch_item.cache_clear()\n return self.fetch_item(pm.item.ratingKey)\n\n def media_url(self, m: PlexLibraryItem):\n return f\"{self.plex_base_url}/details?key={m.item.key}\"\n\n @memoize\n def search(self, title: str, **kwargs):\n result = self.plex.library.search(title, **kwargs)\n for media in result:\n yield PlexLibraryItem(media)\n\n @property\n @memoize\n @nocache\n def version(self):\n return self.plex.version\n\n @property\n @memoize\n @nocache\n def updated_at(self):\n return self.plex.updatedAt\n\n @property\n @memoize\n @nocache\n def library_sections(self):\n CONFIG = factory.config()\n result = []\n for section in self.plex.library.sections():\n if section.title in CONFIG[\"excluded-libraries\"]:\n continue\n 
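# not in the excluded-libraries config list, so keep this section\n 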
result.append(section)\n\n return result\n\n @property\n def library_section_names(self):\n return [s.title for s in self.library_sections]\n\n @memoize\n @nocache\n def system_device(self, device_id: int) -> SystemDevice:\n return self.plex.systemDevice(device_id)\n\n @memoize\n @nocache\n def system_account(self, account_id: int) -> SystemAccount:\n return self.plex.systemAccount(account_id)\n\n @nocache\n def rate(self, m, rating):\n m.rate(rating)\n\n @nocache\n def create_playlist(self, name: str, items):\n _, plex_items_sorted = zip(*sorted(dict(reversed(items)).items()))\n self.plex.createPlaylist(name, items=plex_items_sorted)\n\n @nocache\n def delete_playlist(self, name: str):\n try:\n self.plex.playlist(name).delete()\n except (NotFound, BadRequest):\n logger.debug(f\"Playlist '{name}' not found, so it could not be deleted\")\n\n @nocache\n def history(self, m, device=False, account=False):\n try:\n history = m.history()\n except Unauthorized as e:\n logger.debug(f\"No permission to access play history: {e}\")\n return\n\n for h in history:\n if device:\n h.device = self.system_device(h.deviceID)\n if account:\n h.account = self.system_account(h.accountID)\n yield h\n\n @nocache\n def mark_watched(self, m):\n m.markWatched()\n","sub_path":"plex_trakt_sync/plex_api.py","file_name":"plex_api.py","file_ext":"py","file_size_in_byte":13524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"177332843","text":"# -*- coding: utf-8 -*-\nimport cv2\nimport sys\n\n# Specify the input file\nimage_file = \"./photo/face1.jpg\"\n\n# Specify the path to the Haar cascade file\ncascade_file = \"C:/opencv/build/etc/haarcascades/haarcascade_frontalface_alt.xml\"\n\n# Read the image\nimage = cv2.imread(image_file)\n# Convert it to grayscale\nimage_gs = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n# Load the face detection feature (cascade) file\ncascade = cv2.CascadeClassifier(cascade_file)\n\n# Run face detection\nface_list = cascade.detectMultiScale(image_gs, scaleFactor=1.1, minNeighbors=1, minSize=(150, 150))\n\nif len(face_list) > 0:\n # Mark the detected regions\n print(face_list)\n color = (0, 0, 255)\n for face in face_list:\n x, y, w, h = face\n cv2.rectangle(image, (x, y), (x + w, y + h), color, thickness=8)\n\n # Write the result to a file\n cv2.imwrite(\"facedetect-output.PNG\", image)\n\nelse:\n print(\"no face\")\n","sub_path":"python-machine-learning/ch07/ch07-4/facedetect.py","file_name":"facedetect.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"589777096","text":"from django.http import HttpResponse\r\nfrom django.shortcuts import render\r\nfrom .models import publication\r\n\r\nall_publications = publication.objects.all()[:30]\r\ntotal_publications = publication.objects.count()\r\nall_years = publication.objects.order_by('-year__year').values('year__year').distinct()\r\n# Create your views here.\r\ndef index(request):\r\n global all_publications\r\n all_publications = publication.objects.all()[:30]\r\n total = all_publications.count()\r\n context = {\r\n 'all_publications': all_publications,\r\n 'total': total_publications,\r\n 'all_years': all_years\r\n }\r\n return render(request, 'publications/index.html', context)\r\n\r\n\r\ndef bob(request):\r\n global all_publications\r\n ''' This could be your actual view or a new one '''\r\n # Your code\r\n if request.method == 'GET': # If the form is submitted\r\n\r\n search_query = request.GET.get('search_box', None)\r\n search_dept = request.GET.get('search_param1', None)\r\n search_year = 
request.GET.get('search_param2', None)\r\n search_type = request.GET.get('search_param3', None)\r\n search_nationality = request.GET.get('search_param4', None)\r\n\r\n if (search_year == ''):\r\n all_publications = publication.objects.filter(\r\n title__contains=search_query).__or__(publication.objects.filter(author__contains=search_query)).filter(\r\n dept__startswith=search_dept, type__contains=search_type,\r\n nationality__startswith=search_nationality)\r\n else:\r\n all_publications = publication.objects.filter(\r\n title__contains=search_query).__or__(publication.objects.filter(author__contains=search_query)).filter(\r\n dept__startswith=search_dept,\r\n year__year=int(search_year), type__contains=search_type,\r\n nationality__startswith=search_nationality)\r\n # Do whatever you need with the word the user looked for\r\n # Your code\r\n total = all_publications.count()\r\n context = {\r\n 'all_publications': all_publications,\r\n 'total': total,\r\n 'all_years': all_years\r\n }\r\n return render(request, 'publications/index.html', context)\r\n\r\n\r\ndef print(request):\r\n context = {\r\n 'all_publications': all_publications,\r\n 'all_years': all_years\r\n }\r\n return render(request, 'publications/print.html', context)\r\n","sub_path":"Publications/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"271392137","text":"word = \"Cat\"\ndef length_string(string) -> int:\n count = 0\n for letter in string:\n count += 1\n return count\n\ndef hangman_game(word):\n word = word.lower()\n attempts = 5\n successes = 0\n correct_guess = \"\"\n word_len = length_string(word)\n\n while attempts > 0 and successes < word_len:\n #Ensuring user input is valid\n val = input(\" :Enter letter ->\")\n if len(val) == 1 and val.isalpha():\n print(\"You've entered: \" + val)\n else:\n print(\"Input error, must be a single letter\")\n continue\n\n #keeping track of user progression (a repeated correct letter is only counted once)\n if val.lower() in word:\n if val.lower() not in correct_guess:\n successes += word.count(val.lower())\n else:\n attempts -= 1\n print(f\"Incorrect: {attempts} tries left\")\n\n if attempts == 0:\n print(\"You have failed\")\n\n #Filling in correct guesses\n correct_guess += val.lower()\n\n for letter in word:\n if letter.lower() in correct_guess:\n print(letter.lower(), end=\"\")\n else:\n print(\"_\", end=\"\")\n\n if successes == word_len:\n print(\"\")\n print(\"Congratulations, you have won!\")\nhangman_game(word)\n","sub_path":"hangman_game.py","file_name":"hangman_game.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
{"seq_id":"526499275","text":"min_X = 256\r\nmax_X = 384\r\nmin_Y = 0\r\nmax_Y = 128\r\nmin_Z = 128\r\nmax_Z = 256\r\nlistb = []\r\nlistd = []\r\nwith open('Synthetic_515-2_75x_128F_G.txt','x') as f_out:\r\n with open('Synthetic_Rolled_F_fft.txt', 'r') as file:\r\n for row in file:\r\n Angle1, Angle2, Angle3, X, Y, Z, GrainID, H = row.split()\r\n if int(X) > min_X and int(X) <= max_X:\r\n if int(Y) > min_Y and int(Y) <= max_Y:\r\n if int(Z) > min_Z and int(Z) <= max_Z:\r\n count_X = int(X)-min_X\r\n count_Y = int(Y)-min_Y\r\n count_Z = int(Z)-min_Z\r\n f_out.write(str(Angle1)+' '+str(Angle2)+' '+str(Angle3)+' '+str(count_X)+' '+str(count_Y)+' '+str(count_Z)+' '+str(GrainID)+' '+str(H)+'\\n')\r\nprint('done') \r\n \r\n 
\r\n","sub_path":"ASE/CutTXTFFT.py","file_name":"CutTXTFFT.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"341471229","text":"from octoblob.data_source import DataSource\nimport octoblob.functions as blobf\nimport logging\nfrom matplotlib import pyplot as plt\nfrom octoblob import diagnostics_tools\nfrom octoblob import parameters\nfrom octoblob import org_tools\nimport sys,os,glob\nimport numpy as np\nfrom octoblob import mapping_dispersion_optimizer as mdo\nfrom octoblob import file_manager\nimport pathlib\n\n\n# This example provides a new FBG alignment function based on cross-correlation. It may prove to be more robust than\n# previous methods that used 'feature'-based alignment (e.g. aligning to the largest positive or negative gradients)\n\nno_parallel = True\n\nuse_multiprocessing = False\ntry:\n assert not no_parallel\n import multiprocessing as mp\n use_multiprocessing = True\n n_cores_available = mp.cpu_count()\n n_cores = n_cores_available-2\n logging.info('multiprocessing imported')\n logging.info('n_cores_available: %d'%n_cores_available)\n logging.info('n_cores to be used: %d'%n_cores)\nexcept ImportError as ie:\n logging.info('Failed to import multiprocessing: %s'%ie)\n logging.info('Processing serially.')\nexcept AssertionError as ae:\n logging.info('Multiprocessing banned by no_parallel.')\n \ndata_filename = None\n\nif data_filename is None:\n try:\n data_filename = sys.argv[1]\n except IndexError as ie:\n sys.exit('Please check data_filename. %s not found or data_filename not passed at command line.'%data_filename)\n\nsrc = blobf.get_source(data_filename)\n\n# Create a diagnostics object for inspecting intermediate processing steps\ndiagnostics = diagnostics_tools.Diagnostics(data_filename)\n\n# Create a parameters object for storing and loading processing parameters\nparams_filename = file_manager.get_params_filename(data_filename)\nparams = parameters.Parameters(params_filename,verbose=True)\n\n\n# New prototype fbg_align function, which uses cross-correlation instead of feature-\n# based alignment of spectra.\n# Set a limit on the maximum index where the FBG trough could possibly be located.\n# This is a critical parameter, as it avoids cross correlation of spectra based on\n# structural information; this would prevent the FBG features from dominating the\n# cross-correlation and introduce additional phase noise.\n# Correlation threshold is the minimum correlation required to consider two spectra\n# to be in phase with one another\ndef fbg_align(spectra,fbg_max_index=150,correlation_threshold=0.9,diagnostics=None):\n # crop the frame to the FBG region\n f = spectra[:fbg_max_index,:].copy()\n\n if not diagnostics is None:\n fig = diagnostics.figure(figsize=(6,4))\n axes = fig.subplots(2,2)\n axes[0][0].imshow(f,aspect='auto')\n for k in range(f.shape[1]):\n axes[0][1].plot(f[:,k])\n\n # group the spectra by amount of shift\n # this step avoids having to perform cross-correlation operations on every\n # spectrum; first, we group them by correlation with one another\n # make a list of spectra to group\n to_do = list(range(f.shape[1]))\n # make a list for the groups of similarly shifted spectra\n groups = []\n ref = 0\n\n # while there are spectra left to group, do the following loop:\n while(True):\n groups.append([ref])\n to_do.remove(ref)\n for tar in to_do:\n c = np.corrcoef(f[:,ref],f[:,tar])[0,1]\n if c>correlation_threshold:\n groups[-1].append(tar)\n 
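# tar is in phase with ref (correlation above threshold): adopt it into\n # ref's group and drop it from the to-do list\n 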
to_do.remove(tar)\n if len(to_do)==0:\n break\n ref = to_do[0]\n\n subframes = []\n for g in groups:\n subf = f[:,g]\n subframes.append(subf)\n\n # now decide how to shift the groups of spectra by cross-correlating their means\n # we'll use the first group as the reference group:\n group_shifts = [0]\n ref = np.mean(subframes[0],axis=1)\n # now, iterate through the other groups, compute their means, and cross-correlate\n # with the reference. keep track of the cross-correlation peaks in the list group_shifts\n for taridx in range(1,len(subframes)):\n tar = np.mean(subframes[taridx],axis=1)\n xc = np.fft.ifft(np.fft.fft(ref)*np.fft.fft(tar).conj())\n shift = np.argmax(xc)\n if shift>len(xc)//2:\n shift = shift-len(xc)\n group_shifts.append(shift)\n\n # now, use the groups and the group_shifts to shift all of the spectra according to their\n # group membership:\n for g,s in zip(groups,group_shifts):\n for idx in g:\n spectra[:,idx] = np.roll(spectra[:,idx],s)\n f[:,idx] = np.roll(f[:,idx],s)\n\n if not diagnostics is None:\n axes[1][0].imshow(f,aspect='auto')\n for k in range(f.shape[1]):\n axes[1][1].plot(f[:,k])\n diagnostics.save(fig)\n\n return spectra\n\n\ndef spectra_to_bscan(mdcoefs,spectra,diagnostics=None):\n # only the fbg_align function is called locally (from this script);\n # most of the OCT processing is done by blob functions (blobf.XXXX)\n spectra = fbg_align(spectra,diagnostics=diagnostics)\n spectra = blobf.dc_subtract(spectra,diagnostics=diagnostics)\n spectra = blobf.crop_spectra(spectra,diagnostics=diagnostics)\n spectra = blobf.k_resample(spectra,mdcoefs[:2],diagnostics=diagnostics)\n spectra = blobf.dispersion_compensate(spectra,mdcoefs[2:],diagnostics=None)\n spectra = blobf.gaussian_window(spectra,sigma=0.9,diagnostics=None)\n\n # Now generate the bscan by FFT:\n bscan = np.fft.fft(spectra,axis=0)\n # remove the upper half of the B-scan and leave only the bottom half:\n bscan = bscan[bscan.shape[0]//2:,:]\n\n # could additionally crop the B-scan if desired;\n # for example, could remove the top 10 rows, bottom 50 rows, and 10 columns\n # from the left and right edges:\n # bscan = bscan[10:-50,10:-10]\n\n # artifact.png has a lens flare artifact after the 150th column, so we'll remove\n # it; we'll also remove 50 rows near the DC (bottom of the image):\n bscan = bscan[:-50,:150]\n \n if not diagnostics is None:\n fig = diagnostics.figure()\n axes = fig.subplots(1,1)\n im = axes.imshow(20*np.log10(np.abs(bscan)),aspect='auto')\n plt.colorbar(im)\n diagnostics.save(fig)\n return bscan\n\n\n# try to read dispersion/mapping coefs from a local processing_parameters file, and run optimization otherwise\ntry:\n coefs = np.array(params['mapping_dispersion_coefficients'],dtype=float)\n logging.info('File %s mapping dispersion coefficients found in %s. Skipping optimization.'%(data_filename,params_filename))\nexcept KeyError:\n logging.info('File %s mapping dispersion coefficients not found in %s. 
Running optimization.'%(data_filename,params_filename))\n samples = src.get_samples(3)\n # modify the next line to use the local spectra_to_bscan function by removing 'blobf.':\n coefs = mdo.multi_optimize(samples,spectra_to_bscan,show_all=False,show_final=True,verbose=True,diagnostics=diagnostics)\n params['mapping_dispersion_coefficients'] = coefs\n\n# get the folder name for storing bscans\nbscan_folder = file_manager.get_bscan_folder(data_filename)\n\nif __name__=='__main__':\n\n if use_multiprocessing:\n def proc(k):\n # compute the B-scan from the spectra, using the provided dispersion coefficients;\n # use the local spectra_to_bscan function, not the blobf. version\n bscan = spectra_to_bscan(coefs,src.get_frame(k),diagnostics=diagnostics)\n\n # save the complex B-scan in the B-scan folder\n outfn = os.path.join(bscan_folder,file_manager.bscan_template%k)\n np.save(outfn,bscan)\n logging.info('Saving bscan %s.'%outfn)\n\n pool = mp.Pool(n_cores)\n pool.map(proc,range(src.n_total_frames))\n\n else:\n\n for k in range(src.n_total_frames):\n\n # compute the B-scan from the spectra, using the provided dispersion coefficients:\n # use the local spectra_to_bscan function, not the blobf. version\n bscan = spectra_to_bscan(coefs,src.get_frame(k),diagnostics=diagnostics)\n\n # save the complex B-scan in the B-scan folder\n outfn = os.path.join(bscan_folder,file_manager.bscan_template%k)\n np.save(outfn,bscan)\n logging.info('Saving bscan %s.'%outfn)\n\n","sub_path":"examples/fbg_alignment/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":8253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"432081599","text":"from __future__ import division\nfrom pdfminer.pdfparser import PDFParser\nfrom pdfminer.pdfdocument import PDFDocument\nfrom pdfminer.pdfpage import PDFPage\nfrom pdfminer.pdfinterp import PDFResourceManager\nfrom pdfminer.pdfinterp import PDFPageInterpreter\nfrom pdfminer.pdfdevice import PDFDevice\nfrom pdfminer.converter import PDFPageAggregator\nfrom pdfminer.layout import LAParams, LTRect, LTTextBoxHorizontal, LTChar\nfrom tempfile import TemporaryFile\nfrom xlwt import Workbook, easyxf\nfrom bisect import bisect_left\nfrom os import path\n\n\ndef insert_sorted(arr, item):\n \"\"\"\n Inserts item to sorted array\n :param arr: Sorted Array\n :param item: Item to insert\n \"\"\"\n index = bisect_left(arr, item)\n if (index < len(arr) and arr[index] - item) or index >= len(arr):\n arr.insert(index, item)\n\n\ndef unique_lines(arr, threshold=1):\n \"\"\"\n Removes lines that are too close to each other\n :param arr: Array of lines\n :param threshold: If lines are closer than the threshold one of them is removed\n :return: Array with lines that are too close removed\n \"\"\"\n arr_unique = [arr[0]]\n for i in range(1, len(arr)):\n if arr[i] - arr[i - 1] > threshold:\n arr_unique.append(arr[i])\n return arr_unique\n\n\n#\ndef get_place_in_table(x, y, rows, cols):\n \"\"\"\n Get row and column of text item in the xls table\n :param x: X Position of element\n :param y: Y Position of element\n :param rows: Array of rows\n :param cols: Array of cols\n :return: Row and Column of text item in the xls table\n \"\"\"\n return bisect_left(rows, y) - 1, bisect_left(cols, x) - 1\n\n\ndef pdf2xls(pdf_file, xls_file):\n \"\"\"\n Converts pdf tables to xls sheets\n :param pdf_file: Pdf file from which info will be extracted\n :param xls_file: Xls file generated\n :return: True/False depending on whether the process was 
successful\n \"\"\"\n book = Workbook(encoding='utf-8')\n\n fp = open(pdf_file, 'rb')\n parser = PDFParser(fp)\n document = PDFDocument(parser)\n document.initialize(\"\")\n if not document.is_extractable:\n return False\n\n resource_manager = PDFResourceManager()\n device = PDFDevice(resource_manager)\n la_params = LAParams()\n # Create a PDF page aggregator object\n device = PDFPageAggregator(resource_manager, laparams=la_params)\n interpreter = PDFPageInterpreter(resource_manager, device)\n page_num = 0\n for page in PDFPage.create_pages(document):\n page_num += 1\n interpreter.process_page(page)\n # Receive the LTPage object for the page.\n layout = device.get_result()\n\n text_boxes = []\n table_cols = []\n table_rows = []\n\n # *_max are the limits of the table\n x_max = y_max = 0\n x_min = y_min = float(\"inf\")\n for i in layout:\n if isinstance(i, LTTextBoxHorizontal):\n text_boxes.append(i)\n if isinstance(i, LTRect):\n x0, y0, x1, y1 = i.bbox\n insert_sorted(table_cols, x0)\n insert_sorted(table_cols, x1)\n insert_sorted(table_rows, y0)\n insert_sorted(table_rows, y1)\n x_min = min(x_min, x0)\n y_min = min(y_min, y0)\n x_max = max(x_max, x1)\n y_max = max(y_max, y1)\n\n # Table_cols_unique are the lines that create the columns of the table\n # Same for table_rows_unique\n table_cols_unique = unique_lines(table_cols)\n table_rows_unique = unique_lines(table_rows)\n num_rows = len(table_rows_unique) - 1\n num_cols = len(table_cols_unique) - 1\n\n table = [[\"\"]*num_cols for _ in range(num_rows)]\n for text_box in text_boxes:\n x0, y0, x1, y1 = text_box.bbox\n if x0 > x_min and x1 < x_max and y0 > y_min and y1 < y_max:\n for text_line in text_box:\n x0, y0, x1, y1 = text_line.bbox\n cx = (x0 + x1)/2\n cy = (y0 + y1)/2\n row, col1 = get_place_in_table((cx + x0)/2, cy, table_rows_unique, table_cols_unique)\n _, col2 = get_place_in_table((cx + x1)/2, cy, table_rows_unique, table_cols_unique)\n\n if col1 != col2:\n prev_col = None\n breaking_index = 0\n for char in text_line:\n if isinstance(char, LTChar):\n x0, y0, x1, y1 = char.bbox\n _, col = get_place_in_table((x0 + x1)/2, (y0 + y1)/2,\n table_rows_unique, table_cols_unique)\n if prev_col is not None and col != prev_col:\n break\n prev_col = col\n breaking_index += 1\n string_1 = text_line.get_text()[0:breaking_index]\n string_2 = text_line.get_text()[breaking_index:]\n table[row][col1] += string_1.strip() + \"\\n\"\n table[row][col2] += string_2.strip() + \"\\n\"\n else:\n table[row][col1] += text_line.get_text().strip() + \"\\n\"\n\n table.reverse()\n sheet = book.add_sheet(\"page \" + str(page_num))\n for row in range(num_rows):\n for col in range(num_cols):\n sheet.col(col).width = 2000 if col == 0 else 10000\n style = \"font: name Arial, bold True;\" if col == 0 or row == 0 else \"\"\n style += \"font: height 160;\" \\\n \"borders: left thin, right thin, top thin, bottom thin;\" \\\n \"alignment: wrap True;\"\n sheet.write(row, col, table[row][col].strip(), easyxf(style))\n\n book.save(xls_file)\n book.save(TemporaryFile())\n\n return True\n\n\ndef __main__():\n pdf_file_path = path.join(\"..\", \"input\", \"file.pdf\")\n xls_file_path = path.join(\"..\", \"output\", \"courses.xls\")\n pdf2xls(pdf_file_path, xls_file_path)\n\nif __name__ == \"__main__\":\n __main__()","sub_path":"src/pdf2xls.py","file_name":"pdf2xls.py","file_ext":"py","file_size_in_byte":6160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"187369191","text":"#!/usr/bin/env python\n\n'''make a run 
script for each chr/gene subset and output a qsub file'''\n\nqsubfile = open('qsub.txt','w')\nprescript = 'FHS_runscript_LDpruned_gene_subset'\n\nfor i in [10]:\n nk = str(i)\n for j in range(1,18): #check which chromosomes didn't finish\n c = str(j)\n for k in range(1,5):\n sub = str(k)\n outfilename = '002_run_' + prescript + '_' + c + '_' + nk + '_' + sub + '.sh'\n outfile = open(outfilename,'w')\n output = '''#!/bin/bash\n#PBS -N sub_''' + c + '_' + sub +'''\\n#PBS -S /bin/bash\n#PBS -l walltime=240:00:00\n#PBS -l nodes=1:ppn=1\n#PBS -l mem=32gb\n#PBS -e ../joblogs/${PBS_JOBNAME}.err\n#PBS -o ../joblogs/${PBS_JOBNAME}.out\ncd $PBS_O_WORKDIR\n\nmodule load gcc/6.2.0\nmodule load R/3.5.0\n\ntime R --no-save < ''' + prescript + '.R --args ' + nk + ' ' + c + ' ' + sub + '\\n'\n outfile.write(output)\n qsubfile.write('qsub ' + outfilename + '\\nsleep 3\\n')\n\n\n","sub_path":"model_training/scripts/make_FHS_runscripts_gene_subset.py","file_name":"make_FHS_runscripts_gene_subset.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"252046033","text":"from flask import Flask, jsonify, request\nimport random\n\napp = Flask('stalker')\n\n\n@app.route(\"/lucky-lotto\")\ndef lucky_lotto():\n number = random.randint(0, 99)\n return jsonify({\"number\": number})\n\n\n@app.route(\"/yes-no\")\ndef yes_no():\n question = request.args.get('question')\n answer = 'yes' if hash(question) % 2 else 'no'\n return jsonify({\"answer\": answer})\n\n\n@app.route('/versus', methods=['POST'])\ndef versus():\n player1 = request.json['player1']\n player2 = request.json['player2']\n score1 = hash(player1)\n score2 = hash(player2)\n winner, loser = (player1, player2) if score1 > score2 else (player2, player1)\n return jsonify({\n \"winner\": {\"name\": winner, \"score\": hash(winner)},\n \"loser\": {\"name\": loser, \"score\": hash(loser)},\n })\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"stalker-flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"93580184","text":"\n'''\nAll tests were run on an Ubuntu system.\n\nAll the errors that can occur while a client and the server establish a connection:\n1. Error while the client establishes the connection\n - the server may not be running\n ConnectionRefusedError\n2. Connection established, but sending data fails\n - the server closed the socket and the client then sends data\n BrokenPipeError\n3. Connection established, the server closes it, the client keeps receiving\n - the client can keep calling recv and keeps getting b''\n\n\n4. The client closed the connection but the server keeps sending\n - the first send looks fine, the second raises BrokenPipeError\n\n'''\nimport pickle\nimport random\nimport socket\nimport socketserver\nimport time\nimport traceback\n\nimport redis\n# max bytes per single transfer\none_time_transport_data = 1024\n# redis connection pool\nredis_pool = redis.ConnectionPool(host='127.0.0.1', port=6379, db=0)\nredis_list = []\nfor i in range(1,50):\n redis_list.append(redis.Redis(connection_pool=redis_pool))\n\n\nclass Myserver(socketserver.BaseRequestHandler):\n def exit_client_handle(self,taskparams,unique_client_mark,block_wait_queue):\n '''\n Purpose: the client closed (its queue can no longer be used); handle this task\n '''\n print('Client error (it may have closed); entering the exception handler....')\n try:\n self.request.send(b'FREE')\n except:\n pass\n self.remove_client(unique_client_mark)\n # this may be data popped from redis after a timeout\n if taskparams==None:\n pass\n else:\n result_queue = taskparams.get('result_queue')\n result_data = pickle.dumps({'status':'client_close','response':taskparams.get('params')})\n self.push_finish_task(result_queue,result_data)\n\n # must first pop every task out of the queue, then loop and mark each one as client_close\n popinfoList = []\n while True:\n popinfo = self.r.blpop(block_wait_queue, 1)\n if popinfo == None:\n break\n 
else:\n popinfoList.append(popinfo)\n\n for popinfo in popinfoList:\n taskparams = pickle.loads(popinfo[1])\n result_queue = taskparams.get('result_queue')\n result_data = pickle.dumps({'status': 'client_close', 'responseInfo': taskparams.get('params')})\n self.push_finish_task(result_queue, result_data)\n\n def judge_is_exit_connect(self,conn):\n '''\n Purpose:\n decide whether a connection has been dropped abnormally.\n How:\n call recv once before sending data\n Note:\n must not be called right before receiving data\n\n :return: True the peer is gone, False the connection is alive\n '''\n try:\n conn.settimeout(1)\n data = conn.recv(1024)\n if not data:\n return True\n except socket.timeout:\n return False\n except ConnectionResetError:\n return True\n except BrokenPipeError:\n return True\n except:\n print('Some other error occurred while checking for a disconnect!!!')\n traceback.print_exc()\n return True\n finally:\n conn.settimeout(120)\n\n def setup(self):\n self.r = random.choice(redis_list)\n\n def add_client(self,lft_username,lft_city):\n '''\n client+lft_username+lft_city + 1\n :param lft_username: client username\n :param lft_city: client city\n :return: unique client id, blocking wait queue name\n '''\n unique_client_mark = 'client_'+lft_city+'_'+lft_username\n cnt = self.r.incr(unique_client_mark)\n print('{} added a client -> {}, current count -> {}'.format(time.strftime(\"%H:%M:%S\", time.localtime()), unique_client_mark, cnt))\n return unique_client_mark,unique_client_mark.replace('client_','')\n\n def remove_client(self,unique_client_mark):\n '''\n Remove this client from the \"ip pool\"\n :return:\n '''\n # record when the client went offline\n self.r.set('lasttime_'+unique_client_mark, int(time.time()), 86400*2)\n cnt = self.r.decr(unique_client_mark)\n if cnt<=0:\n self.r.delete(unique_client_mark)\n print('{} removed a client -> {}, current count -> {}'.format(time.strftime(\"%H:%M:%S\", time.localtime()), unique_client_mark,cnt))\n\n\n \n def push_finish_task(self,result_queue,result_data):\n '''\n\n :param result_queue: the queue to push to\n :param result_data: pickled bytes\n :return:\n '''\n\n self.r.lpush(result_queue,result_data)\n\n def handle(self):\n '''\n Flow:\n 1. Accept the client connection.\n 2. Receive the client's first transmission (its identity); create unique_client_mark for it and block on the block_wart_queue_name queue\n - bytes are received; pickle.loads() yields clientinfo -> {lft_username,lft_city}\n 3. Receive data pushed by handle_requests (which listens on a redis queue)\n - bytes are received; pickle.loads() yields -> taskparams {\"result_queue\":\"str, the queue to push to when the task is done\",\"params\":\"dict, the parameters pushed to the client\"}\n 4. Push the data to the client\n - bytes are pushed: pickle.dumps(params)\n 5. Receive the data from the client\n - bytes are received; pickle.loads() yields the result of the requests call\n - or a string (the failure reason)\n 6. Push the finished task to result_queue\n - bytes are pushed; \"success\" does not guarantee success (response may be a string describing an error): pickle.dumps({\"status\":\"success\",\"response\":response}) or {\"status\":\"client_close\",\"response\":params} (the params are passed back)\n 7. Acts as a middle layer only: it forwards data and never modifies it.\n 8. If a proxy dies, the proxy pool is updated (the client_xx_xx value is decremented)\n\n 9. Anything obtained via pickle.load() is directly usable (no extra byte handling)\n\n :return:\n '''\n\n # step 1: establish the connection\n conn = self.request\n\n # step 2: receive the client info, build its unique id and the blocking wait queue\n clientinfo_zijie = conn.recv(one_time_transport_data)\n clientinfo = pickle.loads(clientinfo_zijie)\n\n\n if (not clientinfo) or (not isinstance(clientinfo,dict)):\n print('Log this client: it connected but the first message carried no client info, or the info was not a dict')\n # returning drops the connection; the client reconnects automatically\n return\n lft_username = clientinfo.get('lft_username')\n lft_city = clientinfo.get('lft_city')\n if lft_username==None or lft_city==None:\n print('Log this client: it connected but the message sent lacks lft_username and lft_city')\n return\n try:\n (unique_client_mark,block_wart_queue) = self.add_client(lft_username, lft_city)\n conn_start_time = time.time()\n while True:\n print('Blocking ({}) and waiting...'.format(block_wart_queue))\n popinfo = self.r.blpop(block_wart_queue,30) # wait up to 30 seconds\n print(time.time(),'popped a message')\n if popinfo == None:\n taskparams = 
None\n else:\n taskparams = pickle.loads(popinfo[1])\n\n # check whether the client closed the connection on its own\n if self.judge_is_exit_connect(conn)==True:\n # hand the popped task back and clean up this closed connection\n return self.exit_client_handle(taskparams,unique_client_mark,block_wart_queue)\n\n if taskparams == None: # the queue had no data\n if time.time()-conn_start_time>60*15*1: # a connection lives at most 15 minutes\n print('{}: sending FREE, done'.format(time.asctime(time.localtime(time.time()))))\n conn.send(b'FREE')\n break\n else:\n print('{}: keep waiting'.format(time.asctime(time.localtime(time.time()))))\n continue\n else: # the queue had data\n params_zijie = pickle.dumps(taskparams.get('params'))\n print('Sending data to the client')\n conn.sendall(params_zijie+b'endendend')\n recv_from_server = b''\n # whether the transfer ended normally\n is_normal_end = True\n print('Waiting for data from the client...')\n\n while True:\n # loop receiving data from the client\n try:\n data = conn.recv(one_time_transport_data)\n except:\n print('Error while receiving data!!!')\n traceback.print_exc()\n is_normal_end = False\n break\n\n if not data:\n print('Client ended abnormally; an empty value was received')\n is_normal_end = False\n break\n else:\n recv_from_server += data\n\n if recv_from_server.endswith(b'endendend'):\n print('Completed one transfer!!!')\n recv_from_server = recv_from_server[:-20] + recv_from_server[-20:].replace(b'endendend',b'')\n break\n\n if is_normal_end:\n response = pickle.loads(recv_from_server)\n result_queue = taskparams.get('result_queue')\n result_data = pickle.dumps({\n 'status':'success',\n 'response':response,\n })\n self.push_finish_task(result_queue,result_data)\n else:\n return self.exit_client_handle(taskparams,unique_client_mark,block_wart_queue)\n print('--' * 20)\n # the server queue has no tasks; close normally\n self.remove_client(unique_client_mark)\n except:\n # normally nothing should fail here; the error cases are all handled above\n traceback.print_exc()\n self.remove_client(unique_client_mark)\n\n def finish(self):\n pass\n\nif __name__ == '__main__':\n server = socketserver.ThreadingTCPServer(\n ('',90),Myserver\n )\n server.serve_forever()","sub_path":"socketServerClient/socket_server.py","file_name":"socket_server.py","file_ext":"py","file_size_in_byte":10603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"198598313","text":"import numpy\r\nimport random\r\nprint(\"\\n\\n *****welcome to the snakes and ladders game*****\")\r\nl1=[]\r\ni=1\r\nwhile(i<=100):\r\n l1.append(i)\r\n #print(i,end=\" \")\r\n i=i+1\r\n if(i%10==0):\r\n l1.append(i)\r\n #print(i,end=\" \")\r\n for j in range(i+10,i,-1):\r\n #print(j,end=\" \")\r\n l1.append(j)\r\n i=i+11\r\n\r\nol=[]\r\nm=0\r\nn=10\r\nfor i in range(0,10):\r\n ol.append(l1[m:n])\r\n m=m+10\r\n n=n+10\r\n\r\n\r\nhg=ol\r\nol.reverse()\r\nol=numpy.array(ol)\r\n#print(\"\\n\",ol)\r\n#print(ol)\r\n\r\ndef movep1(p1,rv):\r\n p1=p1+rv\r\n #print(\"\\nplayer\",i,\"at\",p1)\r\n return p1\r\ndef movep2(p2,rv):\r\n p2=p2+rv\r\n #print(\"\\nplayer\",i,\"at\",p2)\r\n return p2\r\ndef movep3(p3,rv):\r\n p3=p3+rv\r\n #print(\"\\nplayer\",i,\"at\",p3)\r\n return p3\r\nlad=[\"2 to 19\",\"3 to 43\",\"7 to 34\",\"26 to 75\",\"44 to 84\",\"39 to 98\",\"65 to 96\"]\r\ndef ladder(d1):\r\n if(d1==2):\r\n print(\"\\nPLAYER AT 2\")\r\n print(\"he he ladder............ 
from 2 to 19\")\r\n return 19\r\n if(d1==3):\r\n print(\"\\nPLAYER AT 3\")\r\n print(\"he he ladder............ladder from 3 to 43\")\r\n return 43\r\n## if(d1==5):\r\n## print(\"\\nPLAYER AT 5\")\r\n## print(\"he he ladder............ladder from 5 to 77\")\r\n## return 77\r\n if(d1==7):\r\n print(\"\\nPLAYER AT 7\")\r\n print(\"he he ladder............ladder from 7 to 34\")\r\n return 34\r\n## if(d1==8):\r\n## print(\"\\nPLAYER AT 8\")\r\n## print(\"he he ladder............ladder from 8 to 28\")\r\n## return 28\r\n if(d1==26):\r\n print(\"\\nPLAYER AT 26\")\r\n print(\"he he ladder............ladder from 26 to 75\")\r\n return 75\r\n \r\n if(d1==39):\r\n print(\"\\nPLAYER AT 39\")\r\n print(\"he he ladder............ladder from 39 to 98\")\r\n return 98\r\n if(d1==44):\r\n print(\"\\nPLAYER AT 44\")\r\n print(\"he he ladder............ladder from 44 to 84\")\r\n return 84\r\n if(d1==65):\r\n print(\"\\nPLAYER AT 65\")\r\n print(\"he he ladder............ladder from 65 to 96\")\r\n return 96\r\n else:\r\n return d1\r\nsnk=[\"99 to 2\",\"91 to 10\",\"76 to 26\",\"46 to 15\",\"72 to 2\",\"37 to 14\",\"80 to 60\"]\r\ndef snakes(d1):\r\n if(d1==99):\r\n print(\"\\nPLAYER AT 99\")\r\n print(\"oopsss snake bites you............ from 99 to 2\")\r\n return 2\r\n if(d1==91):\r\n print(\"\\nPLAYER AT 91\")\r\n print(\"oopsss snake bites you............ from 91 to 10\")\r\n return 10\r\n if(d1==76):\r\n print(\"\\nPLAYER AT 76\")\r\n print(\"oopsss snake bites you............ from 76 to 26\")\r\n return 26\r\n if(d1==46):\r\n print(\"\\nPLAYER AT 46\")\r\n print(\"oopsss snake bites you............ from 46 to 15\")\r\n return 15\r\n if(d1==72):\r\n print(\"\\nPLAYER AT 72\")\r\n print(\"oopsss snake bites you............ from 72 to 2\")\r\n return 2\r\n if(d1==37):\r\n print(\"\\nPLAYER AT 37\")\r\n print(\"oopsss snake bites you............ from 37 to 14\")\r\n return 14\r\n if(d1==80):\r\n print(\"\\nPLAYER AT 80\")\r\n print(\"oopsss snake bites you............ 
from 80 to 60\")\r\n return 60\r\n else:\r\n return d1\r\np1=1\r\np2=1\r\np3=1\r\nd1=1\r\nd2=1\r\nd3=1\r\ni1=int(input(\"\\nPlayers count:\"))\r\nwhile((p1<100 and p2<100 and p3<100)):\r\n for i in range(1,i1+1): \r\n if i==1:\r\n print(\"\\n\\n ***********{} PLAYER TURN**********\\n\\nROLL YOUR DIE,(Press enter):\".format(i),end=\"\")\r\n input()\r\n rv=random.randint(1,6)\r\n print(\"your value is:\",rv)\r\n d1=movep1(p1,rv)\r\n print(\"\\n\")\r\n d1=ladder(d1)\r\n d1=snakes(d1)\r\n p1=d1\r\n #print(\"\\n\\n\",\"*********\"*9,sep='')\r\n print(\"\\n\",ol)\r\n print(\"\\nplayer\",i,\"at\",p1)\r\n print(\"\\n\",\"The ladders are:--\",lad)\r\n print(\"\\n\",\"The snakes are:---\",snk)\r\n print(\"*********\"*9)\r\n if(d1>=100):\r\n break\r\n if i==2:\r\n print(\"\\n\\n ///////////{} PLAYER TURN/////////////\\n\\nROLL YOUR DIE,(Press enter):\".format(i),end=\"\")\r\n input()\r\n rv=random.randint(1,6)\r\n print(\"your value is:\",rv)\r\n d2=movep2(p2,rv)\r\n d2=ladder(d2)\r\n d2=snakes(d2)\r\n p2=d2\r\n #print(\"\\n\\n\",\"*********\"*9,sep='')\r\n print(\"\\n\",ol)\r\n print(\"\\nplayer\",i,\"at\",p2)\r\n print(\"\\n\",\"The ladders are:--\",lad)\r\n print(\"\\n\",\"The snakes are:---\",snk)\r\n print(\"*********\"*9)\r\n if(d2>=100):\r\n break\r\n if i==3:\r\n print(\"\\n\\n &&&&&&&&&&&{} PLAYER TURN&&&&&&&&&&&&&&\\n\\nROLL YOUR DIE,(Press enter):\".format(i),end=\"\")\r\n input()\r\n rv=random.randint(1,6)\r\n print(\"your value is:\",rv)\r\n d3=movep3(p3,rv)\r\n d3=ladder(d3)\r\n d3=snakes(d3)\r\n p3=d3\r\n #print(\"\\n\\n\",\"*********\"*9,sep='')\r\n print(\"\\n\",ol)\r\n print(\"\\nplayer\",i,\"at\",p3)\r\n print(\"\\n\",\"The ladders are:--\",lad)\r\n print(\"\\n\",\"The snakes are:---\",snk)\r\n print(\"*********\"*9)\r\n if(d3>=100):\r\n break\r\n #print(\"player\",i,\"at\",p1)\r\nif(p1>p2 and p1>p3):\r\n## print(\"\\U0001f600\")\r\n print(\"......>>>>>>>>Player 1 is the winner>>>>>>>>>>>........\")\r\nif(p2>p1 and p2>p3):\r\n print(\"......>>>>>>>>Player 2 is the winner>>>>>>>>>>>........\")\r\nif(p3>p2 and p3>p1):\r\n print(\"......>>>>>>>>Player 3 is the winner>>>>>>>>>>>........\")\r\n","sub_path":"trysnakesl.py","file_name":"trysnakesl.py","file_ext":"py","file_size_in_byte":5669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"443027924","text":"import sys\nimport numpy as np\nimport scipy\n\nimport time\n\n####################################################################\n\ndef function_H(X, W):\n\n\tarr = np.exp(np.dot(X, W))\n\n\tarr_sum = np.sum(arr, axis=1)\n\tarr = arr/arr_sum.reshape((arr_sum.shape[0],1))\n\n\treturn (arr)\n\ndef function_L(W, X, Y):\n\n\tarr = np.multiply( Y, np.log(function_H(X,W)) ) \n\tsum = np.sum(arr)\n\n\treturn (sum/X.shape[0])\n\n####################################################################\n\ntrain_data_raw = open(sys.argv[1], 'rt')\ntrain_data = np.loadtxt(train_data_raw, dtype = 'str', delimiter=\",\")\n\nfeature_labels = ['usual', 'pretentious', 'great_pret', \n'proper', 'less_proper', 'improper', 'critical', 'very_crit', \n'complete', 'completed', 'incomplete', 'foster',\n'1', '2', '3', 'more',\n'convenient', 'less_conv', 'critical',\n'convenient', 'inconv',\n'nonprob', 'slightly_prob', 'problematic',\n'recommended', 'priority', 'not_recom']\n\nanswer_labels = ['not_recom', 'recommend', 'very_recom', 'priority', 'spec_prior']\n\nX = np.zeros((train_data.shape[0], 27))\nY = np.zeros((train_data.shape[0], 5))\n\nfor i in range (0, train_data.shape[0]):\n\n\tj = 0\n\tindex 
= 0\n\n\twhile (j < 8):\n\n\t\tif (train_data[i][j] == feature_labels[index]):\n\n\t\t\tX[i][index] = 1\n\t\t\tj = j+1\n\n\t\tindex = index+1\n\nfor i in range (0, train_data.shape[0]):\n\n\tfor j in range (0,5):\n\n\t\tif (train_data[i][8] == answer_labels[j]):\n\n\t\t\tY[i][j] = 1\n\nX1 = np.ones((train_data.shape[0],1))\nX_f = np.hstack((X1,X))\n\n####################################################################\n\nrate = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\n\nbatch_size = [6000, 3000, 2000, 1000, 500, 100, 50, 10]\n\nmax_itr = 10000\n\nprint(\"Constant Learning Rate\")\nprint(\"__________________\")\n\n####################################################################\n\nfor r in (rate):\n\n\tfor b in (batch_size):\n\n\t\tW = np.zeros((28, 5))\n\n\t\titr = 0\n\n\t\tstart = time.time()\n\n\t\twhile(itr <= max_itr):\n\n\t\t\tbatches = int(train_data.shape[0]/b)\n\n\t\t\tfor i in range (0, batches):\n\n\t\t\t\tmini_X = X_f[i*b : (i+1)*b, :]\n\t\t\t\tmini_Y = Y[i*b : (i+1)*b, :]\n\n\t\t\t\tderivative = np.dot( mini_X.transpose() , (mini_Y - function_H(mini_X, W)) )\n\t\t\n\t\t\t\tW = W + np.multiply(derivative, r/b) \n\t\t\n\t\t\titr = itr+1\n\n\t\tprint(r)\n\t\tprint(b)\n\t\tprint(time.time() - start)\n\t\tprint(function_L(W, X_f, Y))\n\t\tprint(\"__________________\")\n\n\n####################################################################\n\n","sub_path":"c1.py","file_name":"c1.py","file_ext":"py","file_size_in_byte":2386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"466726692","text":"# @copyright@\n# Copyright (c) 2006 - 2018 Teradata\n# All rights reserved. Stacki(r) v5.x stacki.com\n# https://github.com/Teradata/stacki/blob/master/LICENSE.txt\n# @copyright@\n#\n# @rocks@\n# Copyright (c) 2000 - 2010 The Regents of the University of California\n# All rights reserved. Rocks(r) v5.4 www.rocksclusters.org\n# https://github.com/Teradata/stacki/blob/master/LICENSE-ROCKS.txt\n# @rocks@\n\nimport stack.commands\nfrom stack.exception import CommandError\n\n\nclass Command(stack.commands.list.appliance.command):\n\n\t\"\"\"\n\tLists the XML profile for a given appliance type. This is useful\n\tfor high level debugging but will be missing any host specific\n\tvariables. 
It cannot be used\n\tto pass into 'rocks list host profile'\n\tto create a complete Kickstart/Jumpstart profile.\n\t\n\t\n\tOptional list of appliance names.\n\t\n\t\t\n\t\n\tLists the XML profile for a backend appliance.\n\t\n\n\t\n\tLists the XML profile for all appliance types.\n\t\n\t\"\"\"\n\n\tdef run(self, params, args):\n\n\t\tself.beginOutput()\n\t\tfor app in self.getApplianceNames(args):\n\t\t\tself.db.execute(\"\"\"select name from appliances\n\t\t\t\twhere name='%s'\"\"\" % app)\n\t\t\ttry:\n\t\t\t\t(name, ) = self.db.fetchone()\n\t\t\texcept TypeError:\n\t\t\t\traise CommandError(self, 'no such appliance \"%s\"' % app)\n\t\t\tif name:\n\t\t\t\txml = self.command('list.node.xml', [name])\n\t\t\t\tfor line in xml.split('\\n'):\n\t\t\t\t\tself.addOutput(app, line)\n\t\tself.endOutput(padChar='')\n\n","sub_path":"common/src/stack/command/stack/commands/list/appliance/xml/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"517868769","text":"import cv2 \r\nimport numpy as np\r\nfrom pyzbar.pyzbar import decode\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\nwhile True:\r\n success,img = cap.read()\r\n for code in decode(img):\r\n data = code.data.decode('utf-8')\r\n print(data)\r\n pts = np.array([code.polygon],np.int32).reshape((-1,1,2))\r\n cv2.polylines(img,[pts],True,(0,255,0),5)\r\n cv2.putText(img,data,(code.rect[0],code.rect[1]),cv2.FONT_HERSHEY_PLAIN,0.9,(0,255,0),2)\r\n \r\n cv2.imshow('Image',img)\r\n if cv2.waitKey(1) & 0xff==ord('q'):\r\n break","sub_path":"QRCode & BarCode Detection/qrcode_and_barcode_detection.py","file_name":"qrcode_and_barcode_detection.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"313615590","text":"class Coche():\n\n\tdef __init__(self):\n\t\tself.__ancho= 24\n\t\tself.__largo= 48\n\t\tself.__estado = False\n\n\tdef enceder(self,arrancar):\n\t\tself.__estado = arrancar\n\t\testadoChequeo = self.__chequeo()\n\t\tif (self.__estado and estadoChequeo):\n\t\t\treturn \"Starting\"\n\t\telse:\n\t\t\treturn \"Stopped\"\n\tdef __chequeo(self):\n\t\tself.motor=\"ok\"\n\t\tself.puertas=\"ok\"\n\t\tprint(\"Running the inspection\")\n\n\t\tif(self.puertas == \"ok\" and self.motor==\"ok\"):\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False \n\nmiCoche = Coche()\nprint(miCoche.enceder(True))\n\n","sub_path":"oop/tres.py","file_name":"tres.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"594791464","text":"import os\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\nfrom scipy.spatial.distance import cdist\nfrom skimage.morphology import binary_erosion\nimport cv2\nfrom scipy.spatial import cKDTree\nfrom collections import defaultdict\nfrom scipy import ndimage\nfrom operator import add\nimport argparse\n\n\nclass NeighbourFinder():\n\n def __init__(self):\n pass\n\n def _get_z_projection(self, shape_id, abs_df):\n proj = np.unique(abs_df.loc[abs_df['id'] == shape_id, ['x', 'y']].values.astype('int16'), axis=0)\n return proj\n\n def run(self, tolerance_xy, tolerance_z, absolute_df, dimensions_df):\n\n self.tolerance_xy = tolerance_xy\n self.tolerance_z = tolerance_z\n self.dimensions_df = dimensions_df\n self.absolute_df = absolute_df\n\n dist_dict = self.get_candidate_neighbors_dict(dimensions_df, absolute_df, tolerance_xy, 
tolerance_z)\n dist_dict = self.filter_distant_neighbors(dist_dict, tolerance_xy, tolerance_z)\n\n dist_df = pd.DataFrame(columns=['shape_id_1', 'shape_id_2', 'center_dist_xy',\n 'center_dist_t', 'center_of_mass_dist_xy', 'center_of_mass_dist_t'])\n\n for shape_id, neighbor_dict in tqdm(dist_dict.items()):\n shape1 = shape_id\n for shape2 in neighbor_dict.keys():\n center_dist_xy, center_dist_z = self.calculate_euc_dists(dimensions_df, shape1, shape2)\n\n com_dist_xy, com_dist_t = self.calculate_com_dists(dimensions_df, absolute_df, shape1, shape2)\n\n row = {\n 'shape_id_1': shape1,\n 'shape_id_2': shape2,\n 'center_dist_xy': center_dist_xy,\n 'center_dist_t': center_dist_z,\n 'center_of_mass_dist_xy': com_dist_xy,\n 'center_of_mass_dist_t': com_dist_t\n }\n\n dist_df = dist_df.append(row, ignore_index=True)\n dist_df = dist_df.astype('int')\n return dist_df\n\n def get_tolerance_bounding_box(self, shape, tolerance_xy, tolerance_z):\n\n xl = shape['x_min'].values[0] - tolerance_xy\n yl = shape['y_min'].values[0] - tolerance_xy\n zl = shape['z_min'].values[0] - tolerance_z\n\n xu = shape['x_max'].values[0] + tolerance_xy\n yu = shape['y_max'].values[0] + tolerance_xy\n zu = shape['z_max'].values[0] + tolerance_z\n\n return (xl, xu), (yl, yu), (zl, zu)\n\n def get_neighbor_shapes(self, shape, tolerance_xy, tolerance_z):\n\n xb, yb, zb = self.get_tolerance_bounding_box(shape, tolerance_xy, tolerance_z)\n\n neighbor_shapes = self.dimensions_df.loc[(self.dimensions_df['x_min'].between(\n *xb)) | (self.dimensions_df['x_max'].between(*xb))]\n neighbor_shapes = neighbor_shapes.loc[(neighbor_shapes['y_min'].between(*yb))\n | (neighbor_shapes['y_max'].between(*yb))]\n neighbor_shapes = neighbor_shapes.loc[(neighbor_shapes['z_min'].between(*zb))\n | (neighbor_shapes['z_max'].between(*zb))]\n\n return neighbor_shapes\n\n def get_candidate_neighbors_dict(self, ddf, adf, tolerance_xy, tolerance_z):\n\n min_dist_dict = defaultdict(dict)\n\n ids = np.unique(ddf.id.values)\n\n for i in tqdm(ids):\n\n shape = ddf.loc[ddf['id'] == i]\n\n candidate_neighbors = self.get_neighbor_shapes(shape, tolerance_xy, tolerance_z)\n\n candidate_ids = np.unique(candidate_neighbors.id.values)\n\n for j in candidate_ids:\n if i != j:\n shape1 = adf.loc[adf['id'] == i]\n shape2 = adf.loc[adf['id'] == j]\n shape1 = shape1[['x', 'y', 'z']]\n shape2 = shape2[['x', 'y', 'z']]\n # border1 = get_border_inds(shape1)\n # border2 = get_border_inds(shape2)\n\n shape1_xy = shape1[['x', 'y']]\n shape2_xy = shape2[['x', 'y']]\n\n shape1_z = np.unique(shape1[['z']].values)\n shape2_z = np.unique(shape2[['z']].values)\n\n shape1_z = np.expand_dims(shape1_z, 1)\n shape2_z = np.expand_dims(shape2_z, 1)\n\n min_dists_xy, min_dist_idx_xy = cKDTree(shape1_xy).query(shape2_xy, 1)\n min_dists_z, min_dist_idx_z = cKDTree(shape1_z).query(shape2_z, 1)\n # min_dists, min_dist_idx = cKDTree(border1).query(border2, 1)\n #min_dists = (min_dists_xy.min(), min_dists_z.min())\n\n min_dist_dict[i][j] = (min_dists_xy.min(), min_dists_z.min())\n min_dist_dict[j][i] = (min_dists_xy.min(), min_dists_z.min())\n return min_dist_dict\n\n def filter_distant_neighbors(self, dist_dict, tolerance_xy, tolerance_z):\n for shape_id in dist_dict.keys():\n candidate_neighbor_dict = dist_dict[shape_id]\n filtered_candidate_dict = dict(\n filter(\n lambda y: y[1][0] < tolerance_xy and y[1][1] < tolerance_z,\n candidate_neighbor_dict.items()))\n dist_dict[shape_id] = filtered_candidate_dict\n return dist_dict\n\n def calculate_euc_dists(self, ddf, shape1, shape2):\n xy_axis1 
= ddf.loc[ddf.id == shape1, ['center_y', 'center_x']].values\n xy_axis2 = ddf.loc[ddf.id == shape2, ['center_y', 'center_x']].values\n center_dist_xy = cdist(xy_axis1, xy_axis2)[0][0]\n\n z_axis1 = ddf.loc[ddf.id == shape1, ['center_z']].values\n z_axis1 = np.vstack([z_axis1, np.zeros((z_axis1.shape[0]))]).T\n z_axis2 = ddf.loc[ddf.id == shape2, ['center_z']].values\n z_axis2 = np.vstack([z_axis2, np.zeros((z_axis2.shape[0]))]).T\n\n center_dist_z = cdist(z_axis1, z_axis2)[0][0]\n\n return center_dist_xy, center_dist_z\n\n def calculate_com_dists(self, ddf, adf, shape1_id, shape2_id):\n\n shape1 = adf.loc[adf['id'] == shape1_id]\n shape2 = adf.loc[adf['id'] == shape2_id]\n shapes = [shape1, shape2]\n\n shapes = list(map(lambda df: df[['x', 'y', 'z']], shapes))\n\n offsets = []\n\n coms = []\n\n for shape in shapes:\n offsets.append([shape.x.min(), shape.y.min(), shape.z.min()])\n shape.x = shape.x - shape.x.min()\n shape.y = shape.y - shape.y.min()\n shape.z = shape.z - shape.z.min()\n\n indices = shape.values\n\n shape_np = np.zeros(((indices[:, 0].max() + 1, indices[:, 1].max() + 1, indices[:, 2].max() + 1)))\n\n for i in range(indices.shape[0]):\n shape_np[indices[i, 0], indices[i, 1], indices[i, 2]] = 1\n\n # print(np.unique(shape_np))\n com = ndimage.measurements.center_of_mass(shape_np)\n # print(com)\n com = list(map(lambda x: int(x), com))\n coms.append(com)\n\n coms_offset = []\n\n for com, offset in zip(coms, offsets):\n coms_offset.append(list(map(add, com, offset)))\n\n xy1, xy2 = coms_offset[0][:2], coms_offset[1][:2]\n xy1, xy2 = np.array(xy1), np.array(xy2)\n xy1, xy2 = np.expand_dims(xy1, -1).T, np.expand_dims(xy2, -1).T\n z1, z2 = coms_offset[0][2], coms_offset[1][2]\n\n com_dist_z = abs(z1-z2)\n com_dist_xy = cdist(xy1, xy2)[0][0]\n\n return com_dist_xy, com_dist_z\n\n def generate_df(self, dist_dict):\n dist_df = pd.DataFrame(columns=['shape_id_1', 'shape_id_2', 'center_dist_xy',\n 'center_dist_t', 'center_of_mass_dist_xy', 'center_of_mass_dist_t'])\n\n for shape_id, neighbor_dict in tqdm(dist_dict.items()):\n shape1 = shape_id\n for shape2 in neighbor_dict.keys():\n center_dist_xy, center_dist_z = self.calculate_euc_dists(ddf, shape1, shape2)\n\n com_dist_xy, com_dist_z = self.calculate_com_dists(ddf, adf, shape1, shape2)\n\n row = {\n 'shape_id_1': shape1,\n 'shape_id_2': shape2,\n 'center_dist_xy': center_dist_xy,\n 'center_dist_t': center_dist_z,\n 'center_of_mass_dist_xy': com_dist_xy,\n 'center_of_mass_dist_t': com_dist_z\n }\n\n dist_df = dist_df.append(row, ignore_index=True)\n dist_df = dist_df.astype('int')\n return dist_df\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(prog='Segmenter')\n parser.add_argument('--directory', help='output_directory')\n parser.add_argument('--rootdir', type=str, default='/app/data', help='root directory of files')\n parser.add_argument('--tolerance_xy', help='output_directory')\n parser.add_argument('--tolerance_t', help='output_directory')\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n tolerance_xy = int(args.tolerance_xy)\n tolerance_z = int(args.tolerance_t)\n directory = args.directory\n root_dir = args.rootdir\n path = os.path.join(root_dir, directory)\n\n absolute_df = pd.read_hdf(os.path.join(path, 'segmentation_absolute.h5'))\n dims_df = pd.read_hdf(os.path.join(path, 'segmentation_dims.h5'))\n nfinder = NeighbourFinder()\n dict_df = nfinder.run(tolerance_xy, tolerance_z, absolute_df, dims_df)\n dict_df = dict_df.sort_values(by=['shape_id_1'])\n dict_df.to_csv(os.path.join(path, 
'neighbors.csv'), index=False)\n\n ne2 = dict_df.groupby('shape_id_1').count().iloc[:, :1]\n ne3 = dict_df.groupby('shape_id_1').mean().iloc[:, -2:]\n\n neighbors_stat_df = pd.merge(ne2, ne3, right_index=True, left_index=True)\n neighbors_stat_df.index.name = 'id'\n neighbors_stat_df.columns = ['n_neighbors', 'avg_xy_dist_center-of-mass', 'avg_t_interval_center-of-mass']\n neighbors_stat_df['id'] = neighbors_stat_df.index\n neighbors_stat_df = neighbors_stat_df[['id', 'n_neighbors',\n 'avg_xy_dist_center-of-mass', 'avg_t_interval_center-of-mass']]\n neighbors_stat_df.to_csv(os.path.join(path, 'neighbors_statistics.csv'), index=False)\n\n\ndef debug():\n args = parse_args()\n\n directory = 'Cont_AN_2_4'\n root_dir = r'C:\\Users\\Wojtek\\Documents\\Doktorat\\Astral\\data'\n path = os.path.join(root_dir, directory)\n\n absolute_df = pd.read_hdf(os.path.join(path, 'segmentation_absolute.h5'))\n dims_df = pd.read_hdf(os.path.join(path, 'segmentation_dims.h5'))\n\n nfinder = NeighbourFinder()\n dict_df = nfinder.run(50, 100, absolute_df, dims_df)\n dict_df = dict_df.sort_values(by=['shape_id_1'])\n dict_df.to_csv(os.path.join(path, 'neighbors.csv'), index=False)\n\n ne2 = dict_df.groupby('shape_id_1').count().iloc[:, :1]\n ne3 = dict_df.groupby('shape_id_1').mean().iloc[:, -2:]\n\n neighbors_stat_df = pd.merge(ne2, ne3, right_index=True, left_index=True)\n neighbors_stat_df.index.name = 'id'\n neighbors_stat_df.columns = ['n_neighbors', 'avg_xy_dist_center-of-mass', 'avg_t_interval_center-of-mass']\n neighbors_stat_df['id'] = neighbors_stat_df.index\n neighbors_stat_df = neighbors_stat_df[['id', 'n_neighbors',\n 'avg_xy_dist_center-of-mass', 'avg_t_interval_center-of-mass']]\n neighbors_stat_df.to_csv(os.path.join(path, 'neighbors_statistics.csv'), index=False)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"astrowaves/tasks/NeighbourFinder.py","file_name":"NeighbourFinder.py","file_ext":"py","file_size_in_byte":11401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"388118089","text":"#v0.4\n#Author: Sean al-Baroudi\n#sean.al.baroudi@gmail.com\n#Next, I will write an outer loop function that skims a list of subreddits, and pumps out a file for each of them.\n#The files can be read in R, and put into tally tables to look at trends.\n\nimport praw\nimport re\nimport pymysql\nimport hashlib \nimport os\n\n#Our target subreddit and appropriate limits\ntestSubredditList = [\"MachineLearning\" ,\"Physics\" , \"Science\", \"Biochemistry\", \"Biology\", \"Chemistry\", \"Nootropics\", \"Futurology\", \"Bitcoin\", \"datamining\", \"algorithms\"]\nwriteDirectory = \"./output/\"\nfetchLimit = 500\ncomRepLimit = 32\n\n#Filtering GV\npNounStats = {}\nbeginCutLimit = 2\nminWordLength = 3\nexceptionString = ' \\.,;\\[\\]\\(\\)'\n\n#Login Stuff\ncredRedditFile = \"~/Documents/Credentials/reddit.txt\"\ncredSQLFile = \"~/Documents/Credentials/mysql.txt\"\n\n#Here we do modular filtering\ndef clippunct(wordList):\n\t#print( \"Before Processing\", wordList)\n\tfor index in range(0, len(wordList)):\n\t\twordList[index] = re.sub(exceptionString, '', wordList[index])\n\treturn wordList\n\ndef pullwords(commentText):\n\t#split list on space\n\twordList = commentText.split(\" \")\n\t#Clip punctuation and other artifacts:\n\twordList = clippunct(wordList)\n\t#Dump first X words:\n\twordList = wordList[beginCutLimit:]\n\treturn wordList\n\n\ndef pullnouns(extractList):\n\t#In this simple case, we just get acronyms by finding capitals\n\tretList = []\n\tpattern = 
re.compile(\"[A-Z]{3,7}\")\n\tfor word in extractList:\n\t\thold = re.findall(pattern, word)\n\t\t#if (len(hold) != 0):\n\t\tfor anItem in hold:\n\t\t\tif len(anItem) <= 5:\n\t\t\t\tretList.append(anItem)\n\treturn retList\n\n#Next, lets try to get comment forests for one submission, and get a flattened list of them.\n#The output is piped to a shell script file container (instead of being handled in python).\ndef startredditsession():\n\t#As I am posting this code in GitHub, I need to store the credentials outside the Git Folder. Its not included\n\t#in the project at all.\n\tcredList = []\n\tcredFP = open(credRedditFile, \"r\")\n\tfor line in credFP:\n\t\thold = line.split(\"::\")\n\t\tcredList.append(hold[1])\n\tr = praw.Reddit(client_id=credList[0], \n\tclient_secret=credList[1], username=credList[2], \\\n\tpassword=credList[3], user_agent=credList[4])\n\tprint(r.user.me())\n\treturn r\n\ndef addtodict(extractList):\n\tfor noun in extractList:\n\t\tif noun in pNounStats:\n\t\t\tpNounStats[noun] = pNounStats[noun] + 1\n\t\telse:\n\t\t\tpNounStats[noun] = 1\n\ndef grabsubmissions(sR):\n\tfor aSub in sR.new(limit=fetchLimit):\n\t\taSub.comments.replace_more(limit=comRepLimit)\n\t\tfor comment in ((aSub.comments.list())): #[1] We don't care about CommentTrees structure; just pull nouns.\n\t\t\textractList = pullwords(comment.body)\n\t\t\textractList = pullnouns(extractList)\n\t\t\taddtodict(extractList)\n\treturn\n\n#The sorting and statistics are done in R; this just pumped to a file via commandline.\n\ndef printstatstofile(name,orderedTuples):\n\tfo = open(writeDirectory + name + \".txt\", \"w+\")\n\tfor tup in orderedTuples:\n\t\tfo.write(str(tup[1]) + \":\" + str(tup[2]) + \"\\n\")\n\tfo.close()\n\treturn\n\n\ndef pushtodatabase(wordDict, sR):\n\texistsQ = \"select exists(select * from \" + sR +\" where abbr=%s)\"\n\tinsertQ = \"insert into \" + sR + \" (abbr, count, hash) values(%s,%s,%s)\"\n\tupdateQ = \"update \" + sR + \" set count=%s, hash=%s where abbr=%s\"\n\torderQ = \"select * from \" + sR + \" order by count desc\"\n\n\t#Again, credentials are stored outside our little git folder.\n\tcredList = []\n\tcredFP = open(credSQLFile, \"r\")\n\tfor line in credFP:\n\t\thold = line.split(\"::\")\n\t\tcredList.append(hold[1])\n\tconn = pymysql.connect(host='localhost', port=3306, user=credList[0], passwd=credList[1], db=credList[2])\n\tprint(\"Connection status:\", str(conn))\n\tcurr = conn.cursor()\n\t#for every word in our dictionary:\n\ttry:\n\t\tfor key in list(wordDict.keys()):\n\t\t\tcurr.execute(existsQ,(key))\n\t\t\tresult = curr.fetchone()\n\t\t\tif (result[0] == 1): #we have a match; update\n\t\t\t\tcurr.execute(updateQ,(wordDict[key],hash(key),key))\n\t\t\t\tconn.commit()\n\t\t\telif (result[0] == 0): #add a new entry\n\t\t\t\tcurr.execute(insertQ,(key,wordDict[key],hash(key)))\n\t\t\t\tconn.commit()\n\t\t\telse: #error!\n\t\t\t\traise pymysql.Error(\"Unexpected row search result:\" + str(result[0]))\n\texcept pymysql.Error as e:\n\t\tprint('Got error {!r}, errno is {}'.format(e, e.args[0]))\n\t\n\tcurr.execute(orderQ)\n\torderedTuples = curr.fetchall()\n\tconn.close() #ByeBye\n\treturn orderedTuples\n\ndef printtuples(orderedTuples):\n\tfor tup in orderedTuples:\n\t\tprint( str(tup[1]) + \":\" + str(tup[2]) + \":\" + str(tup[3]) + \":\" )\n\n\nif __name__ == \"__main__\":\n\t#Start a new authenticated reddit request.\n\ttheSession = startredditsession()\n\n\t#Get a SubReddit Object from our list.\n\tfor item in testSubredditList:\n\t\tpNounStats = {} #reset this for every 
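# A minimal alternative sketch for the exists/insert/update round-trips in pushtodatabase
# above: if the table declares a UNIQUE key on abbr (an assumption, the table is created
# outside this script), one INSERT ... ON DUPLICATE KEY UPDATE per word suffices. hashlib
# (already imported above) gives a digest that is stable across runs, unlike the salted
# builtin hash().
import hashlib

def upsertcounts(conn, table, wordDict):
    upsertQ = ("insert into " + table + " (abbr, count, hash) values (%s,%s,%s) "
               "on duplicate key update count=values(count), hash=values(hash)")
    with conn.cursor() as curr:
        for key, count in wordDict.items():
            curr.execute(upsertQ, (key, count, hashlib.md5(key.encode()).hexdigest()))
    conn.commit()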
loop.\n\t\tsR = theSession.subreddit(item)\n\t\t#For every submission in a subreddit, skim all comments and pull the Abbreviations.\n\t\tgrabsubmissions(sR)\n\t\t#our table was inited manually in the mysql console; we assume it's already there.\n\t\t#Now we are going to push everything to a storage area: Database or File.\n\t\torderedTuples = pushtodatabase(pNounStats,item)\n\t\t#for every element in finalDict, hash it and write a table entry (id, abbr, count, hash)\n\t\t#The dictionary of keywords is assembled. We can just print it out. Let's do a simple print out line by line.\n\t\t#printstatstofile(item)\n\t\tprintstatstofile(item,orderedTuples)\n\t\tprint(\"Just finished subreddit:\" + item)\n\n#References:\n#[1]: http://praw.readthedocs.io/en/latest/getting_started/quick_start.html#submission-iteration\n#[2]: Regex Replacement (terse): http://stackoverflow.com/questions/3900054/python-strip-multiple-characters\n","sub_path":"testcode/composites/upcomingPull.py","file_name":"upcomingPull.py","file_ext":"py","file_size_in_byte":5536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"547168009","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 6 14:46:47 2017\n\n@author: JasonJe\n\"\"\"\n\nimport numpy as np\nfrom sklearn import neighbors\n\ndata = []\nlabels = []\nwith open('data2.txt') as ifile:\n    for line in ifile:\n        tokens = line.replace('\t\n','').split('\t')\n        data.append([float(tk) for tk in tokens[:-1]])\n        labels.append(tokens[-1])\nx = np.array(data)\ny = np.array(labels)\n\nclf = neighbors.KNeighborsClassifier(algorithm = 'kd_tree', n_neighbors = 1)\nclf.fit(x,y)\n\nanswer = clf.predict(x)\nprint('Accuracy: %f' % float(np.mean(answer == y)))# accuracy rate","sub_path":"kNN/kNN_sklearn.py","file_name":"kNN_sklearn.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"12282022","text":"#!/usr/bin/env python3\n\nimport argparse, subprocess, os\n\ndef checkFASTQ(list1, list2):\n\n    #Checking if lengths are the same, since they are paired end reads:\n    if len(list1) != len(list2):\n        return False\n    #Since the lengths are the same, check if the files end with _1.fastq and _2.fastq\n    for i in range(0, len(list1)):\n        if not (list1[i].endswith(\"_1.fastq\") and list2[i].endswith(\"_2.fastq\")): #(Assuming file ends with .fastq and not .fq, add in .fq later)\n            return False\n\n    return True\n\ndef runKallisto(referenceindex, output_directory, input_directory, file1, file2):\n    print(\"Running Kallisto for: \"+file1.split(\"/\")[-1].replace(\"_1.fastq\", \"\"))\n    subprocess.run(\"kallisto quant -i \"+referenceindex+\" -o \"+output_directory+file1.split(\"/\")[-1].replace(\"_1.fastq\", \"\")+\" \"+input_directory+file1+\" \"+input_directory+file2+\" -b 100\", shell=True)\n\ndef runHISAT2(referenceindex, output_directory, input_directory, file1, file2):\n    print(\"Running HISAT2 for: \"+file1.split(\"/\")[-1].replace(\"_1.fastq\", \"\"))\n    subprocess.run(\"hisat2 -x \"+referenceindex+\" -1 \"+input_directory+file1+\" -2 \"+input_directory+file2+\" -S \"+output_directory+file1.split(\"/\")[-1].replace(\"_1.fastq\", \"\")+\".sam\", shell=True)\n\ndef main():\n\n    #Argparse code\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"-a\", \"--aligner-to-use\", help=\"1: Uses Kallisto; 2: Uses HISAT2; Default: 1\", required=True, type=int, default=1)\n    parser.add_argument(\"-i\", \"--input-files-directory\", help=\"Path to directory containing FASTQ 
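# Quick self-check for the pairing logic in checkFASTQ above; the sample file names are
# invented and simply mirror the _1.fastq/_2.fastq convention the wrapper expects.
assert checkFASTQ(["s1_1.fastq", "s2_1.fastq"], ["s1_2.fastq", "s2_2.fastq"]) is True
assert checkFASTQ(["s1_1.fastq"], []) is False  # length mismatch: not a valid pairing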
files.\", required=True)\n    parser.add_argument(\"-r\", \"--reference-index\", help=\"Path of indexed reference file.\\n\"\n                                                  \"NOTE: 1. If Kallisto is selected, then enter the path of indexed reference transcriptome (Ends with .idx);\\n\"\n                                                  \"2. If HISAT2 is selected, then enter the path of indexed reference genome with the prefix\", required=True)\n    parser.add_argument(\"-o\", \"--output-files-directory\", help=\"Enter name of directory which will contain output of aligner.\", required=True)\n    args = vars(parser.parse_args())\n\n    #Populating the variables\n    aligner = args['aligner_to_use']\n    input_path = args['input_files_directory']\n    referenceindex_path = args['reference_index']\n    output_path = args['output_files_directory']\n\n    #Stripping \"/\" if present and adding \"/\", if not present will add \"/\" anyway: (Can do it in the subprocess commands itself, edit later)\n    input_path = input_path.rstrip(\"/\")\n    input_path = input_path + \"/\"\n    output_path = output_path.rstrip(\"/\")\n    output_path = output_path + \"/\"\n\n    #Making output directory:\n    if os.path.isdir(output_path) is False:\n        os.mkdir(output_path)\n\n    input_dir = sorted(os.listdir(input_path))\n    mate1_list = [x for x in input_dir if \"_1\" in x]\n    mate2_list = [x for x in input_dir if \"_2\" in x]\n\n    #Checking FASTQ files:\n    status = checkFASTQ(mate1_list, mate2_list)\n\n    if status is True:\n        if aligner == 1:\n            #Running kallisto:\n            for i,j in zip(mate1_list, mate2_list):\n                runKallisto(referenceindex_path, output_path, input_path, i, j)\n        if aligner == 2:\n            #Running hisat2:\n            for i,j in zip(mate1_list, mate2_list):\n                runHISAT2(referenceindex_path, output_path, input_path, i, j)\n    else:\n        print(\"Error running aligners, check your files.\")\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"aligner_wrapper.py","file_name":"aligner_wrapper.py","file_ext":"py","file_size_in_byte":3550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"540448165","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\n\"\"\"\n@version: 0.1\n@author: RichardQian\n@license: Apache Licence \n@contact: paranoid_qian@163.com\n@file: property_demo.py\n@time: 15/11/19 3:51 PM\n\"\"\"\n\nimport types\n\nclass Screen(object):\n\n    cls_attr = 'class attribute'\n\n    def __init__(self, height):\n        self.__height = height\n\n    @property\n    def width(self):\n        print('through getter: ')\n        return self.__width\n\n    @width.setter\n    def width(self, width):\n        # add some checks...\n        print('through setter: %s' % width)\n        self.__width = width\n\n    # read-only\n    @property\n    def resolution(self):\n        return self.__width**2\n\ns = Screen(80)\ns.width = 60 # goes through the setter\nprint(s.width) # goes through the getter\n\n# Double underscores are used, yet the attribute can still be accessed directly???\ns.__width = 50 # bypasses the setter\nprint(s.__width) # bypasses the getter\nprint(s.width)\n\n# __height, however, cannot be accessed here.\n# print(s.__height)\n\n# print([a for a in dir(s) if isinstance(a, types.MethodType)])\n\nprint(s.__dict__)\n# output: {'__width': 50, '_Screen__height': 80, '_Screen__width': 60}\n# __dict__ does not list class attributes???\n\n# An instance attribute with the same name shadows the class attribute, which is then only reachable as [ClassName.attr]\n\n\n\"\"\"\nThoughts:\n\n1. The property defined via @property and its setter/getter is named width, while the attribute\n   accessed externally as __width is actually added dynamically.\n   That is why modifying __width does not change the @property-defined attribute.\n\n2. The dir() output shows the attributes '_Screen__height' and '_Screen__width', as well as __width.\n   a. _Screen__height needs no explanation, easy to understand.\n   b. _Screen__width should be the one defined by self.__width inside the @property methods.\n\n   s.width operates on the two attributes above.\n\n   c. __width is added dynamically from outside the class, so modifying __width does not affect _Screen__width at all.\n\n   d. resolution does not create any instance attribute at all?\n\"\"\"\n","sub_path":"oop/property_demo.py","file_name":"property_demo.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"39535785","text":"import time\nimport os\nimport json\nimport requests\nfrom json import dumps\n\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nimport tensorflow as tf\nimport cv2\nimport traceback\nimport numpy as np\nfrom django.http import JsonResponse\nimport base64\n\n\nfrom deal_one_model.kaiguandeng.deal_one_img import KaiGuanDengEval\n\n\ndef echoRuntime(func):\n    def wrapper(*args, **kwargs):\n        startTime = time.time()\n        result = func(*args, **kwargs)\n        endTime = time.time()\n        msecs = (endTime - startTime)\n        print(func.__name__ + \" running time is %.2f s\" % msecs)\n        return result\n\n    return wrapper\n\n\n# @echoRuntime\ndef kaiguandeng(request):\n    if (request.method == 'POST'):\n        t0 = time.time()\n\n        img_data = request.POST.get('image') # essentially a base64 string that still needs decoding\n        tt = time.time()\n        print(\"Receiving one image took {} seconds\".format(tt - t0))\n        # print(test_image)\n        img_byte = base64.b64decode(img_data)\n        img_np_arr = np.frombuffer(img_byte, np.uint8) # np.fromstring is deprecated for binary input\n        image = cv2.imdecode(img_np_arr, cv2.IMREAD_COLOR)\n        t1 = time.time()\n        print(\"Decoding one image took {} seconds\".format(t1 - tt))\n        cv2.imwrite(\"./pppp.png\", image)\n        t2 = time.time()\n        print(\"Saving one image took {} seconds\".format(t2 - t1))\n        try:\n            load_pb_model_szld = KaiGuanDengEval()\n            result_list = load_pb_model_szld.get_detect_result(image)\n        except:\n            result_list = []\n        num = 0\n        for x in result_list:\n            for y in x:\n                for z in y:\n                    num += 1\n\n        if len(result_list) == 3:\n            deng, yskg, hskg = result_list\n            if len(deng) == 3 and len(yskg) == 2 and len(hskg) == 1:\n                one, two, three = deng\n                yskg_one,yskg_two = yskg\n                one_hskg = hskg[0]\n                if len(one)==12 and len(two)==8 and len(three)==8 and len(yskg_one)==8 and len(yskg_two)==1 and len(one_hskg)==1:\n                    code = 200\n                else:\n                    code = 0\n            else:\n                code = 0\n        else:\n            code = 0\n\n        t3 = time.time()\n        print(\"code--------------------{}\".format(code))\n        print(\"num--------------------{}\".format(num))\n        print(\"Running detection on one image took {} seconds\".format(t3 - t2))\n        data = {\n            'code': code,\n            'num': num,\n            'result': result_list,\n        }\n        t4 = time.time()\n        # print(data)\n        print(\"Processing one image took {} seconds\".format(t4 - t0))\n        return JsonResponse(data)\n        # return HttpResponse(\"success!!!\")\n","sub_path":"kaiguandeng/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"607006439","text":"# Emojitracker API script\n\nimport requests as re\nimport os\nimport json\nimport time\n\nurl = \"https://api.emojitracker.com/v1/rankings\"\n\ndef get_rankings():\n    res = re.get(url)\n    return res\n    \ndef print_to_file(res):\n    timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n    with open('rankings_' + timestr + '.txt', 'a') as outfile:\n        outfile.write(json.dumps(res.json(), indent=4, sort_keys=True))\n    with open('headers_' + timestr + '.txt', 'a') as outfile:\n        outfile.write(str(res.headers))\n\ndef main():\n    while True:\n        res = get_rankings() # fetch once per cycle and pass the response along\n        print_to_file(res)\n        time.sleep(300)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"emojitracker.py","file_name":"emojitracker.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"75249766","text":"#!/usr/bin/env python\n\n# TODO Maybe config updates should emit 
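# Client-side counterpart of the kaiguandeng view above, sketched for illustration: the
# endpoint reads a base64 string from the 'image' form field. The URL is a placeholder;
# only the field name and the base64 encoding come from the original code.
import base64
import requests

def send_image(path, url="http://localhost:8000/kaiguandeng/"):
    with open(path, "rb") as f:
        payload = {"image": base64.b64encode(f.read()).decode("ascii")}
    return requests.post(url, data=payload).json()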
events?\n# Maybe commands should emit events too, and config updates just listen to\n# those and or chain them somehow? what about gevent's link thing?\n\nfrom functools import partial\nimport logging\n\nfrom gevent.event import Event\n\nfrom ooi.logging import log\nimport ooi.logging\n\n\nDEFAULT_PID_DIR = \"/var/ooici/port_agent/pid\"\nDEFAULT_LOG_LEVEL = 'warn'\n\n\nclass Config(object):\n cmds = {\n 'heartbeat_interval': (int, None),\n 'command_port': (int, None),\n 'data_port': (int, None),\n 'pid_dir': (str, DEFAULT_PID_DIR),\n 'log_level': (str, DEFAULT_LOG_LEVEL),\n 'log_config': (str, None),\n 'antelope_orb_name': (str, None),\n 'antelope_orb_select': (str, None),\n 'antelope_orb_reject': (str, None),\n 'antelope_orb_after': (float, -1),\n }\n\n # TODO: What about command_port?\n CONFIG_DEPS = set([\n 'heartbeat_interval',\n 'data_port',\n 'antelope_orb_name',\n ])\n\n # When one of these changes, it signals the data server to restart\n DATASERVER_DEPS = [\n 'data_port',\n 'antelope_orb_name',\n 'antelope_orb_select',\n 'antelope_orb_reject',\n 'antelope_orb_after',\n ]\n\n def setval(self, name, val, *args, **kwargs):\n setattr(self, name, val)\n\n def __init__(self, options, cmdproc):\n self.configuredevent = Event()\n self.dataserverconfigupdate = Event()\n self.heartbeatactive = Event()\n for name, (converter, default) in self.cmds.iteritems():\n self.cmdproc = cmdproc\n # Initialize attr with default val\n setattr(self, name, default)\n # Create command to set attr\n # cmdserver sends sock after val; will that mess this up?\n setval = partial(self.setval, name)\n cmdproc.setCmd(name, converter, setval)\n\n # Set a default loglevel?\n # update from config file\n if hasattr(options, 'conffile') and options.conffile is not None:\n self.readConfig(options.conffile)\n # update from command line\n if hasattr(options, 'verbose') and options.verbose is True:\n self.log_level = 'debug'\n if hasattr(options, 'command_port') and options.command_port is not None:\n self.command_port = options.command_port\n\n def readConfig(self, conffile):\n with open(conffile, 'rU') as file:\n for line in file:\n self.cmdproc.processCmd(line)\n\n def __setattr__(self, name, value):\n super(Config, self).__setattr__(name, value)\n if not self.configuredevent.isSet():\n configured_attrs = set()\n for attr in self.CONFIG_DEPS:\n if hasattr(self, attr) and getattr(self, attr) is not None:\n configured_attrs.add(attr)\n if configured_attrs == self.CONFIG_DEPS:\n self.configuredevent.set()\n if name in self.DATASERVER_DEPS:\n self.dataserverconfigupdate.set()\n self.dataserverconfigupdate.clear()\n\n @property\n def heartbeat_interval(self):\n return self._heartbeat_interval\n\n @heartbeat_interval.setter\n def heartbeat_interval(self, value):\n self._heartbeat_interval = value\n if value > 0:\n self.heartbeatactive.set()\n else:\n self.heartbeatactive.clear()\n\n @property\n def log_level(self):\n return self._log_level\n\n @log_level.setter\n def log_level(self, val):\n levels = {\n 'error': logging.ERROR,\n 'warn': logging.WARNING,\n 'info': logging.INFO,\n 'debug': logging.DEBUG,\n 'mesg': logging.DEBUG,\n }\n try:\n level = levels[val]\n except KeyError:\n log.error(\"Unknown logging level %s\" % val)\n else:\n self._log_level = val\n logging.getLogger().setLevel(level)\n\n @property\n def log_config(self):\n return self._log_config\n\n @log_config.setter\n def log_config(self, val):\n try:\n ooi.logging.config.add_configuration(val)\n self._log_config = val\n except Exception:\n log.error(\"Failed to 
read log config '%s'\" % val, exc_info=True)\n\n","sub_path":"port_agent/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"47860052","text":"\n\nclass Config(object):\n def __init__(self):\n self.path = './data/ML-1M/'\n self.user_dataset = self.path + 'userRating'\n self.group_dataset = self.path + 'groupRating'\n self.user_in_group_path = \"./data/ML-1M/groupMember.txt\"\n self.factor_num = 64\n self.num_layers = 3\n self.epoch = 30\n self.num_negatives = 4\n self.batch_size = 2048\n self.lr = [0.000005, 0.000001, 0.0000005]\n self.drop_ratio = 0.2\n self.topK = 5\n","sub_path":"NeuMF/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"401282230","text":"#Several utility functions\n\ndef produce_from_queue_of_file_chunks(queue,producer,topic_name,logger) :\n \"\"\"\n produce every file chunk in a given queue to the given topic using the given producer\n \"\"\"\n file_chunk = queue.get()\n while file_chunk is not None :\n file_chunk.produce_to_topic(producer,topic_name,logger)\n queue.task_done()\n file_chunk = queue.get()\n queue.task_done()\n\n#a very small class (and instance thereof) to hold a logger object to use in the producer callback \n# (literally exists because I don't think I can add extra keyword or other arguments to the producer callback function)\nclass ProducerCallbackLogger :\n\n @property\n def logger(self) :\n return self._logger\n @logger.setter\n def logger(self,logger_val) :\n self._logger = logger_val\n def __init__(self) :\n self._logger = None\n\nPRODUCER_CALLBACK_LOGGER = ProducerCallbackLogger()\n\n#a callback function to use for testing whether a message has been successfully produced to the topic\ndef producer_callback(err,msg) :\n global PRODUCER_CALLBACK_LOGGER\n if err is not None: #raise an error if the message wasn't sent successfully\n if err.fatal() :\n logmsg=f'ERROR: fatally failed to deliver message with key {msg.key()}. Error reason: {err.str()}'\n if PRODUCER_CALLBACK_LOGGER.logger is not None :\n PRODUCER_CALLBACK_LOGGER.logger.error(logmsg,RuntimeError)\n else :\n raise RuntimeError(logmsg)\n elif not err.retriable() :\n logmsg=f'ERROR: Failed to deliver message with key {msg.key()} and cannot retry. 
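# How produce_from_queue_of_file_chunks above is typically driven, sketched with stand-in
# producer/topic/logger objects; only the None-sentinel-plus-task_done protocol comes from
# the original function.
from queue import Queue
from threading import Thread

def run_chunk_worker(producer, topic_name, logger):
    queue = Queue()
    worker = Thread(target=produce_from_queue_of_file_chunks,
                    args=(queue, producer, topic_name, logger))
    worker.start()
    # ...queue.put(chunk) here for each file chunk as it is created...
    queue.put(None)  # sentinel: tells the worker loop to exit
    queue.join()     # returns once every put() item has been task_done()'d
    worker.join()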
Error reason: {err.str()}'\n if PRODUCER_CALLBACK_LOGGER.logger is not None :\n PRODUCER_CALLBACK_LOGGER.logger.error(logmsg,RuntimeError)\n else :\n raise RuntimeError(logmsg)","sub_path":"openmsipython/data_file_io/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"30515595","text":"from __future__ import annotations\n\nfrom typing import Optional, Union\n\nimport tensorflow as tf\nfrom numpy import array\nfrom pandas import options\n\nfrom GNN.GNN_BaseClass import BaseClass\nfrom GNN.graph_class import GraphObject, GraphTensor\n\noptions.display.max_rows = 15\n\n\n#######################################################################################################################\n### CLASS GNN - NODE BASED ############################################################################################\n#######################################################################################################################\nclass GNNnodeBased(BaseClass):\n \"\"\" GNN for node-based problem \"\"\"\n\n ## CONSTRUCTORS METHODS ###########################################################################################\n def __init__(self,\n net_state: tf.keras.models.Sequential,\n net_output: tf.keras.models.Sequential,\n optimizer: tf.keras.optimizers.Optimizer,\n loss_function: tf.keras.losses.Loss,\n loss_arguments: Optional[dict],\n state_vect_dim: int,\n max_iteration: int,\n threshold: float,\n addressed_problem: str,\n extra_metrics: Optional[dict] = None,\n extra_metrics_arguments: Optional[dict[str, dict]] = None,\n path_writer: str = 'writer/',\n namespace: str = 'GNN') -> None:\n \"\"\" CONSTRUCTOR\n\n :param net_state: (tf.keras.model.Sequential) MLP for the state network, initialized externally.\n :param net_output: (tf.keras.model.Sequential) MLP for the output network, initialized externally.\n :param optimizer: (tf.keras.optimizers) for gradient application, initialized externally.\n :param loss_function: (tf.keras.losses) for loss computation.\n :param loss_arguments: (dict) with some {'argument': values} one could pass to loss when computed.\n :param state_vect_dim: None or (int)>=0, vector dim for a GNN which does not initialize states with node labels.\n :param max_iteration: (int) max number of iteration for the unfolding procedure (to reach convergence).\n :param threshold: threshold for specifying if convergence is reached or not.\n :param addressed_problem: (str) in ['r','c'], 'r':regression, 'c':classification for the addressed problem.\n :param extra_metrics: None or dict {'name':function} for metrics to be watched during training/validation/test procedures.\n :param extra_metrics_arguments: None or dict {'name': {'argument': value}} for arguments passed to extra_metrics['name'].\n :param path_writer: (str) path for saving TensorBoard objects in training procedure. 
If folder is not empty, all files are removed.\n :param namespace: (str) namespace for tensorboard visualization.\n \"\"\"\n # Check arguments\n if not isinstance(state_vect_dim, int) or state_vect_dim < 0: raise TypeError('param must be int>=0')\n\n # BaseClass GNN constructor\n super().__init__(optimizer, loss_function, loss_arguments, addressed_problem, extra_metrics, extra_metrics_arguments, path_writer,\n namespace)\n\n ### GNN parameter\n self.net_state = net_state\n self.net_output = net_output\n self.max_iteration = max_iteration\n self.state_threshold = threshold\n self.state_vect_dim = state_vect_dim\n\n # -----------------------------------------------------------------------------------------------------------------\n def copy(self, *, path_writer: str = '', namespace: str = '', copy_weights: bool = True) -> 'self':\n \"\"\" COPY METHOD\n\n :param path_writer: None or (str), to save copied gnn tensorboard writer. Default in the same folder + '_copied'.\n :param namespace: (str) for tensorboard visualization in model training procedure.\n :param copy_weights: (bool) True: state and output weights are copied in new gnn, otherwise they are re-initialized.\n :return: a Deep Copy of the GNN instance.\n \"\"\"\n # path_writer and namespace\n if not path_writer: path_writer = self.path_writer + '_copied/'\n if not namespace: namespace = 'GNN'\n\n # MLPs\n netS = tf.keras.models.clone_model(self.net_state)\n netO = tf.keras.models.clone_model(self.net_output)\n if copy_weights:\n netS.set_weights(self.net_state.get_weights())\n netO.set_weights(self.net_output.get_weights())\n\n return self.__class__(net_state=netS, net_output=netO, optimizer=self.optimizer.__class__(**self.optimizer.get_config()),\n loss_function=self.loss_function, loss_arguments=self.loss_args, max_iteration=self.max_iteration,\n threshold=self.state_threshold, addressed_problem=self.addressed_problem, extra_metrics=self.extra_metrics,\n extra_metrics_arguments=self.mt_args, state_vect_dim=self.state_vect_dim,\n path_writer=path_writer, namespace=namespace)\n\n ## SAVE AND LOAD METHODs ##########################################################################################\n def save(self, path: str):\n \"\"\" Save model to folder , without extra_metrics info \"\"\"\n from json import dump\n\n # check path\n if path[-1] != '/': path += '/'\n\n # save net_state and net_output\n tf.keras.models.save_model(self.net_state, f'{path}net_state/')\n tf.keras.models.save_model(self.net_output, f'{path}net_output/')\n\n # save configuration file in json format\n config = {'loss_function': tf.keras.losses.serialize(self.loss_function), 'loss_arguments': self.loss_args,\n 'optimizer': str(tf.keras.optimizers.serialize(self.optimizer)),\n 'max_iteration': self.max_iteration, 'threshold': self.state_threshold,\n 'addressed_problem': self.addressed_problem, 'state_vect_dim': self.state_vect_dim}\n\n with open(f'{path}config.json', 'w') as json_file:\n dump(config, json_file)\n\n # -----------------------------------------------------------------------------------------------------------------\n @classmethod\n def load(self, path: str,path_writer: Optional[str] = None, namespace: str = 'GNN',\n extra_metrics: Optional[dict] = None, extra_metrics_arguments: Optional[dict[str, dict]] = None):\n \"\"\" Load model from folder .\n\n Only Loss is considered as metrics after loading process.\n To use more metrics, set :param extra_metrics: and :param extra_metrics_arguments:\n\n :param path: (str) folder path containing all useful files 
to load the model.\n :param path_writer: (str) path for writer folder. !!! Constructor method deletes a non-empty folder and makes a new empty one.\n :param namespace: (str) namespace for tensorboard visualization in model training procedure.\n :param extra_metrics: None or dict {'name':function} for metrics to be watched during training/validation/test procedures.\n :param extra_metrics_arguments: None or dict {'name':{'argument':value}} for arguments passed to extra_metrics['name'].\n :return: the loaded gnn model. GNN type depends on class which call load method.\n \"\"\"\n from json import loads\n\n # check path\n if path[-1] != '/': path += '/'\n if path_writer is None: path_writer = f'{path}writer'\n\n # load configuration file\n with open(f'{path}config.json', 'r') as read_file:\n config = loads(read_file.read())\n\n # get optimizer, loss function\n optz = tf.keras.optimizers.deserialize(eval(config.pop('optimizer')))\n loss = tf.keras.losses.deserialize(config.pop('loss_function'))\n\n # load net_state and net_output\n netS = tf.keras.models.load_model(f'{path}net_state/', compile=False)\n netO = tf.keras.models.load_model(f'{path}net_output/', compile=False)\n\n return self(net_state=netS, net_output=netO, optimizer=optz, loss_function=loss,\n extra_metrics=extra_metrics, extra_metrics_arguments=extra_metrics_arguments,\n path_writer=path_writer, namespace=namespace, **config)\n\n ## GETTERS AND SETTERS METHODs ####################################################################################\n def get_dense_layers(self) -> list[tf.keras.layers.Layer]:\n \"\"\" Get dense layer for the application of regularizers in training time \"\"\"\n netSt_dense_layers = [i for i in self.net_state.layers if isinstance(i, tf.keras.layers.Dense)]\n netOut_dense_layers = [i for i in self.net_output.layers if isinstance(i, tf.keras.layers.Dense)]\n return netSt_dense_layers + netOut_dense_layers\n\n def trainable_variables(self) -> tuple[list[list[tf.Tensor]], list[list[tf.Tensor]]]:\n \"\"\" Get tensor weights for net_state and net_output \"\"\"\n return [self.net_state.trainable_variables], [self.net_output.trainable_variables]\n\n # -----------------------------------------------------------------------------------------------------------------\n def get_weights(self) -> tuple[list[list[array]], list[list[array]]]:\n \"\"\" Get array weights for net_state and net_output \"\"\"\n return [self.net_state.get_weights()], [self.net_output.get_weights()]\n\n # -----------------------------------------------------------------------------------------------------------------\n def set_weights(self, weights_state: list[list[array]], weights_output: list[list[array]]) -> None:\n \"\"\" Set weights for net_state and net_output \"\"\"\n assert len(weights_state) == len(weights_output) == 1\n self.net_state.set_weights(weights_state[0])\n self.net_output.set_weights(weights_output[0])\n\n ## CALL/PREDICT METHOD ############################################################################################\n def __call__(self, g: Union[GraphObject, GraphTensor]) -> tf.Tensor:\n \"\"\" Return ONLY the GNN output in test mode (training == False) for graph g of type GraphObject/GraphTensor \"\"\"\n return self.Loop(g, training=False)[-1]\n\n ## EVALUATE METHODS ###############################################################################################\n def evaluate_single_graph(self, g: Union[GraphObject, GraphTensor], training: bool) -> tuple:\n \"\"\" Evaluate single GraphObject/GraphTensor element g in 
test mode (training == False)\n\n :param g: (GraphObject/GraphTensor) single GraphObject/GraphTensor element\n :param training: (bool) set internal models behavior, s.t. they work in training or testing mode\n :return: (tuple) convergence iteration (int), loss value (matrix), target and output (matrices) of the model\n \"\"\"\n # transform GraphObject in GraphTensor\n if isinstance(g, GraphObject): g = GraphTensor.fromGraphObject(g)\n\n # get targets\n targs = self.get_filtered_tensor(g, g.targets)\n loss_weights = self.get_filtered_tensor(g, g.sample_weights)\n\n # graph processing\n it, _, out = self.Loop(g, training=training)\n\n # if class_metrics != 1, else it does not modify loss values\n loss = self.loss_function(targs, out, **self.loss_args) * loss_weights\n return it, tf.reduce_sum(loss), targs, out\n\n ## LOOP METHODS ###################################################################################################\n def condition(self, k, state, state_old, *args) -> tf.bool:\n \"\"\" Boolean function condition for tf.while_loop correct processing graphs \"\"\"\n\n # distance_vector is the Euclidean Distance: √ Σ(xi-yi)² between current state xi and past state yi\n outDistance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(state, state_old)), axis=1))\n\n # state_norm is the norm of state_old, defined by ||state_old|| = √ Σxi²\n state_norm = tf.sqrt(tf.reduce_sum(tf.square(state_old), axis=1))\n\n # boolean vector that stores the \"convergence reached\" flag for each node\n scaled_state_norm = tf.math.scalar_mul(self.state_threshold, state_norm)\n\n # check whether global convergence and/or the maximum number of iterations have been reached\n checkDistanceVec = tf.greater(outDistance, scaled_state_norm)\n\n # compute boolean\n c1 = tf.reduce_any(checkDistanceVec)\n c2 = tf.less(k, self.max_iteration)\n return tf.logical_and(c1, c2)\n\n # -----------------------------------------------------------------------------------------------------------------\n def convergence(self, k, state, state_old, nodes, adjacency, aggregated_nodes, aggregated_arcs, training) -> tuple:\n \"\"\" Compute new state for the graph's nodes \"\"\"\n\n # node_components refers to the considered nodes, NOT to the neighbors\n # It is composed of [nodes' state] if state at t=0 is NOT initialized by labels, [nodes' state | nodes' labels] otherwise\n node_components = tf.constant(state)\n if self.state_vect_dim:\n node_components = tf.concat([node_components, nodes], axis=1)\n\n # aggregated_states is the aggregation of ONLY neighbors' states.\n # NOTE: if state_vect_dim != 0, neighbors' label are considered using :param aggregated_nodes: since it is constant\n aggregated_states = tf.sparse.sparse_dense_matmul(adjacency, state)\n\n # concatenate the destination node 'old' states to the incoming message, to obtain the input to net_state\n inp_state = tf.concat([node_components, aggregated_states, aggregated_nodes, aggregated_arcs], axis=1)\n\n # compute new state and update step iteration counter\n state_new = self.net_state(inp_state, training=training)\n\n return k + 1, state_new, state, nodes, adjacency, aggregated_nodes, aggregated_arcs, training\n\n # -----------------------------------------------------------------------------------------------------------------\n def apply_filters(self, state_converged, nodes, adjacency, arcs_label, mask) -> tf.Tensor:\n \"\"\" Takes only nodes' [states] or [states|labels] for those with output_mask==1 AND belonging to set \"\"\"\n if self.state_vect_dim: state_converged = 
tf.concat([state_converged, nodes], axis=1)\n return tf.boolean_mask(state_converged, mask)\n\n # -----------------------------------------------------------------------------------------------------------------\n def Loop(self, g: Union[GraphObject, GraphTensor], *, training: bool = False) -> tuple[int, tf.Tensor, tf.Tensor]:\n \"\"\" Process a single GraphObject/GraphTensor element g, returning iteration, states and output \"\"\"\n\n # transform GraphObject in GraphTensor\n if isinstance(g, GraphObject): g = GraphTensor.fromGraphObject(g)\n\n # initialize states and iters for convergence loop\n # including aggregated neighbors' label and aggregated incoming arcs' label\n aggregated_arcs = tf.sparse.sparse_dense_matmul(g.ArcNode, g.arcs[:, 2:])\n aggregated_nodes = tf.zeros(shape=(g.nodes.shape[0], 0), dtype='float32')\n if self.state_vect_dim > 0:\n state = tf.random.normal((g.nodes.shape[0], self.state_vect_dim), stddev=0.1, dtype='float32')\n aggregated_nodes = tf.concat([aggregated_nodes, tf.sparse.sparse_dense_matmul(g.Adjacency, g.nodes)], axis=1)\n else:\n state = tf.constant(g.nodes, dtype='float32')\n state_old = tf.ones_like(state, dtype='float32')\n k = tf.constant(0, dtype='float32')\n training = tf.constant(training, dtype=bool)\n\n # loop until convergence is reached\n k, state, state_old, *_ = tf.while_loop(self.condition, self.convergence,\n [k, state, state_old, g.nodes, g.Adjacency, aggregated_nodes, aggregated_arcs, training])\n\n # out_st is the converged state for the filtered nodes, depending on g.set_mask\n mask = tf.logical_and(g.set_mask, g.output_mask)\n input_to_net_output = self.apply_filters(state, g.nodes, g.Adjacency, g.arcs[:, 2:], mask)\n\n # compute the output of the gnn network\n out = self.net_output(input_to_net_output, training=training)\n return k, state, out\n\n\n#######################################################################################################################\n### CLASS GNN - EDGE BASED ############################################################################################\n#######################################################################################################################\nclass GNNedgeBased(GNNnodeBased):\n \"\"\" GNN for edge-based problem \"\"\"\n\n def apply_filters(self, state_converged, nodes, adjacency, arcs_label, mask) -> tf.Tensor:\n \"\"\" Takes only nodes' [states] or [states|labels] for those with output_mask==1 AND belonging to set \"\"\"\n if self.state_vect_dim: state_converged = tf.concat([state_converged, nodes], axis=1)\n\n # gather source nodes' and destination nodes' state\n states = tf.gather(state_converged, adjacency.indices)\n states = tf.reshape(states, shape=(arcs_label.shape[0], 2 * state_converged.shape[1]))\n states = tf.cast(states, tf.float32)\n\n # concatenate source and destination states (and labels) to arc labels\n arc_state = tf.concat([states, arcs_label], axis=1)\n\n # takes only arcs states for those with output_mask==1 AND belonging to the set (in case Dataset == 1 Graph)\n return tf.boolean_mask(arc_state, mask)\n\n\n#######################################################################################################################\n### CLASS GNN - GRAPH BASED ###########################################################################################\n#######################################################################################################################\nclass GNNgraphBased(GNNnodeBased):\n \"\"\" GNN for graph-based problem \"\"\"\n\n # 
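# The fixed-point iteration used by condition()/convergence()/Loop() above, reduced to a
# skeleton for clarity: iterate until the state change falls below threshold * ||state_old||
# on every node, or max_iter is reached. update_fn stands in for the net_state call and must
# preserve the (n_nodes, state_dim) shape; this sketch reuses the tf imported at the top of
# the module and is not part of the class.
def fixed_point(update_fn, state0, threshold=0.01, max_iter=50):
    def cond(k, s, s_old):
        delta = tf.sqrt(tf.reduce_sum(tf.square(s - s_old), axis=1))
        norm = tf.sqrt(tf.reduce_sum(tf.square(s_old), axis=1))
        return tf.logical_and(tf.reduce_any(delta > threshold * norm), k < max_iter)

    def body(k, s, s_old):
        return k + 1, update_fn(s), s

    return tf.while_loop(cond, body, [tf.constant(0), state0, tf.ones_like(state0)])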
-----------------------------------------------------------------------------------------------------------------\n    @staticmethod\n    def get_filtered_tensor(g: GraphTensor, inp: tf.Tensor):\n        \"\"\" Get inp [targets or sample_weights] for graph based problems -> nodes states are not filtered by set_mask and output_mask \"\"\"\n        return tf.constant(inp, dtype='float32')\n\n    # -----------------------------------------------------------------------------------------------------------------\n    def Loop(self, g: Union[GraphObject, GraphTensor], *, training: bool = False) -> tuple[int, tf.Tensor, tf.Tensor]:\n        \"\"\" Process a single graph, returning iteration, states and output. Output of graph-based problem is the averaged nodes output \"\"\"\n\n        # check compatibility between graph g and GNN type\n        if g.NodeGraph is None: raise ValueError('WRONG GNN. NodeGraph is None: GNN is graph-based, while problem is non graph-based.')\n\n        # transform GraphObject in GraphTensor\n        if isinstance(g, GraphObject): g = GraphTensor.fromGraphObject(g)\n\n        # get iter, states and output of every node from GNNnodeBased\n        iter, state_nodes, out_nodes = super().Loop(g, training=training)\n\n        # obtain a single output for each graph, by applying the nodegraph matrix to the output of all of its nodes\n        nodegraph = tf.constant(g.NodeGraph, dtype='float32')\n        out_gnn = tf.matmul(nodegraph, out_nodes, transpose_a=True)\n        return iter, state_nodes, out_gnn\n","sub_path":"GNN/GNN.py","file_name":"GNN.py","file_ext":"py","file_size_in_byte":19687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"74858094","text":"from math import pi\n\ndef area():\n    length = int(input('What is the length (ft) of your house?: '))\n    width = int(input('What is the width (ft) of your house?: '))\n    area = length * width\n    return (f\"The area of your house is {area} sq ft\")\n\ndef circ():\n    r = int(input('To calculate the circumference of a circle, please provide the radius : '))\n    circumference = 2*pi*r\n    return (f\"The circle's circumference is {circumference}\")\n\nprint(pi)","sub_path":"day2hw.py","file_name":"day2hw.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"133208798","text":"import operator\n\ndef detect_anagrams(testword, wordlist):\n    \n    anagrams = [] \n    testdict = {x: testword.lower().count(x) for x in testword.lower()}\n    \n    for word in wordlist:\n        worddict = {x: word.lower().count(x) for x in word.lower()}\n        if testdict == worddict and testword.lower() != word.lower():\n            anagrams.append(word)\n    \n    return anagrams\n\n# Decided to create a dict of every word containing letter: count (similar\n# to the word_count exercise) after my cunning plan with sets turned out\n# to be hopelessly flawed. 
I am sure there are much better ways to do this.\n# Please feel free to educate me!\n","sub_path":"all_data/exercism_data/python/anagram/bdeeea6142fc493a9cc8cebc53223b0f.py","file_name":"bdeeea6142fc493a9cc8cebc53223b0f.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"256165680","text":"import nori2 as nori\n\n# Create and open a nori\nnw = nori.open(\"s3://weiyajun/test.nori\", \"w\")\n# Read the image files as binary\nfiledata = open('s3://weiyajun/daxiongtu1.jpeg', 'rb').read()\nfiledata2 = open('s3://weiyajun/daxiongtu2.jpeg', 'rb').read()\n# Add the contents\nnoriId = nw.put(filedata, filename='daxiongtu.jpeg')\n# Get the id\nprint(noriId)\nnoriId = nw.put(filedata2, filename='daxiongtu2.jpeg')\nprint(noriId)\nnw.close()\n","sub_path":"megvii/nori/generate_nori_from_local_to_oss.py","file_name":"generate_nori_from_local_to_oss.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"249697242","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys\nfrom whoosh import index\nfrom whoosh.query import Every\nfrom whoosh.qparser import QueryParser, MultifieldParser, FieldAliasPlugin, OrGroup\n\n\nclass KicadSearcher(object):\n\n    def __init__(self, indexdir):\n        self.indexdir = indexdir\n        self.ix = index.open_dir(self.indexdir)\n        self.searcher = self.ix.searcher()\n\n    def close(self):\n        self.searcher.close()\n\n    def print_index_statistics(self):\n        print('location: ', self.indexdir)\n        print('num of docs:', self.searcher.doc_count())\n        print('terms: id, type, name, descr, keyword, reference, md5sum')\n        print('terms: path, position, lineno, lines')\n        print('terms: path2, position2, lineno2, lines2')\n        print()\n\n    def search(self, query, limit, any_match, search_type, print_docs):\n        if any_match:\n            parser = MultifieldParser(\n                ['name', 'keyword', 'descr'], self.ix.schema, group=OrGroup)\n        else:\n            parser = QueryParser('name', self.ix.schema)\n            parser.add_plugin(FieldAliasPlugin({'type': 't',\n                                                'name': 'n',\n                                                'keyword': 'k',\n                                                'descr': 'd'}))\n        print_hit = self.print_doc if print_docs else self.print_meta\n        query = parser.parse(query)\n        if search_type:\n            query = parser.parse('type:{}'.format(search_type)) & query\n        for hit in self.searcher.search(query, limit=limit):\n            print_hit(hit)\n\n    def list_all(self, print_docs):\n        print_hit = self.print_doc if print_docs else self.print_meta\n        for hit in self.searcher.search(Every(), limit=sys.maxsize):\n            print_hit(hit)\n\n    def print_meta(self, doc):\n        keys = doc.keys()\n        print('type: {}'.format(doc['type']))\n        print('name: {}'.format(doc['name']))\n        if 'descr' in keys:\n            print('descr: {}'.format(doc['descr']))\n        if 'keyword' in keys:\n            print('keyword: {}'.format(doc['keyword']))\n        if 'reference' in keys:\n            print('reference: {}'.format(doc['reference']))\n        print('location: {}:{}-{}'.format(doc['path'], doc['lineno'], doc[\n            'lineno'] + doc['lines'] - 1))\n        if 'path2' in keys:\n            print('location: {}:{}-{}'.format(doc['path2'], doc[\n                'lineno2'], doc['lineno2'] + doc['lines2'] - 1))\n        print('md5sum: {}'.format(doc['md5sum']))\n        print()\n\n    def print_doc(self, doc):\n        def dump_doc(path, position, lines):\n            try:\n                with open(path, 'r') as f:\n                    f.seek(position)\n                    for n in range(lines):\n                        print(f.readline(), end=\"\", flush=True)\n            except FileNotFoundError:\n                print('reading error or file {} is no longer available'.format(path))\n            finally:\n                print()\n\n        dump_doc(doc['path'], doc['position'], doc['lines'])\n        if 'path2' 
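# Minimal standalone use of the whoosh pieces KicadSearcher wires together above; 'indexdir'
# is a placeholder path, and the field names are the ones this class already queries. A
# sketch only, assuming 'name' is a stored field in the schema; imports match the module top.
def quick_search(indexdir, text, limit=10):
    ix = index.open_dir(indexdir)
    with ix.searcher() as searcher:
        parser = MultifieldParser(['name', 'keyword', 'descr'], ix.schema, group=OrGroup)
        return [hit['name'] for hit in searcher.search(parser.parse(text), limit=limit)]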
in doc.keys():\n dump_doc(doc['path2'], doc['position2'], doc['lines2'])\n","sub_path":"kicadsearch/kicadsearch_search.py","file_name":"kicadsearch_search.py","file_ext":"py","file_size_in_byte":3191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"455923606","text":"'''\nAsyncio equivalents to regular Python functions.\n\n'''\nimport asyncio\nimport itertools\n\n\nasync def acount(start=0, step=1, delay=0, stop=None):\n '''Asyncio version of itertools.count()'''\n for item in itertools.count(start, step): # pragma: no branch\n if stop is not None and item >= stop:\n break\n\n yield item\n await asyncio.sleep(delay)\n","sub_path":"python_utils/aio.py","file_name":"aio.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"33647850","text":"import logging\nimport datetime\nfrom datetime import timedelta\nimport getpass\nimport json\nimport pymysql\n\nfrom fints.client import FinTS3PinTanClient\nfrom pymongo import MongoClient\nfrom pymongo import errors as PyMongoErrors\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import exc\nfrom sqlalchemy import text\nfrom sqlalchemy.sql import func\nfrom sqlalchemy import create_engine, Column, Table, ForeignKey\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import (\n Integer, SmallInteger, String, Date, DateTime, Float, Boolean, Text, LargeBinary)\n\nDeclarativeBase = declarative_base()\n\n\ndef create_table(engine):\n DeclarativeBase.metadata.create_all(engine)\n\n\nclass Bank(DeclarativeBase):\n __tablename__ = 'tblbanks'\n bankId = Column(Integer(), primary_key=True)\n bankName = Column(String(100))\n bankUrl = Column(String(2000))\n\n def __repr__(self):\n return \"\" % (\n self.bankName, self.bankUrl)\n\n\nclass User(DeclarativeBase):\n __tablename__ = 'tblusers'\n userId = Column(String(100), primary_key=True)\n userName = Column(String(100))\n userCreatedAt = Column(Date())\n\n def __repr__(self):\n return \"\" % (\n self.userId, self.userName)\n\nclass Account(DeclarativeBase):\n __tablename__ = 'tblaccounts'\n accountNumber = Column(String(100), primary_key=True)\n accountBlz = Column(String(100), primary_key=True)\n accountLogin = Column(String(100))\n accountOwner = Column(String(100))\n bankId = Column(Integer(), ForeignKey(\"tblbanks.bankId\"))\n\n def __repr__(self):\n return \"\" % (\n self.accountNumber, self.accountBlz, self.accountOwner)\n\n\nclass Account_Transaction(DeclarativeBase):\n __tablename__ = 'tblacctransactions'\n accountNumber = Column(Integer(), primary_key=True)\n accountBlz = Column(Integer(), primary_key=True)\n transactionAmt = Column(Float(17, 2), primary_key=True)\n transactionCur = Column(String(3))\n transactionType = Column(String(100))\n transactionTitle = Column(String(250), primary_key=True)\n transactionApplicantName = Column(String(250))\n transactionDate = Column(Date(), primary_key=True)\n transactionEntryDate = Column(Date())\n withdrawDate = Column(Date())\n localSysBankId = Column(Integer())\n transactionOwnerId = Column(String(100), ForeignKey(\"tblusers.userId\"))\n\n def __repr__(self):\n return \"\" % (\n self.accountNumber, self.accountBlz, self.transactionAmt, self.transactionDate)\n\n\nclass Run:\n def __init__(self, mysql_conn_string, mongo_conn_string, args):\n\n if args[\"accountLogin\"] is None:\n login = args[\"accountNumber\"]\n else:\n login = args[\"accountLogin\"]\n\n if args[\"accountKey\"] is None 
or args[\"accountKey\"] == \"\":\n key = getpass.getpass('PIN for {}:'.format(login))\n else:\n key = args[\"accountKey\"]\n\n fin_client = FinTS3PinTanClient(\n args[\"accountBlz\"],\n login,\n key,\n args[\"bankUrl\"]\n )\n\n self.localSysBankId = args[\"bankId\"]\n\n self.accountOwnerId = args[\"accountOwner\"]\n\n self.fin_client = fin_client\n self.lastWithdrawn = args[\"maxWithdrawDate\"]\n\n self.accounts = fin_client.get_sepa_accounts()\n self.balances = []\n self.transactions = []\n self.fetched = {}\n\n self.errors = []\n self.duplicates = []\n self.success = {}\n\n # MySQL\n engine = create_engine(mysql_conn_string)\n self.Session = sessionmaker(bind=engine)\n\n # MongoDB\n self.client = MongoClient(mongo_conn_string)\n db = self.client.fin71\n self.tb_transactions = db.transactions\n\n def init_processing(self):\n session = self.Session()\n\n for acc in self.accounts:\n mongo_t_array, sql_t_array = self.process_transactions(acc)\n stored_mongo, stored_sql = self.store_transactions(mongo_t_array, sql_t_array)\n\n self.handle_success(acc, stored_sql)\n\n logging.info(\"For the account {}, {} transactions were retrieved, {} (mongo) and {} (sql) stored\".format(\n acc.accountnumber, len(sql_t_array), stored_mongo, stored_sql))\n\n self.complete_processing()\n\n def complete_processing(self):\n\n self.client.close()\n logging.info(\"Processing complete\")\n\n for acc in self.accounts:\n logging.info(\"There were {} transactions fetched for the account {}, {} added to the database\".format(\n self.get_fetched(acc.accountnumber), acc.accountnumber, self.get_success(acc.accountnumber)))\n\n def process_transactions(self, account):\n\n balance = self.fin_client.get_balance(account)\n self.balances.append(balance)\n\n start, end = self.get_starting_date()\n\n statements = self.fin_client.get_statement(account, start, end)\n\n # how many transactions have been fetched for the time periode\n total_num = len(statements)\n self.handle_statements_fetched(account, total_num)\n\n mongo_dict = []\n sql_dict = []\n\n for s in statements:\n\n # Cater for faulty entry_date parsing:\n # If dates vary for more than 50 days, then parsing of entry_date is faulty\n # Replace with normal date\n\n if abs(s.data[\"date\"] - s.data[\"entry_date\"]) > datetime.timedelta(days=50):\n s.data[\"entry_date\"] = s.data[\"date\"]\n\n # if booking date is at least today or before\n if s.data[\"entry_date\"] <= datetime.datetime.now():\n\n mongo_obj, sql_obj = self.make_transaction_dict(account, s.data)\n\n mongo_dict.append(mongo_obj)\n sql_dict.append(sql_obj)\n else:\n logging.info(\n \"The following transaction is to be expected in the future {} at {}\".format(s.data[\"entry_date\"],\n s.data[\"purpose\"]))\n\n return mongo_dict, sql_dict\n\n def handle_statements_fetched(self, account, total_num):\n self.fetched[account.accountnumber] = total_num\n\n def get_fetched(self, accountnumber):\n if accountnumber in self.fetched:\n return self.fetched[accountnumber]\n else:\n return 0\n\n def get_success(self, accountnumber):\n if accountnumber in self.success:\n return self.success[accountnumber]\n else:\n return 0\n\n def handle_success(self, account, num_items):\n\n self.success[account.accountnumber] = num_items\n\n def get_starting_date(self):\n\n if self.lastWithdrawn is None:\n start = datetime.datetime(2000, 1, 1)\n else:\n # if data has been withdrawn for this account, get one day previously and start from there\n start = self.lastWithdrawn + timedelta(-1)\n\n end = datetime.datetime.now()\n\n return start, 
end\n\n @staticmethod\n def sanitize_string(input_string):\n\n if input_string is None:\n return \"#None#\"\n if len(input_string) == 0:\n return \"#None#\"\n else:\n return input_string\n\n def get_transaction_item(self, account, data):\n\n return {\n \"transactionOwnerId\": self.accountOwnerId,\n \"withdrawDate\": datetime.datetime.now(),\n \"accountNumber\": account.accountnumber,\n \"accountBlz\": account.blz,\n \"iban\": account.iban,\n \"bic\": account.bic,\n\t\t\"localSysBankId\" : self.localSysBankId,\n \"status\": data.get(\"status\", None),\n \"funds_code\": data.get(\"funds_code\", None),\n \"amount\": data[\"amount\"].amount.to_eng_string(),\n \"id\": data.get(\"id\", None),\n \"customer_reference\": data.get(\"customer_reference\", None),\n \"bank_reference\": data.get(\"bank_reference\", None),\n \"extra_details\": data.get(\"extra_details\", None),\n \"currency\": data.get(\"currency\", None),\n \"date\": data[\"date\"].isoformat(),\n \"entry_date\": data[\"entry_date\"].isoformat(),\n \"transaction_code\": data.get(\"transaction_code\", None),\n \"posting_text\": data.get(\"posting_text\", \"\"),\n \"prima_nota\": data.get(\"prima_nota\", None),\n \"purpose\": data.get(\"purpose\", \"\"),\n \"applicant_bin\": data.get(\"applicant_bin\", None),\n \"applicant_iban\": data.get(\"applicant_iban\", None),\n \"applicant_name\": data.get(\"applicant_name\", None),\n \"return_debit_notes\": data.get(\"return_debit_notes\", None),\n \"recipient_name\": data.get(\"recipient_name\", None),\n \"additional_purpose\": data.get(\"additional_purpose\", None),\n \"gvc_applicant_iban\": data.get(\"gvc_applicant_iban\", None),\n \"gvc_applicant_bin\": data.get(\"gvc_applicant_bin\", None),\n \"end_to_end_reference\": data.get(\"end_to_end_reference\", None),\n \"additional_position_reference\": data.get(\"additional_position_reference\", None),\n \"applicant_creditor_id\": data.get(\"applicant_creditor_id\", None),\n \"purpose_code\": data.get(\"purpose_code\", None),\n \"additional_position_date\": data.get(\"additional_position_date\", None),\n \"deviate_applicant\": data.get(\"deviate_applicant\", None),\n \"deviate_recipient\": data.get(\"deviate_recipient\", None),\n \"FRST_ONE_OFF_RECC\": data.get(\"FRST_ONE_OFF_RECC\", None),\n \"old_SEPA_CI\": data.get(\"old_SEPA_CI\", None),\n \"old_SEPA_additional_position_reference\": data.get(\"old_SEPA_additional_position_reference\", None),\n \"settlement_tag\": data.get(\"settlement_tag\", None),\n \"debitor_identifier\": data.get(\"debitor_identifier\", None),\n \"compensation_amount\": data.get(\"compensation_amount\", None),\n \"original_amount\": data.get(\"original_amount\", None)\n }\n\n def make_transaction_dict(self, account, transaction_data):\n\n mongo_t_itm = self.get_transaction_item(account, transaction_data)\n\n sql_acc_trans_item = Account_Transaction(\n accountNumber=mongo_t_itm[\"accountNumber\"],\n accountBlz=mongo_t_itm[\"accountBlz\"],\n transactionAmt=mongo_t_itm[\"amount\"],\n transactionCur=mongo_t_itm[\"currency\"],\n transactionType=mongo_t_itm[\"posting_text\"],\n transactionTitle=self.sanitize_string(mongo_t_itm[\"id\"]) + \"/\" + self.sanitize_string(\n mongo_t_itm[\"purpose\"]),\n transactionApplicantName=mongo_t_itm[\"applicant_name\"],\n transactionDate=mongo_t_itm[\"date\"],\n transactionEntryDate=mongo_t_itm[\"entry_date\"],\n withdrawDate=mongo_t_itm[\"withdrawDate\"],\n transactionOwnerId=mongo_t_itm[\"transactionOwnerId\"],\n localSysBankId=self.localSysBankId\n )\n\n return mongo_t_itm, 
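# _store_mongo_transaction (below) relies on MongoDB raising DuplicateKeyError for repeated
# transactions, which only happens if a unique index exists. A plausible one-off setup step,
# using the fin71.transactions collection named above; the choice of index fields is an
# assumption for illustration, not taken from the original code.
def ensure_unique_transaction_index(mongo_conn_string):
    from pymongo import ASCENDING, MongoClient
    client = MongoClient(mongo_conn_string)
    client.fin71.transactions.create_index(
        [("accountNumber", ASCENDING), ("date", ASCENDING),
         ("amount", ASCENDING), ("purpose", ASCENDING)], unique=True)
    client.close()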
sql_acc_trans_item\n\n    def store_transactions(self, mongo_t_array, sql_t_array):\n\n        def _store_sql_transaction(self, sql_item):\n            try:\n\n                session = self.Session()\n\n                session.add(sql_item)\n\n                session.commit()\n\n                return True\n\n            except exc.SQLAlchemyError as e:\n\n                err_msg = str(e) # e.args[0] alone is an int error code, not searchable text\n                session.rollback()\n                logging.error(err_msg)\n\n                if \"1062,\" in err_msg:\n                    logging.warning(\"error MySQL, transaction with id {} already exists\".format(sql_item))\n                else:\n                    self.errors.append(sql_item)\n                    logging.warning(\"unknown error adding transaction to mysql {}\".format(sql_item))\n\n                return False\n\n            except Exception as e:\n                logging.warning(\"unknown error {}\".format(e))\n                return False\n\n        def _store_mongo_transaction(self, mongo_item):\n\n            try:\n\n                self.tb_transactions.insert_one(mongo_item)\n\n                return True\n\n            except PyMongoErrors.DuplicateKeyError as e:\n                logging.warning(\"error MongoDB, transaction with id {} already exists\".format(e))\n                return False\n            except Exception as e:\n                logging.warning(\"unknown error {}\".format(e))\n                return False\n\n        stored_mongo = 0\n        stored_sql = 0\n\n        for m in mongo_t_array:\n            res = _store_mongo_transaction(self, m)\n            if res:\n                stored_mongo += 1\n\n        for t in sql_t_array:\n            res = _store_sql_transaction(self, t)\n            if res:\n                stored_sql += 1\n\n        return stored_mongo, stored_sql\n","sub_path":"BalanceStatement.py","file_name":"BalanceStatement.py","file_ext":"py","file_size_in_byte":12982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"537013768","text":"# LC680 Valid Palindrome II\n# Easy\n\n# Given a non-empty string s, you may delete at most one character.\n# Judge whether you can make it a palindrome.\n# Note:\n# The string will only contain lowercase characters a-z.\n# The maximum length of the string is 50000.\n\n# \"\"\"\n# :type s: str\n# :rtype: bool\n# \"\"\"\n\nclass Solution:\n    def validPalindrome(self, s):\n        # time limit exceeded\n        if s == s[::-1]:\n            return True\n        else:\n            for i in range(len(s)):\n                lst = list(s)\n                del lst[i]\n                if lst == lst[::-1]:\n                    return True\n            return False\n\n    def validPalindrome(self, s):\n        # use string slice combination, still exceeds the time limit\n        if s == s[::-1]:\n            return True\n        for i in range(len(s)):\n            s_new = s[:i] + s[i + 1:]\n            if s_new == s_new[::-1]:\n                return True\n        return False\n        # when the string gets very long, there is no essential difference\n\n    def validPalindrome(self, s):\n        # from two ends, check step by step\n        for i in range(len(s) // 2):\n            # if head != tail, check if an element needs to be removed\n            if s[i] != s[len(s) - 1 - i]:\n                head, tail = i, len(s) - 1 - i\n                # two possibilities, remove element at head or tail, and check the rest\n                return s[head:tail] == s[head:tail][::-1] or s[head + 1:tail + 1] == s[head + 1:tail + 1][::-1]\n        # if nothing goes wrong, it is naturally a palindrome\n        return True\n\n\nif __name__ == \"__main__\":\n    assert Solution().validPalindrome(\"abccba\") == True, \"born to be (even)\"\n    assert Solution().validPalindrome(\"abcba\") == True, \"born to be (odd)\"\n    assert Solution().validPalindrome(\"aabbcecbXbaa\") == True, \"delete a letter\"\n    assert Solution().validPalindrome(\"abxcxdcba\") == False, \"does not work\"\n    print(\"All passed\")\n","sub_path":"LeetCode/LC680_valid_palindrome_ii.py","file_name":"LC680_valid_palindrome_ii.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"644288540","text":"import discord\nimport sys\nimport os\nimport io\nimport json\nimport 
ezjson\nimport random\nfrom discord.ext import commands\nfrom discord.ext.commands.cooldowns import BucketType\n\n\nclass Error(Exception):\n pass\n\n\nclass Economy:\n def __init__(self, bot):\n self.bot = bot\n self.session = self.bot.session\n self.db = self.bot.db\n with open('data/apikeys.json') as f:\n x = json.loads(f.read())\n self.dbl = x['dblapi']\n self.lottery_numbers = [str(random.randint(0, 9)), str(random.randint(0, 9)), str(random.randint(0, 9))]\n\n def dev_check(self, id):\n with open('data/devs.json') as f:\n devs = json.load(f)\n if id in devs:\n return True\n return False\n\n async def add_points(self, user, points):\n x = await self.db.economy.find_one({\"user\": user.id})\n total = int(x['points']) + points\n await self.db.economy.update_one({\"user\": user.id}, {\"$set\": {\"points\": int(total)}}, upsert=True)\n \n\n async def is_registered(self, user):\n x = await self.db.economy.find_one({\"user\": user.id})\n if x is None:\n return False\n else:\n return True\n\n\n @commands.command(aliases=['register', 'openbank'])\n async def openaccount(self, ctx):\n '''Opens a bank account for the economy!'''\n registered = await self.is_registered(ctx.author)\n if registered:\n return await ctx.send(f\"You already have a bank account!\")\n await self.db.economy.update_one({\"user\": ctx.author.id}, {\"$set\": {\"points\": 0}}, upsert=True)\n await ctx.send(\"Your bank account is now open! GLHF!\")\n\n\n \n @commands.command(aliases=['bal'])\n async def balance(self, ctx, user: discord.Member = None):\n '''Check how much bananas ya got!'''\n person = \"You currently have\" if not user else f\"**{user.name}** currently has\"\n user = user or ctx.author\n em = discord.Embed(color=0x00ff00, title='Current Balance')\n x = await self.db.economy.find_one({\"user\": user.id})\n if not x:\n em.description = f\"{person} don't have an account on dat banana bot yet! Open one using `*openaccount`.\"\n else:\n responses = [\n f\"{person} **{x['points']}** :banana:. Kinda sad.\",\n f\"Idk how {person} **{x['points']}** :banana:?!\",\n f\"REEEEEE! {person} **{x['points']}** :banana:.\",\n f\"{person} **{x['points']}** :banana:. Man, hella rich.\",\n f\"{person} **{x['points']}** :banana:. Deal with it.\",\n f\"{person} **{x['points']}** :banana:. I wonder where this dood's money comes from?!\"\n ]\n em.description = random.choice(responses)\n await ctx.send(embed=em)\n\n\n @commands.command(aliases=['daily', 'dailyshit'])\n @commands.cooldown(1, 86400.0, BucketType.user)\n async def dailycredit(self, ctx):\n '''Collect your daily bananas!'''\n # async with self.session.get(f\"https://discordbots.org/api/bots/388476336777461770/check?userId={ctx.author.id}\", headers={'Authorization': self.dbl}) as resp:\n # resp = await resp.json()\n # if resp['voted'] == 0:\n # em = discord.Embed(color=0x00ff00, title='Did you vote for dat banana bot today?')\n # em.description = \"You can get an extra **500** points from daily credit by simply upvoting dat banana bot. Click [here](https://discordbots.org/bot/388476336777461770/vote) to vote now.\\n\\nReact with :white_check_mark: to go upvote, or :x: to receive the reduced daily credit.\"\n # msg = await ctx.send(embed=em)\n # await msg.add_reaction(\"\\U00002705\")\n # await msg.add_reaction(\"\\U0000274c\")\n # reaction, user = await self.bot.wait_for('reaction_add', check=lambda reaction, user: user == ctx.author)\n # if reaction.emoji == '✅':\n # return await ctx.send(\"Thank you! 
The link (for your convenience) is: https://discordbots.org/bot/388476336777461770/vote\")\n # elif reaction.emoji == '❌':\n # number = random.randint(100, 300)\n # try:\n # await self.add_points(ctx.author, number)\n # except Exception as e:\n # return await ctx.send(f\"Aw, shucks! An unexpected error occurred: \\n```{e}```\")\n # return await ctx.send(f\"Hooray! Successfully added **{number}** :banana: into your account.\")\n # else:\n x = await self.db.economy.find_one({\"user\": ctx.author.id})\n if not x:\n return await ctx.send(\"You don't have an account on dat banana bot yet! Time to open one with `*openaccount.`\")\n number = random.randint(300, 500)\n try:\n await self.add_points(ctx.author, number)\n except Exception as e:\n return await ctx.send(f\"Aw, shucks! An unexpected error occurred: \\n```{e}```\")\n responses = [\n f\"Be proud. You just got **{number}** :banana:.\",\n f\"*Why u ask me for da MONEY? Anyways, you got **{number}** :banana:.\",\n f\"Ugh fine, take my money. But only **{number}** :banana:.\",\n f\"Why would you ever rob a poor man? Fine, take **{number}** :banana:.\",\n f\"You can have **{number}** :banana:, if that means you can shut up.\",\n f\"If you take **{number}** :banana:, ur mom gay. Oh well, you did :rofl:\",\n f\"I'd hate to give away **{number}** :banana:, but it's in my programming...\",\n f\"I love all my bananas. You just *had* to take away **{number}** :banana: from me...\"\n ]\n return await ctx.send(random.choice(responses))\n \n\n \n\n @commands.command()\n async def lottery(self, ctx, numbers: str):\n '''Enter the lottery to win/lose! 3 numbers, separate with commas. Entry is 100 :banana:, winner gets 10 million!'''\n x = await self.db.economy.find_one({\"user\": ctx.author.id})\n if x is None:\n return await ctx.send(\"Oof. You don't have an account yet! Time to create one with `*openaccount`.\")\n if int(x['points']) < 100:\n return await ctx.send(\"Entering the lottery requires 100 :banana:. You don't have enough! Keep on earning 'em\")\n if numbers is None:\n return await ctx.send(\"Please enter 3 numbers separated by commas to guess the lottery! \\nExample: *lottery 1,2,3\")\n numbers = numbers.replace(' ', '')\n numbers = numbers.split(',')\n #lucky = [str(random.randint(0, 9)), str(random.randint(0, 9)), str(random.randint(0, 9))]\n for i in numbers:\n try:\n int(i)\n except ValueError:\n return await ctx.send(\"Please enter only numbers for the lottery!\")\n lol = \"\"\n for x in self.lottery_numbers:\n lol += f\"`{x}` \"\n if numbers == self.lottery_numbers:\n responses = [\n \"Bruh. Just how...\",\n \"Y'know only 0.8% people can even get to see this.\",\n \"I'm gonna be SO BROKE!\",\n \"Take it. Don't even look at me...\",\n \"You just...WON?\",\n \"Could I be dreaming this?\"\n ]\n await self.add_points(ctx.author, 10000000)\n em = discord.Embed(color=0x00ff00, title='You are the lucky winner!')\n em.description = f'{random.choice(responses)} :tada:\\n\\nYou won 10,000,000 :banana:!'\n await ctx.send(embed=em)\n self.lottery_numbers = [str(random.randint(0, 9)), str(random.randint(0, 9)), str(random.randint(0, 9))]\n else:\n await self.add_points(ctx.author, -100)\n em = discord.Embed(color=0xf44e42)\n responses = [\n f\"OOF! 
Guess who didn't win the giant $$ this time!\",\n \"Aw, try again!\",\n \"Yo luck really succs...\",\n \"Cry all you want, but you ain't gonna get that 10,000,000 :banana:.\",\n \"Well, I ain't gonna stick around and waste time on someone who didn't win...\",\n \"And the bad luck goes SKRRRRRRA!\",\n \"Guess you're part of the 99.2% that didn't make it.\"\n ]\n em.description = f\"{random.choice(responses)} ¯\\_(ツ)_/¯\\n\\nYou lost: 100 :banana:\"\n await ctx.send(embed=em)\n await self.bot.get_channel(445332002942484482).send(f\"The winning numbers are: {self.lottery_numbers}\")\n\n\n @commands.command(aliases=['bet'])\n @commands.cooldown(1, 300, BucketType.user)\n async def gamble(self, ctx, amount):\n \"\"\"Choose an amount. Will you win it or will you lose it?\"\"\"\n x = await self.db.economy.find_one({\"user\": ctx.author.id})\n if not x:\n return await ctx.send(\"You haven't created an account on dat banana bot yet! Time to create one with `*openaccount`\")\n try:\n amount = int(amount)\n except ValueError:\n return await ctx.send(\"Please enter a valid number for the amount.\")\n if amount <= 0:\n return await ctx.send(\"Gamble more. Not less. Enter a number more than 0.\")\n if amount > x['points']:\n return await ctx.send(f\"You gambled WAY TOO MUCH! You currently can gamble up to **{x['points']}** :banana:.\")\n choose = random.randint(1, 2)\n if choose == 1:\n await self.add_points(ctx.author, amount)\n return await ctx.send(f\"HOORAY! You won **{amount}** :banana:. YEET!\")\n elif choose == 2:\n await self.add_points(ctx.author, -amount)\n return await ctx.send(f\"Aw, man! You just lost **{amount}** :banana:. Better luck next time!\")\n\n\n @commands.command(aliases=['steal'])\n @commands.cooldown(1, 300, BucketType.user)\n async def rob(self, ctx, user: discord.Member, points: int):\n \"\"\"Steal from someone else!\"\"\"\n try:\n points = int(points)\n except ValueError:\n return await ctx.send(\"Please enter a valid number to rob.\")\n x = await self.db.economy.find_one({\"user\": ctx.author.id})\n if not x:\n return await ctx.send(\"You don't have an account on dat banana bot yet! Time to create one with `*openaccount`.\")\n f = await self.db.economy.find_one({\"user\": user.id})\n if not f:\n return await ctx.send(\"Your target doesn't have an account yet! What's there to rob? :thinking:\")\n if points <= 0:\n return await ctx.send(\"Trying to rob less than 0? I think not.\")\n if points > x['points']:\n return await ctx.send(f\"Can't rob more than you have. ¯\\_(ツ)_/¯ You can rob up to **{x['points']}** :banana:.\")\n if points > f['points']:\n return await ctx.send(f\"Can't rob more than **{user.name}** has. ¯\\_(ツ)_/¯ You can rob up to **{f['points']}** :banana:.\")\n your_fate = random.randint(1, 2)\n if your_fate == 1:\n await self.add_points(ctx.author, points)\n await self.add_points(user, -points)\n return await ctx.send(f\"That was a success! You earned **{points}** :banana:, while that other sucker **{user.name}** lost **{points}** :banana:.\")\n elif your_fate == 2:\n await self.add_points(ctx.author, -points)\n await self.add_points(user, points)\n return await ctx.send(f\"That attempt sucked! 
I mean, thanks for giving **{user.name}** your **{points}** :banana:.\")\n\n\n @commands.command(aliases=['lb'])\n async def leaderboard(self, ctx):\n \"\"\"Get the leaderboard for economy!\"\"\"\n em = discord.Embed(color=0x00ff00, title=\"Economy Leaderboard\")\n em.set_author(name=self.bot.user.name, icon_url=self.bot.user.avatar_url)\n lb = list(reversed(await self.bot.db.economy.find().sort(\"points\").to_list(None)))\n counter = 0\n to_add = \"\"\n for x in lb:\n counter += 1\n to_add += f\"**#{counter}**. **{str(self.bot.get_user(x['user']))}**: **{x['points']}** :banana:\\n\"\n if counter == 10:\n break\n em.description = to_add\n await ctx.send(embed=em)\n\n\n\n @commands.command(aliases=['give'])\n #@commands.has_permissions(manage_guild=True)\n async def reward(self, ctx, user: discord.Member, points):\n '''Reward a good person'''\n if not self.dev_check(ctx.author.id):\n return await ctx.send(\"HALT! This command is for the devs only. Sorry. :x:\")\n if not await self.is_registered(user): # is_registered is a coroutine, so it must be awaited\n return await ctx.send(f\"ACK! **{str(user)}** doesn't have an account yet, so they can't get the gucci money!\")\n else:\n try:\n points = int(points)\n except ValueError:\n return await ctx.send(\"ACK! Please enter a valid number for points.\")\n try:\n await self.add_points(user, points)\n await ctx.send(f\"YEET! Added **{points}** :banana: to **{str(user)}**!\")\n except Exception as e:\n await ctx.send(f\"Oops, something went wrong. ```{e}```Please report to the developers!\")\n print(e)\n\n @commands.command(aliases=['remove'])\n #@commands.has_permissions(manage_guild=True)\n async def deduct(self, ctx, user: discord.Member, points):\n '''Fines a bad boi.'''\n if not self.dev_check(ctx.author.id):\n return await ctx.send(\"HALT! This command is for the devs only. Sorry. :x:\")\n if not await self.is_registered(user):\n return await ctx.send(f\"ACK! **{str(user)}** doesn't have an account yet, so you can't take away money from them!\")\n else:\n try:\n points = int(points)\n except ValueError:\n return await ctx.send(\"ACK! Please enter a valid number for points.\")\n try:\n await self.add_points(user, -points)\n await ctx.send(f\"OOF! Removed **{points}** :banana: from **{str(user)}**!\")\n except Exception as e:\n await ctx.send(f\"Oops, something went wrong. 
```{e}```Please report to the developers!\")\n print(e)\n\n\ndef setup(bot):\n bot.add_cog(Economy(bot))\n","sub_path":"cogs/economy.py","file_name":"economy.py","file_ext":"py","file_size_in_byte":14301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"223388763","text":"def Reverse(data):\n\tfor index in range(len(data)-1,-1,-1):\n\t\tyield data[index]\n\n\ndef main():\n\trev = Reverse('malyalam')\n\tfor char in rev:\n\t\tprint(char)\n\n\n\trev = Reverse(\"TEEM\")\n\tfor char in rev:\n\t\tprint(char) \n\t\n\tdata = \"abcdef\"\n\tprint(list(data[i] for i in range (len(data)-1,-1,-1)))\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"tutorial-programms/intermediate-tutorials-code/7_reversing-generator.py","file_name":"7_reversing-generator.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"34553222","text":"import re\nimport os\nimport getopt\nimport sys\n#from collections import Counter\n#from Bio.Seq import Seq\n#from Bio.Alphabet import IUPAC\nimport numpy as np\n#import pandas as pd\n### Dehan Method Check\n#from scipy import sparse\n#from sklearn.model_selection import ShuffleSplit\n#from sklearn.linear_model import Lasso, LassoCV\n#from scipy.stats import pearsonr\n\nopts,args=getopt.getopt(sys.argv[1:],\"i:p:k:m:s:f:c:b:o:\")\nread_1=''\t# SE or long reads\nread_2=''\t# PE reads - read set 2\nsnp_kmr_file='' #kmer -> snp_pos file\nsnp_kmr_fa='' # kmr fasta\nmatrix_file='' # Sno-pos matrix file\ncls_file=''\nsub_kmr_file=''\nout_dir=''\n\nk=25\t\t# Default k size=25\n#min_depth_rate=0.1\nmin_depth_percentile=10\nmax_depth_percentile=90\nmin_depth_absolute=2\n#min_depth_rate=0.05\nmin_depth_rate=0.05\n#threads=4\nfor opt,arg in opts:\n\tif opt=='-i':\n\t\tread_1=arg\n\telif opt=='-p':\n\t\tread_2=arg\n\telif opt=='-k':\n\t\tk=int(arg)\n\telif opt=='-s':\n\t\tsnp_kmr_file=arg\n\telif opt=='-m':\n\t\tmatrix_file=arg\n\telif opt=='-f':\n\t\tsnp_kmr_fa=arg\n\telif opt=='-c':\n\t\tcls_file=arg\n\telif opt=='-b':\n\t\tsub_kmr_file=arg\n\telif opt=='-o':\n\t\tout_dir=arg\n\n## Static variables\nBASE_ORDER=['A','T','G','C']\nBASE_P = {'A': [1, 0, 0, 0],'C':[0,1,0,0],'G':[0,0,1,0],'T':[0,0,0,1],}\n'''\nCV_NITER = 20\nNALPHA = 50\nMAX_NITER = 5000\nTEST_SIZE = 0.5\n'''\nfile_dir=sys.path[0]\n#print(file_dir)\n#exit()\n\n\n######### Step-1 Load Pre-build File to memory ####\n## Kmer -> POS-SNP\nf1=open(snp_kmr_file,'r')\ndkps={} # kmr -> {pos-snp:1,......}\npos=[]\ndpsc={} # pos-snp: num\nwhile True:\n\tline=f1.readline().strip()\n\tif not line:break\n\tele=line.split('\\t')\n\tdkps[ele[0]]=''\n\tps=re.split(',',ele[1])\n\tfor e in ps:\n\t\tdkps[ele[0]]=e # Set to 1 for Counter -> Dict Merge\n\t\tdpsc[e]=0\n\n## Build pos-snp freq array\n\nf3=open(matrix_file,'r')\nfl=f3.readline().strip()\npos_snp=re.split('\\t',fl) # Head line arr\n#print(np.where(np.array(pos_snp)=='8946-T')[0][0])\n\n# Run jellyfish to get kmer counting result\nif read_2=='':\n\tcmd1=file_dir+'/jellyfish-linux count -m 25 -s 100M -t 8 --if '+snp_kmr_fa+' -o Tem_VS.jf '+read_1\n\tcmd2=file_dir+'/jellyfish-linux dump -c Tem_VS.jf > Tem_Vs.fa'\n\tos.system(cmd1)\n\tos.system(cmd2)\nelse:\n\tcmd1=file_dir+'/jellyfish-linux count -m 25 -s 100M -t 8 --if '+snp_kmr_fa+' -o Tem_VS.jf '+read_1+' '+read_2\n\tcmd2=file_dir+'/jellyfish-linux dump -c Tem_VS.jf > Tem_Vs.fa'\n\tos.system(cmd1)\n\tos.system(cmd2)\n\t\n\nfreq_arr=[]\nfnew=open('Tem_Vs.fa','r')\nwhile 
True:\n\tline=fnew.readline().strip()\n\tif not line:break\n\tele=line.split()\n\tdpsc[dkps[ele[0]]]+=int(ele[1])\n\ncarr=[]\nfor p in pos_snp:\n\tc=re.split('-',p)[0]\n\tif c not in carr:\n\t\tcarr.append(c)\n\tif p not in dpsc:\n\t\tfreq_arr.append(0)\n\telse:\n\t\tfreq_arr.append(dpsc[p])\n\nfreq_arr=np.array(freq_arr)\n#print(freq_arr[1397])\n\n#freq_arr[freq_arr<=min_depth_absolute]=0\n#keep=(freq_arr!=0)\n\n\n### Check avg depth from the pos-snp frequency array\nkeep=(freq_arr!=0)\ncheck_arr=freq_arr[keep]\nmin_depth,max_depth=np.percentile(check_arr,[min_depth_percentile,max_depth_percentile])\n#print(min_depth)\nkeep=np.logical_and.reduce((check_arr>=min_depth,check_arr<=max_depth))\ncheck_arr2=check_arr[keep]\n# Average depth of the frequency vector\nmin_depth_adf=min_depth_rate*np.mean(check_arr2)\n##### !! freq arr filter using 2 or 0.1*avg-depth\n\nif min_depth_adf<2:\n\tmin_depth_adf=2 # Use 2 to test firstly\n#min_depth_adf=min_depth_absolute\nfreq_arr[freq_arr<=min_depth_adf]=0\nweighted_freq_arr=freq_arr/np.sum(freq_arr)\n\n##### Filter done. ########\n\n\npos_freq_map=dict(zip(pos_snp,freq_arr))\n#print(pos_freq_map['18722-C'])\nt=np.array(pos_snp)\n#sindex=np.argwhere(t=='18722-C')[0][0]\n#print(freq_arr[sindex])\n#exit()\n#print(np.where(np.array(pos_snp)=='8946-T'))\n\t#ax=sns.distplot(a,norm_hist=False,kde=False)\n\t#ax=sns.distplot(a,norm_hist=False,kde=False)\n#exit()\n\n#print(pos_freq_map['8946-T'])\n#exit()\n#print(pos_freq_map['4248-T'])\n#exit()\n#des={} # Strain -> the sum of frequency vector of this strain\nds_pos={} # Strain -> 0 1 1 0 0 1 (pos-snp: yes or no vector)\nds_freq={} # Strain -> frequency vector of this strain\ndmap_rate={} # Strain -> pos-snp map scpre of this strain\n#ds_avgd={} # Strain -> average depth of this strain\nds_num={} # Strain -> pos-snp map number, raw number\ndmr={} # Strain -> pos-snp map rate os this strain (map_number/raw_number)\n#These 2 dict will be used to visualize the pos depth figure\ndscf={} #Strain-> 110-A:110, 111-NA:0, ..\ndscl={} # Strain-> 110-A:3000, 111-NA:0, ..\nall_ps=[] # Record the matrix array\nwhile True:\n\tline=f3.readline().strip()\n\tif not line:break\n\tele=line.split('\\t')\n\t#des[ele[0]]=0\n\ttem=[]\n\t#all_ps.append(ele[1:])\n\tfor e in ele[1:]:\n\t\ttem.append(int(e))\n\ttem=np.array(tem)\n\tall_ps.append(tem)\n\tdscf[ele[0]]={}\n\tdscl[ele[0]]={}\n\t'''\n\tkeep=(tem==1)\n\tps_number=len(tem[keep])\n\tds_num[ele[0]]=ps_number\n\t'''\n\tnt=freq_arr*tem # the frequency vector of this strain\n\traw_c=len(tem[tem==1])\n\tmap_c=len(nt[nt>0])\n\t'''\n\tif ele[0]=='>MT419847.1':\n\t\tprint('>MT419847.1',map_c,raw_c)\n\tif ele[0]=='>MN938384.1':\n\t\tprint('>MN938384.1',map_c,raw_c)\n\t'''\n\tmap_rate=np.sum(tem*weighted_freq_arr)\n\t#map_rate=float(map_c)/float(raw_c)\n\t#dmap_rate[ele[0]]=map_rate\n\tdmap_rate[ele[0]]=map_rate\n\tds_num[ele[0]]=str(map_c)+'/'+str(raw_c)\n\tdmr[ele[0]]=float(map_c)/float(raw_c)\n\t#value=nt.sum()\n\t#des[ele[0]]=value\n\tds_pos[ele[0]]=tem\n\tds_freq[ele[0]]=nt\n\t## Get average depth of this strain ##\n\t'''\n\tkeep=(nt!=0)\n\tnt=nt[keep]\n\tmin_depth,max_depth=np.percentile(nt,[min_depth_percentile,max_depth_percentile])\n\tkeep=np.logical_and.reduce((nt>=min_depth,nt<=max_depth))\n\tnt=nt[keep]\n\tds_avgd[ele[0]]=np.mean(nt)\n\t'''\n''' Here we initialize 2 dict for later visualization. 
'''\nall_ps=np.array(all_ps)\nall_sum=np.sum(all_ps,axis=0)\npos_label=dict(zip(pos_snp,list(all_sum)))\n'''\nfor s in ds_freq:\n\ti=0\n\tfor c in ds_freq[s]:\n\t\tif c==0:\n\t\t\ti+=1\n\t\t\tcontinue\n\t\tcurrent_ps=pos_snp[i]\t\n\t\tcolumn=int(re.split('-',current_ps)[0])\n\t\tif column not in dscf[s]:\n\t\t\tdscf[s][column]=0\n\t\tif not c==0:\n\t\t\tdscf[s][column]+=c\n\t\ti+=1\n\ti2=0\n\tfor c in ds_pos[s]:\n\t\tif c==0:\n\t\t\ti2+=1\n\t\t\tcontinue\n\t\tcurrent_ps=pos_snp[i2]\n\t\tcolumn=int(re.split('-',current_ps)[0])\n\t\tif column not in dscl[s]:\n\t\t\tdscl[s][column]=0\n\t\tif c==1:\n\t\t\tdscl[s][column]=pos_label[current_ps]\n\t\ti2+=1\nprint(len(dscl['>gb:J02176']),len(dscf['>gb:J02176']))\nexit()\n'''\n#print(max(the_sum),len(np.argwhere(the_sum==len(all_ps_vec)-1)),len(np.argwhere(the_sum==1)))\n\n#exit()\n#index=np.where(np.array(pos_snp)=='8946-T')[0][0]\n#print(ds_pos['>MT419847.1'][index])\n#print(ds_pos['>MN938384.1'][index])\nmax_map=sorted(dmr.items(),key=lambda d:d[1],reverse=True)[0][1]\n#exit()\nres=sorted(dmap_rate.items(),key=lambda d:d[1],reverse=True)\ntop10_score_s=res[:10]\n#exit()\ntop_map_strain=[]\nfor r in res:\n\tif r[1]==res[0][1]:\n\t\ttop_map_strain.append(r[0])\n\telse:break\n##### Unique nodes scan for all top strains\nsnp_arr=[]\n### Pre-calculate the possible strain number, then decide whether should calculate weighted score.\npre_freq_arr=[]\n#strain_num={}\nfor s in top_map_strain:\n\tsnp_arr.append(ds_pos[s])\n\tpre_pa=ds_pos[s]*(-1)\n\tpre_pa=np.array(pre_pa)\n\tpre_pa[pre_pa==0]=1\n\tpre_freq_arr=freq_arr*pre_pa\n\tpre_freq_arr[pre_freq_arr<0]=0\n\nkeep=(pre_freq_arr!=0)\npre_freq_arr=pre_freq_arr[keep]\n\npre_pos_snp=np.array(pos_snp)[keep]\npre_ds_pos={}\nfor s in ds_pos:\n\tpre_ds_pos[s]=ds_pos[s][keep]\npre_wf_arr=pre_freq_arr/np.sum(pre_freq_arr)\nstrain_num={}\nsn=0\n#print(pre_freq_arr)\n#exit()\nwhile True:\n\tif len(pre_freq_arr)==0:break\n\tsmr={}\n\tfor r in ds_pos:\n\t\tif r in top_map_strain:continue\n\t\ttt=pre_ds_pos[r]\n\t\tnt=tt*pre_wf_arr\n\t\t#mc=len(nt[nt>0])\n\t\tmr=np.sum(nt)\n\t\tsmr[r]=mr\n\tres=sorted(smr.items(),key=lambda d:d[1],reverse=True)\n\tts=[]\n\tfor r in res:\n\t\tif r[1]==res[0][1]:\n\t\t\tts.append(r[0])\n\tif len(ts)>1:\n\t\trmr={}\n\t\tfor s in ts:\n\t\t\trmr[s]=dmap_rate[s]\n\t\tres2=sorted(rmr.items(),key=lambda d:d[1],reverse=True)\n\t\t#for r in res2:\n\t\tstrain_num[res2[0][0]]=''\n\telse:\n\t\tstrain_num[ts[0]]=''\n\t#strain_num[res[0][0]]=''\n\tvm1=len(pre_freq_arr)\n\tpre_pa=pre_ds_pos[ts[0]]*(-1)\n\tpre_pa[pre_pa==0]=1\n\tpre_freq_arr=pre_freq_arr*pre_pa\n\tpre_freq_arr[pre_freq_arr<0]=0\n\tkeep=(pre_freq_arr!=0)\n\tpre_freq_arr=pre_freq_arr[keep]\n\tif not np.sum(pre_freq_arr)==0:\n\t\tpre_wf_arr=pre_freq_arr/np.sum(pre_freq_arr)\n\tpre_pos_snp=pre_pos_snp[keep]\n\tfor s in pre_ds_pos:\n\t\tpre_ds_pos[s]=pre_ds_pos[s][keep]\n\tvm=vm1-len(pre_freq_arr)\n\tif vm>1:\n\t\t#strain_num[res[0][0]]=''\n\t\tsn+=1\n# Will recalculate the score and select top strain\nif sn>1:\n\t#nw=len(strain_num)\n\tfor s in top_map_strain:\n\t\tstrain_num[s]=''\n\tsscore={}\n\tsna=[]\n\tfor s in strain_num:\n\t\tsna.append(ds_pos[s])\n\tsna=np.array(sna)\n\tssum=sna.sum(axis=0)\n\tssum[ssum==0]=1\n\tfor s in strain_num:\n\t\tsnt=ds_pos[s]/ssum\n\t\tns=dmap_rate[s]*snt\n\t\tns=ns.sum(axis=0)\n\t\tsscore[s]=ns\n\tres=sorted(sscore.items(),key=lambda d:d[1],reverse=True)\n\ttem_map_strain=[]\n\tfor r in res:\n\t\tif not dmr[r[0]]==max_map:continue\n\t\ttem_map_strain.append(r[0])\n\t\tbreak\n\tif 
len(tem_map_strain)>0:\n\t\ttop_map_strain=tem_map_strain\n\t#print(sscore)\n\n\t\nsnp_arr=np.array(snp_arr)\npos_sum=snp_arr.sum(axis=0)\npos_sum[pos_sum>1]=0\ni=0\n\nstrain_unique={}\nstrain_unique_count={}\nfor p in pos_sum:\n\tcolumn=pos_snp[i]\n\t#i+=1\n\tif p==1:\n\t\tif pos_freq_map[column]<=min_depth_absolute:\n\t\t\ti+=1\n\t\t\tcontinue\n\t\ti2=0\n\t\twindow=snp_arr[:,i]\n\t\tfor w in window:\n\t\t\tif w==1:\n\t\t\t\tstrain=top_map_strain[i2]\n\t\t\t\tif strain not in strain_unique:\n\t\t\t\t\tstrain_unique[strain]={column:pos_freq_map[column]}\n\t\t\t\t\tstrain_unique_count[strain]=1\n\t\t\t\telse:\n\t\t\t\t\tstrain_unique[strain][column]=pos_freq_map[column]\n\t\t\t\t\tstrain_unique_count[strain]+=1\n\t\t\ti2+=1\n\ti+=1\n#print(freq_arr[1397])\n#print(pos_freq_map['4248-T'])\n#print(strain_unique)\n#exit()\n## Final output generate part ####\nmp_strain=[] # Most possible strain\nop_strain=[] # Other possible strain -> [S1,S2,S3,S5]\nop_strain_batch=[] # Other possible strain ->[[S1,S2],[S3,S5],...]\n#op_pos_snp=[] # Other possible pos-snp\n#op_ps_strain=[] # The top map rate strain of other possible pos-snp\nif not len(strain_unique)==0:\n\t#print(strain_unique)\n\tfor s in strain_unique:\n\t\tmp_strain.append(s)\n\t\t#print(s,ds_avgd[s])\n\t\t#exit()\nelse:\n\t### Need to check whether these strains have close depth\n\t#d={}\n\t#strain_depth=[]\n\tfor r in top_map_strain:\n\t\tmp_strain.append(r)\n\t\t#d[r]=ds_avgd[r]\n\t\t#raw_depth=ds_freq[r] # the freq arr of this strain -> [0, 22, 0 ...]\n\t\t#keep=(raw_depth>0)\n\t\t#nz_depth=raw_depth[keep]\n\t\t#print(np.mean(nz_depth))\n\t\t#exit()\n\t#exit()\n\t'''\n\tres2=sorted(d.items(),key=lambda d:d[1],reverse=True)\n\tfor r in res2:\n\t\tmp_strain.append(r[0])\n\t'''\n#print(strain_unique)\n#exit()\n#print(top_map_strain)\n#for t in top_map_strain:\n#print(t,dmap_rate[t])\n## Check nodes of MT419847\n## Iterative function to get other possible strains and other possible pos-snp\n#print('Raw freq arr: ', freq_arr[sindex])\nds_avgd={}\nfor m in mp_strain:\n\t'''\n\tkeep=(ds_freq[m]!=0)\n\tcheck_arr=ds_freq[m][keep]\n\tmin_depth,max_depth=np.percentile(check_arr,[min_depth_percentile,max_depth_percentile])\n\tkp1=(check_arr<=min_depth)\n\tcheck_arr=check_arr[kp1]\n\tcontinue\n\tprint(m,ds_freq[m][sindex])\n\t'''\n\t# Get the avg depth of the strain\n\tkeep=(ds_freq[m]!=0)\n\tmin_depth,max_depth=np.percentile(ds_freq[m][keep],[min_depth_percentile,max_depth_percentile])\n\tkeep=np.logical_and.reduce((ds_freq[m]>=min_depth,ds_freq[m]<=max_depth))\n\tds_avgd[m]=np.mean(ds_freq[m][keep])\n\n\tpos_arr=ds_pos[m]*(-1)\n\tpos_arr=np.array(pos_arr)\n\tpos_arr[pos_arr==0]=1\n\tfreq_arr=freq_arr*pos_arr\n\tfreq_arr[freq_arr<0]=0\n\nkeep=(freq_arr!=0)\n\nleft_freq_arr=freq_arr[keep]\n#print('Filtered freq arr: ',left_freq_arr[sindex])\n#exit()\npos_snp=np.array(pos_snp)\nleft_pos_snp=pos_snp[keep]\nleft_ds_pos={}\nfor s in ds_pos:\n\tleft_ds_pos[s]=ds_pos[s][keep]\n#print(left_freq_arr)\n#print(left_pos_snp)\n#exit()\n#run=0\nleft_ps_freq_map=dict(zip(left_pos_snp,left_freq_arr))\n#print(left_ps_freq_map)\n#exit()\nresl=sorted(left_ps_freq_map.items(),key=lambda d:d[1],reverse=True)\nos_strain={} # '221-A-100-10000':['>MT312312.1',....]\nos_arr=[] # ['221-A-100-10000','225-G-100-10000',....]\nleft_weighted_freq_arr=left_freq_arr/np.sum(left_freq_arr)\n#print(left_ps_freq_map)\n#print(left_freq_arr,left_pos_snp)\n#exit()\n#run=0\nvmap={} # The dict used to record the valid map rate\n#### Start Iterative process ######\nif not 
len(left_freq_arr)==0:\n\tmax_iter_times=len(left_freq_arr)\n\tfor l in range(max_iter_times):\n\t\t#temd=dict(zip(left_pos_snp,left_freq_arr))\n\t\t#res=sorted(temd.items(),key=lambda d:d[1],reverse=True)\n\t\t#for r in res:\n\t\tif len(left_freq_arr)==0:break\n\t\t#column_snp=res[0][0]\n\t\tstrain_map_rate={}\n\t\tfor r in ds_pos:\n\t\t\tif r in mp_strain:continue\n\t\t\t#if r in op_strain:continue\n\t\t\t#if r in op_ps_strain:continue\n\t\t\ttem=left_ds_pos[r]\n\t\t\tnt=tem*left_weighted_freq_arr\n\t\t\t#raw_c=len(tem[tem==1])\n\t\t\tmap_c=len(nt[nt>0])\n\t\t\t#map_rate=map_c\n\t\t\tmap_rate=np.sum(nt)\n\t\t\tstrain_map_rate[r]=map_rate\n\t\tselected_strain=[] # used to save the selected strains in this round\n\t\tres=sorted(strain_map_rate.items(),key=lambda d:d[1],reverse=True)\n\t\t#print(res)\n\t\t#exit()\n\t\ttop_s=[]\n\t\tsnp_arr=[]\n\n\t\tfor r in res:\n\t\t\tif r[1]==res[0][1]:\n\t\t\t\ttop_s.append(r[0])\n\t\t\t\t### Calculate avg depth\n\t\t\t\tnt=left_ds_pos[r[0]]*left_freq_arr\n\t\t\t\tkeep=(nt!=0)\n\t\t\t\tnt=nt[keep]\n\t\t\t\tmin_depth,max_depth=np.percentile(nt,[min_depth_percentile,max_depth_percentile])\n\t\t\t\tkeep=np.logical_and.reduce((nt>=min_depth,nt<=max_depth))\n\t\t\t\tif not len(nt[keep])==0:\n\t\t\t\t\tnt=nt[keep]\n\t\t\t\tds_avgd[r[0]]=np.mean(nt)\n\t\t\t\t## Done\n\t\t\t\tsnp_arr.append(left_ds_pos[r[0]])\n\t\t#print(top_s)\n\t\t#exit()\n\t\tif len(top_s)>1:\n\t\t\trank_map_rate={}\n\t\t\tfor s in top_s:\n\t\t\t\trank_map_rate[s]=dmap_rate[s]\n\t\t\tres=sorted(rank_map_rate.items(),key=lambda d:d[1],reverse=True)\n\t\t\ttop_s=[]\n\t\t\tfor r in res:\n\t\t\t\ttop_s.append(r[0])\n\t\ttop_s=np.array(top_s)\n\t\tpre=[]\n\t\tfor r in resl:\n\t\t\tpre.append(r[0]+':'+str(r[1]))\n\t\tpre=','.join(pre)\n\t\ttop_pos_snp=pre+'\\t'+str(len(top_s))\n\t\tos_arr.append(top_pos_snp)\n\t\tos_strain[top_pos_snp]=[]\n\t\tfor s in top_s:\n\t\t\tos_strain[top_pos_snp].append(s)\n\t\t\tpos_arr=left_ds_pos[s]*(-1)\n\t\t\tpos_arr[pos_arr==0]=1\n\t\t\tleft_freq_arr=left_freq_arr*pos_arr\n\t\t\tleft_freq_arr[left_freq_arr<0]=0\n\t\tkeep=(left_freq_arr!=0)\n\t\tvalid_map=len(left_freq_arr)\n\t\tleft_freq_arr=left_freq_arr[keep]\n\t\tvalid_map=valid_map-len(left_freq_arr)\n\t\tvmap[top_pos_snp]=valid_map\n\t\tif not np.sum(left_freq_arr)==0:\n\t\t\tleft_weighted_freq_arr=left_freq_arr/np.sum(left_freq_arr)\n\t\tleft_pos_snp=left_pos_snp[keep]\n\t\tleft_ps_freq_map=dict(zip(left_pos_snp,left_freq_arr))\n\t\tfor s in left_ds_pos:\n\t\t\tleft_ds_pos[s]=left_ds_pos[s][keep]\n\t\tresl=sorted(left_ps_freq_map.items(),key=lambda d:d[1],reverse=True)\n\n# All strain cross validation\n\n### Output part\n'''\nfp=open('../head_file.txt','r')\ndc={}\nwhile True:\n\tline=fp.readline().strip()\n\tif not line:break\n\tpre=re.split('\\|',line)[0].strip()\n\tanno=re.split('\\|',line)[1].strip()\n\t#t=re.split('/',line)\n\tif len(t)<3:\n\t\tdc[pre]='NA'\n\telse:\n\t#country=re.split('/',line)[2]\n\t#if re.search('2020',country):\n\t#country='NA'\n\tdc[pre]=anno\n'''\no=open(out_dir,'w+')\no.write('\\t\\tStrain_ID\\tCls_info\\tSubCls_info\\tMap_Score\\tValid_Map_Rate\\tTotal_Map_Rate\\tStrain_Depth\\tStrain_info\\tUnique_SNP\\n')\no.write('>>Most possible strains:\\n')\nall_s=[]\nvs_sd=[]\nvs_so=[]\nfor s in mp_strain:\n\tall_s.append(s)\n\tvs_sd.append(s)\nif len(os_arr)>0:\n\tfor s in os_arr:\n\t\tall_s.append(os_strain[s][0])\n\t\tif vmap[s]>1:\n\t\t\tvs_so.append(os_strain[s][0])\n# Strain-level identification \ns2cls={} # Used for output cluster info\ns2sub={} # Used for output sub-cluster 
info\ncandidate_cls={}\nfcls=open(cls_file)\nwhile True:\n\tline=fcls.readline().strip()\n\tif not line:break\n\tele=line.split('\\t')\n\tif ele[0] not in all_s:\n\t\ts2cls[ele[0]]=ele[2]\n\t\tcontinue\n\tif ele[2]==ele[3]:\n\t\ts2cls[ele[0]]=ele[2]\n\t\ts2sub[ele[0]]='NA'\n\telse:\n\t\ts2cls[ele[0]]=ele[2]\n\t\tcandidate_cls[ele[2]]=''\n\t\t#s2sub[ele[0]]=''\nif not len(candidate_cls)==0:\n\tfsk=open(sub_kmr_file,'r')\n\tksub={} # kmer sequence -> cluster -> subclusters; keyed by ele[0] to match the ksub[ele[0]] lookups below\n\tcls_sub={}\n\t#subcount={}\n\twhile True:\n\t\tline=fsk.readline().strip()\n\t\tif not line:break\n\t\tele=line.split('\\t')\n\t\tif ele[1] in candidate_cls:\n\t\t\tif ele[0] not in ksub:\n\t\t\t\tksub[ele[0]]={ele[1]:{}}\n\t\t\tif ele[1] not in ksub[ele[0]]:\n\t\t\t\tksub[ele[0]][ele[1]]={}\n\t\t\tif ele[1] not in cls_sub:\n\t\t\t\tcls_sub[ele[1]]={}\n\t\t\tsub=re.split(',',ele[-1])\n\t\t\tfor s in sub:\n\t\t\t\tksub[ele[0]][ele[1]][s]=''\n\t\t\t\tcls_sub[ele[1]][s]=0\n\tok=open('Tem_Vs2Sub.fa','w+')\n\tco=1\n\tfor kmr in ksub:\n\t\tok.write('>'+str(co)+'\\n')\n\t\tok.write(kmr+'\\n')\n\t\tco+=1\n\tok.close()\n\tif read_2=='':\n\t\tcmd1=file_dir+'/jellyfish-linux count -m 25 -s 100M -t 8 --if Tem_Vs2Sub.fa -o Tem_VS2.jf '+read_1\n\telse:\n\t\tcmd1=file_dir+'/jellyfish-linux count -m 25 -s 100M -t 8 --if Tem_Vs2Sub.fa -o Tem_VS2.jf '+read_1+' '+read_2\n\tcmd2=file_dir+'/jellyfish-linux dump -c Tem_VS2.jf > Tem_Vs2.fa'\n\t# run jellyfish before reading its dump, as in the first counting block above\n\tos.system(cmd1)\n\tos.system(cmd2)\n\tft2=open('Tem_Vs2.fa','r')\n\twhile True:\n\t\tline=ft2.readline().strip()\n\t\tif not line:break\n\t\tele=line.split()\n\t\tif int(ele[1])>=min_depth_adf:\n\t\t\tfor c in ksub[ele[0]]:\n\t\t\t\tfor c2 in ksub[ele[0]][c]:\n\t\t\t\t\tcls_sub[c][c2]+=int(ele[1])\n\tfor s in all_s:\n\t\tif s in s2sub:continue\n\t\tif s2cls[s] not in cls_sub:\n\t\t\ts2sub[s]='NA'\n\t\telse:\n\t\t\tres=sorted(cls_sub[s2cls[s]].items(),key=lambda d:d[1],reverse=True)\n\t\t\tif res[0][1]==res[1][1]:\n\t\t\t\ts2sub[s]='NA'\n\t\t\telse:\n\t\t\t\ts2sub[s]=res[0][0]\n\n\nfor s in mp_strain:\n\t#keep=(ds_pos[s]==1)\n\t#all_s.append(s)\n\tif s in strain_unique:\n\t\to.write('\\t\\t'+s+'\\t'+s2cls[s]+'\\t'+s2sub[s]+'\\t'+str(dmap_rate[s])+'\\t'+str(ds_num[s])+'\\t'+str(ds_num[s])+'\\t'+str(ds_avgd[s])+'\\t\\t\\t'+str(strain_unique[s])+'\\n')\n\telse:\n\t\to.write('\\t\\t'+s+'\\t'+s2cls[s]+'\\t'+s2sub[s]+'\\t'+str(dmap_rate[s])+'/'+str(ds_num[s])+'\\t'+str(ds_avgd[s])+'\\t\\t\\tNA\\n')\no.write('>>Other possible strains:\\n')\nif len(os_arr)>0:\n\t#i=1\n\tfor s in os_arr:\n\t\tele=re.split('\\t',s)\n\t\tif vmap[s]==1:\n\t\t\to.write('\\t>>(Could be FP)'+ele[0]+',Genome_num: '+ele[1]+'\\n')\n\t\telse:\n\t\t\to.write('\\t>>'+ele[0]+',Genome_num: '+ele[1]+'\\n')\n\t\tall_s.append(os_strain[s][0])\n\t\tfor n in os_strain[s]:\n\t\t\t#all_s.append(n)\n\t\t\ta=re.split('/',ds_num[n])[-1]\n\t\t\tvm=str(vmap[s])+'/'+a\n\t\t\tif n in s2sub:\n\t\t\t\to.write('\\t\\t'+n+'\\t'+s2cls[n]+'\\t'+s2sub[n]+'\\t'+str(dmap_rate[n])+'\\t'+vm+'\\t'+ds_num[n]+'\\t'+str(ds_avgd[n])+'\\t\\t\\tNot_record\\n')\n\t\t\telse:\n\t\t\t\to.write('\\t\\t'+n+'\\t'+s2cls[n]+'\\tNot_record\\t'+str(dmap_rate[n])+'\\t'+vm+'\\t'+ds_num[n]+'\\t'+str(ds_avgd[n])+'\\t\\t\\tNot_record\\n')\n\t\t\t\t\n\t\t#outs='\\t'.join(os_strain[s])\n\t\t#o.write('\\t\\t\\t'+outs+'\\n')\n\t\t#i+=1\nelse:\n\to.write('\\tCannot detect other strains.\\n')\nres=sorted(dmr.items(),key=lambda d:d[1],reverse=True)\no.write('\\n>>Highest_Map_Strains (Could be FP):\\n')\nfinal={}\nfor r in res:\n\tif r[1]==res[0][1]:\n\t\tif r[0] not in all_s:\n\t\t\tfinal[r[0]]=dmap_rate[r[0]]\nif not len(final)==0:\n\tres=sorted(final.items(),key=lambda d:d[1],reverse=True)\n\tfor s 
in res:\n\t\tif s[0] in s2sub:\n\t\t\to.write('\\t\\t'+s[0]+'\\t'+s2cls[s[0]]+'\\t'+s2sub[s[0]]+'\\t'+str(dmap_rate[s[0]])+'\\t'+ds_num[s[0]]+'\\t'+ds_num[s[0]]+'\\tNA\\tNA\\n')\n\t\telse:\n\t\t\to.write('\\t\\t'+s[0]+'\\t'+s2cls[s[0]]+'\\tNot_record\\t'+str(dmap_rate[s[0]])+'\\t'+ds_num[s[0]]+'\\t'+ds_num[s[0]]+'\\tNA\\tNA\\n')\no.write('>>Top10_Score_Strains:\\n')\nfor t in top10_score_s:\n\tif t[0] in s2sub:\n\t\to.write('\\t\\t'+t[0]+'\\t'+s2cls[t[0]]+'\\t'+s2sub[t[0]]+'\\t'+str(t[1])+'\\t'+ds_num[t[0]]+'\\t'+ds_num[t[0]]+'\\tNA\\tNA\\n')\n\telse:\n\t\to.write('\\t\\t'+t[0]+'\\t'+s2cls[t[0]]+'\\tNot_record\\t'+str(t[1])+'\\t'+ds_num[t[0]]+'\\t'+ds_num[t[0]]+'\\tNA\\tNA\\n')\n## Remove tem file \nos.system('rm Tem_Vs* Tem_VS*')\n## From this line, we will generate strain-level analysis report\nprint('Txt report is done. Now will generate pdf report!')\n\nvs_so=vs_so[:5]\nfor s in ds_freq:\n\tcheck=0\n\tif s in vs_sd:\n\t\tcheck=1\n\tif s in vs_so:\n\t\tcheck=2\n\tif check==0:continue\n\ti=0\n\tfor c in ds_freq[s]:\n\t\tif c==0:\n\t\t\ti+=1\n\t\t\tcontinue\n\t\tcurrent_ps=pos_snp[i]\n\t\tcolumn=int(re.split('-',current_ps)[0])\n\t\tif column not in dscf[s]:\n\t\t\tdscf[s][column]=c\n\t\t\n\t\ti+=1\n\ti2=0\n\tfor c in ds_pos[s]:\n\t\tif c==0:\n\t\t\ti2+=1\n\t\t\tcontinue\n\t\tcurrent_ps=pos_snp[i2]\n\t\tcolumn=int(re.split('-',current_ps)[0])\n\t\tif column not in dscl[s]:\n\t\t\tdscl[s][column]=0\n\t\tif c==1:\n\t\t\tdscl[s][column]=pos_label[current_ps]\n\t\ti2+=1\n\nov1=open('Mps_ps_depth.csv','w+')\nov2=open('Ops_ps_depth.csv','w+')\nov1.write('ID,Column_ID')\n#carr=[]\nfor s in vs_sd:\n\tov1.write(','+s+'_Freq')\n\tov1.write(','+s+'_LNum')\nov1.write('\\n')\ncarr=sorted(list(dscf[vs_sd[0]].keys()))\ni=1\nfor c in carr:\n\tov1.write(str(i)+','+str(c))\n\tfor s in vs_sd:\n\t\tov1.write(','+str(dscf[s][c])+','+str(dscl[s][c]))\n\tov1.write('\\n')\n\ti+=1\n\t\n\t\n\nov2.write('ID,Column_ID')\nif len(vs_so)==0:\n\tov2.write(',None,None\\n')\nelse:\n\tfor s in vs_so:\n\t\tov2.write(','+s+'_Freq')\n\t\tov2.write(','+s+'_LNum')\n\tov2.write('\\n')\n\ti=1\n\tfor c in carr:\n\t\tov2.write(str(i)+','+str(c))\n\t\tfor s in vs_so:\n\t\t\tov2.write(','+str(dscf[s][c])+','+str(dscl[s][c]))\n\t\tov2.write('\\n')\n\t\ti+=1\n","sub_path":"bin/S3_Strain_pred_My_Method_V0819_Val_beifen.py","file_name":"S3_Strain_pred_My_Method_V0819_Val_beifen.py","file_ext":"py","file_size_in_byte":20525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"97940430","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 20 11:30:09 2019\n\n@author: rahul\n\"\"\"\n#%%\nimport cv2\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.utils import shuffle\nfrom sklearn.model_selection import train_test_split\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense,Dropout,Activation,Flatten\nfrom keras.layers.convolutional import Convolution2D,MaxPooling2D\nfrom keras.optimizers import SGD,RMSprop,adam\nfrom sklearn.metrics import classification_report,confusion_matrix\nfrom keras.models import model_from_json\n#%%\ndata_path = ''\n\ndata_dir_list = os.listdir(data_path)\n\nimg_rows = 200\nimg_cols = 200\nmax_channel = 3\nmax_epoch = 5\n\nmax_classes = 2\n\nimg_data_list = []\n#%%\nfor i in range(len(data_dir_list)):\n img_name = 'frame'+str(i+1)+\".jpg\"\n img_path = data_path + \"/\" +img_name\n print('Loaded the images of dataset='+'{}\\n'.format(i))\n input_img = 
cv2.imread(img_path)\n input_img_resize = cv2.resize(input_img,(200,200))\n img_data_list.append(input_img_resize)\n#%% \nimg_data = np.array(img_data_list)\nimg_data = img_data.astype('float32')\nimg_data/=255\nprint(img_data.shape)\n#(100, 200, 200, 3) - 100 =size,\n#%%\nnum_classes = 2\n\nnum_of_supplies = img_data.shape[0]\nlabels = np.ones((num_of_supplies,),dtype = 'int64')\nlabels[0:2000] = 0\nlabels[2000:] = 1\nnames = ['penguin','flamingo']\n#%%\nY = np_utils.to_categorical(labels,num_classes)\nx,y = shuffle(img_data, Y , random_state = 2)\n\nX_train,X_test,y_train,y_test = train_test_split(x,y,test_size = 0.3,random_state = 0)\n\ninput_shape = img_data[0].shape\n\nmodel = Sequential()\nmodel.add(Convolution2D(16,(2,2),strides = (2,2),padding = 'same',data_format = \"channels_last\",activation = 'relu',input_shape = input_shape))\nmodel.add(MaxPooling2D(pool_size=(2,2)))\n\nmodel.add(Convolution2D(16,(2,2),padding = 'same',activation = 'relu'))\nmodel.add(MaxPooling2D(pool_size = (2,2)))\n\nmodel.add(Convolution2D(16,(2,2),padding = 'same',activation = 'relu'))\nmodel.add(MaxPooling2D(pool_size = (2,2)))\n\nmodel.add(Flatten())\n\nmodel.add(Dense(16,activation = 'relu'))\nmodel.add(Dropout(0.25))\n\nmodel.add(Dense(2,activation = 'sigmoid'))\nmodel.compile(optimizer = 'rmsprop',loss = 'binary_crossentropy', metrics = ['accuracy'])\n\nhist = model.fit(X_train,y_train,batch_size = 64,nb_epoch = 5,verbose = 1,validation_data =( X_test,y_test) )\n#%%\nmodel.summary()\nmodel.get_config()\nmodel.layers[0].get_config()\nmodel.layers[0].input_shape\nmodel.layers[0].output_shape\nmodel.layers[0].get_weights()\nnp.shape(model.layers[0].get_weights()[0])\nmodel.layers[0].trainable\n\ntrain_loss = hist.history['loss']\nval_loss = hist.history['val_loss']\ntrain_acc = hist.history['acc']\nval_acc = hist.history['val_acc']\nxc = range(5)\n\nplt.figure(1,figsize = (7,5))\nplt.plot(xc,train_loss)\nplt.plot(xc,val_loss)\nplt.xlabel(10)\nplt.ylabel('loss')\nplt.title('train_loss vs val_loss')\nplt.grid(True)\nplt.legend(['train','val'])\nprint(plt.style.available)\nplt.style.use(['classic'])\n\nplt.figure(2,figsize = (7,5))\nplt.plot(xc,train_acc)\nplt.plot(xc,val_acc)\nplt.xlabel(10)\nplt.ylabel('accuracy')\nplt.title('train_acc vs val_acc')\nplt.grid(True)\nplt.legend(['train_acc','val_acc'])\nprint(plt.style.available)\nplt.style.use(['classic'])\n\n'''\ny_pred = model.predict_classes(X_test)\nprint(y_pred)\n#cm = confusion_matrix(y_test,y_pred)\ncm = confusion_matrix()'''\n#%%\nmodel_json = model.to_json()\nwith open(\"model.json\",\"w\") as json_file:\n json_file.write(model_json)\n \nmodel.save_weights(\"model.h5\")\nprint(\"Save model to disk\")\n","sub_path":"cnn_image_processing.py","file_name":"cnn_image_processing.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"583277544","text":"import keras\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nimport numpy as np\nimport pandas as pd\n\nbatch_size = 128\nnum_classes = 10\nepochs = 3\n\nimg_rows, img_cols = 28, 28\n\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\nx_train = x_train.reshape(60000,28,28,1)\nx_test = x_test.reshape(10000,28,28,1)\n\nprint('x_train shape:', x_train.shape)\nprint(x_train.shape[0], 'train samples')\nprint(x_test.shape[0], 'test samples')\n\ny_train = keras.utils.to_categorical(y_train, 
num_classes)\ny_test = keras.utils.to_categorical(y_test, num_classes)\n\nmodel = Sequential()\nmodel.add(Conv2D(32, kernel_size=(3, 3),\n activation='relu',\n input_shape=(28,28,1)))\nmodel.add(Conv2D(64, (3, 3), activation='relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\nmodel.add(Dropout(0.25))\nmodel.add(Flatten())\nmodel.add(Dense(128, activation='relu'))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(num_classes, activation='softmax'))\n\nmodel.compile(loss=keras.losses.categorical_crossentropy,\n optimizer=keras.optimizers.Adadelta(),\n metrics=['accuracy'])\n\nmodel.fit(x_train, y_train,\n batch_size=batch_size,\n epochs=epochs,\n verbose=1,\n validation_data=(x_test, y_test))\n# score = model.evaluate(x_test, y_test, verbose=0)\n# print('Test loss:', score[0])\n# print('Test accuracy:', score[1])\n\nX_test = pd.read_csv('../data/test.csv')\nX_test = X_test.values.reshape(X_test.shape[0],28,28,1)\ny_pred = model.predict_classes(X_test, verbose=1)\n\ndef write_preds(y_pred, fname):\n pd.DataFrame({\"ImageId\":list(range(1,len(y_pred)+1)),\"Label\":y_pred}).to_csv(fname,index=False,header=True)\n\nwrite_preds(y_pred, \"keras-cnn-json.csv\")\n\n#Save the model\n# serialize model to JSON\nmodel_digit_json = model.to_json()\nwith open(\"model_digit_rec.json\", \"w\") as json_file:\n json_file.write(model_digit_json)\n# serialize weights to HDF5\nmodel.save_weights(\"model_digit_rec.h5\")\nprint(\"Saved model to disk\")\n","sub_path":"CNN-two-conv/recognition.py","file_name":"recognition.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"15117682","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom PyQt4.QtGui import *\nfrom PyQt4.QtCore import *\n\nfrom GuiAdherent import *\nfrom GuiJeu import *\nfrom GuiReservation import *\nfrom GuiExtension import *\nfrom GuiEmprunt import *\n\nfrom GuiCreationUtilisateur import *\nfrom GuiCreationJeu import *\n\n\nimport sys\n\nsys.path.append(r'./lib')\nfrom EnsAdherents import *\n\n\n\n\n\nclass GuiMain(QMainWindow):\n\n def __init__(self,user):\n super(GuiMain, self).__init__()\n self.user = user\n adh = AfficherAdherent(self.user)\n self.status = adh[8]\n self.initUI()\n\n def reinit(self):\n reply = QMessageBox.question(self, 'Confirmation', 'Etes vous sur de vouloir remettre toutes les cotisations à zéro ?', QMessageBox.Yes, QMessageBox.No)\n\n if reply == QMessageBox.Yes:\n ReinitCotisations()\n self.widgetAdherent()\n QMessageBox.information(self,self.trUtf8(\"Succes\"),self.trUtf8(\"Réinitialisation réussie !\"))\n\n\n def widgetMonCompte(self):\n moncompte = GuiModificationUtilisateur(table = None,username = self.user,status = self.status)\n self.setCentralWidget(moncompte)\n\n def widgetEmprunt(self):\n Emprunt = GuiEmprunt()\n self.setCentralWidget(Emprunt)\n\n def widgetAjoutAdherent(self):\n\n self.fen = GuiCreationUtilisateur()\n self.fen.show()\n\n def widgetAjoutJeu(self):\n\n self.fen = GuiCreationJeu()\n self.fen.show()\n\n def widgetAdherent(self):\n\n Adherent = GuiAdherent(self.status)\n self.setCentralWidget(Adherent)\n\n def widgetExtension(self):\n Extension = GuiExtension()\n self.setCentralWidget(Extension)\n\n def widgetJeux(self):\n\n # Création du widget\n Jeu = GuiJeu(self.user,self.status)\n self.setCentralWidget(Jeu)\n\n def widgetReserv(self):\n\n # Création du widget\n Reservation = GuiReservation()\n self.setCentralWidget(Reservation)\n\n def center(self):\n qr = self.frameGeometry()\n cp = 
QDesktopWidget().availableGeometry().center()\n qr.moveCenter(cp)\n self.move(qr.topLeft())\n\n\n def initUI(self):\n\n\n zoneCentrale = QWidget()\n self.center()\n reinitCotisationsAction = QAction(QIcon('exit24.png'), 'Reinitialiser cotisations', self)\n reinitCotisationsAction.setShortcut('Ctrl+W')\n reinitCotisationsAction.setStatusTip('Reinitialiser les cotisations')\n reinitCotisationsAction.triggered.connect(self.reinit)\n\n exitAction = QAction(QIcon('exit24.png'), 'Quitter', self)\n exitAction.setShortcut('Ctrl+Q')\n exitAction.setStatusTip('Quitter l\\'application')\n exitAction.triggered.connect(self.close)\n\n toggleEmprunt = QAction(QIcon('exit24.png'), 'Liste des emprunts', self)\n toggleEmprunt.setShortcut('Ctrl+T')\n toggleEmprunt.setStatusTip('Montrer la liste des emprunts')\n toggleEmprunt.triggered.connect(self.widgetEmprunt)\n\n toggleAdherent = QAction(QIcon('exit24.png'), 'Liste des Adherents', self)\n toggleAdherent.setShortcut('Ctrl+A')\n toggleAdherent.setStatusTip('Montrer la liste des Adhérents')\n toggleAdherent.triggered.connect(self.widgetAdherent)\n\n toggleCreationAdherent = QAction(QIcon('exit24.png'), 'Ajouter un adherent', self)\n toggleCreationAdherent.setStatusTip('Formulaire d\\ajout d\\'adhérent')\n toggleCreationAdherent.triggered.connect(self.widgetAjoutAdherent)\n\n\n toggleCreationJeu = QAction(QIcon('exit24.png'), 'Ajouter un jeu', self)\n toggleCreationJeu.setStatusTip('Formulaire d\\ajout d\\'un jeu')\n toggleCreationJeu.triggered.connect(self.widgetAjoutJeu)\n\n toggleJeu = QAction(QIcon('jeu.png'), 'Liste des Jeux', self)\n toggleJeu.setShortcut('Ctrl+J')\n toggleJeu.setStatusTip('Montrer la liste des Jeux')\n toggleJeu.triggered.connect(self.widgetJeux)\n\n toggleReserv = QAction(QIcon('exit24.png'), 'Liste des Reservations', self)\n toggleReserv.setShortcut('Ctrl+R')\n toggleReserv.setStatusTip('Montrer la liste des Reservations')\n toggleReserv.triggered.connect(self.widgetReserv)\n\n toggleExtension = QAction(QIcon('exit24.png'), 'Liste des Extensions', self)\n toggleExtension.setShortcut('Ctrl+E')\n toggleExtension.setStatusTip('Montrer la liste des Extensions')\n toggleExtension.triggered.connect(self.widgetExtension)\n\n toggleMoncompte = QAction(QIcon('user1.png'), 'Mon Compte', self)\n toggleMoncompte.setShortcut('Ctrl+C')\n toggleMoncompte.setStatusTip('Aperçu de mes informations')\n toggleMoncompte.triggered.connect(self.widgetMonCompte)\n\n if self.status == 1:\n self.widgetJeux()\n\n menubar = self.menuBar()\n\n mainMenu = menubar.addMenu('&Ludotech')\n mainMenu.addAction(exitAction)\n\n adherentMenu = menubar.addMenu('&Adherents')\n adherentMenu.addAction(toggleAdherent)\n adherentMenu.addAction(toggleCreationAdherent)\n adherentMenu.addAction(reinitCotisationsAction)\n\n jeuMenu = menubar.addMenu('&Jeux')\n jeuMenu.addAction(toggleJeu)\n jeuMenu.addAction(toggleExtension)\n\n jeuMenu.addAction(toggleCreationJeu)\n\n reservMenu = menubar.addMenu('&Reservations')\n reservMenu.addAction(toggleReserv)\n\n empruntsMenu = menubar.addMenu('&Emprunts')\n empruntsMenu.addAction(toggleEmprunt)\n self.setWindowTitle('Gestion de Ludotech')\n\n else:\n self.widgetMonCompte()\n self.toolbar = self.addToolBar('Quitter')\n self.toolbar.addAction(exitAction)\n self.toolbar.addAction(toggleMoncompte)\n self.toolbar.addAction(toggleJeu)\n self.setWindowTitle('Espace personnel de Ludotech')\n\n\n self.setMinimumSize(1100,600)\n self.setGeometry(300, 300, 300, 300)\n 
self.show()\n","sub_path":"src/gui/GuiMain.py","file_name":"GuiMain.py","file_ext":"py","file_size_in_byte":5996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"207225115","text":"# TLE RECURSIVE SOLUTION\nfrom collections import deque\nfrom math import *\nimport sys\nsys.setrecursionlimit(10**6)\nn, q = map(int, sys.stdin.readline().split())\n\nd = (int)(log(n, 2))+1\n\ndp = [[0]*(n+1) for _ in range(d)] # for binary lifting\ndepth = [-1]*(n+1) # depth of all nodes\nparent = [0]*(n+1) # to store parent of each node\n\n# building actual tree\ntree = [[] for _ in range(n+1)]\nfor _ in range(n-1):\n a, b = map(int, input().split())\n tree[a].append(b)\n tree[b].append(a)\n\n\n# initialize the dp table\ndef iniDFS(node):\n stack = deque()\n stack.appendleft(1)\n while len(stack) > 0:\n node = stack.pop()\n for child in tree[node]:\n if dp[0][child] == 0:\n dp[0][child] = node\n parent[child] = node\n stack.appendleft(child)\n\n\niniDFS(1)\n\n# filling binary lifting table\nfor j in range(1, d):\n for i in range(2, n+1):\n dp[j][i] = dp[j-1][dp[j-1][i]]\n\n\ndef depthBFS(node, d):\n q = deque()\n q.appendleft(1)\n while len(q) > 0:\n for _ in range(len(q)):\n node = q.pop()\n depth[node] = d\n for child in tree[node]:\n if depth[child] == -1:\n q.appendleft(child)\n d += 1\n\n\ndef kthAncestor(i, k):\n b = 0\n while k > 0:\n if k & 1:\n i = dp[b][i]\n k >>= 1\n b += 1\n if i == 0:\n i = -1\n return i\n\n\ndef LCA(u, v):\n if depth[v] > depth[u]: # u should be lower node\n u, v = v, u\n u = kthAncestor(u, depth[u]-depth[v]) # making u and v at same level\n\n if u == v: # if v is already ancestor of u\n return u\n # move both nodes up in bigger steps one by one\n # until their ancesor are not same\n for j in range(d-1, -1, -1):\n if dp[j][u] != dp[j][v]:\n u = dp[j][u]\n v = dp[j][v]\n return dp[0][u]\n\n\ndepthBFS(1, 0)\nprefix = [0]*(n+1) # to store prefix sums of tree\nparent[1] = 0\nfor _ in range(q):\n u, v = map(int, sys.stdin.readline().split())\n lca = LCA(u, v)\n plca = parent[lca]\n prefix[u] += 1\n prefix[v] += 1\n prefix[lca] -= 1\n prefix[plca] -= 1\n\n# running final summation\ndef prefixSum(node, parent):\n for child in tree[node]:\n if child != parent:\n prefix[node] += prefixSum(child, node)\n return prefix[node]\n\n\nprefixSum(1, 1)\nprint(*prefix[1:])\n","sub_path":"Tree Algorithms/Counting Paths/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"287102822","text":"from sermons.models import Sermon, SermonSession\nfrom useraccounts.models import UserAccount\n\n\ndef get_or_create_session(sermon_id, user_id):\n try:\n sermon = Sermon.objects.get(pk=sermon_id)\n except Sermon.DoesNotExist:\n return None\n\n try:\n user = UserAccount.objects.get(pk=user_id)\n except UserAccount.DoesNotExist:\n return None\n\n try:\n session = SermonSession.objects.get(sermon_id=sermon.id, user_id=user.id)\n return session\n except SermonSession.DoesNotExist:\n session = SermonSession(sermon=sermon, user=user, position=0)\n session.save()\n return session\n\n return None\n\n\ndef get_completed_user_sessions_for_sermons(user_id, sermon_ids):\n \"\"\"\n Get all the completed sessions for a give list of sermon IDs.\n :param user_id: ID of the user in question\n :param sermon_ids: A list of sermon IDs\n :return: A list of sessions\n \"\"\"\n sessions = SermonSession.objects.filter(\n user_id=user_id, 
sermon_id__in=sermon_ids, completed=True\n )\n\n session_dict = {}\n for session in sessions:\n if session.sermon_id not in session_dict:\n session_dict[session.sermon_id] = session\n\n return session_dict\n","sub_path":"sermons/queries.py","file_name":"queries.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"472165505","text":"import tornado.web\nimport tornado.httpserver\nimport tornado.ioloop\nfrom tornado.options import define, options\nfrom sysinfo import sysInfo\nimport json\ndefine('port', default=8080, type=int, help='Http Server bind port')\ndefine('host', default='0.0.0.0', type=str, help='Http Server bind host')\nclass index(tornado.web.RequestHandler):\n def get(self):\n sys_info = sysInfo()\n name = self.get_argument('name', 'all')\n if name in sys_info.all:\n self.write(sys_info.all[name])\n else:\n self.write('http://example/?name=[name], 
name in {0}'.format(list(sys_info.all.keys())))\n\napp = tornado.web.Application([(r'/', index)])\nserver = tornado.httpserver.HTTPServer(app)\nserver.listen(address=options.host, port=options.port)\ntornado.ioloop.IOLoop.current().start()\n","sub_path":"http_sysinfo.py","file_name":"http_sysinfo.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"141855514","text":"\n# coding: utf-8\n\n# # Trained ResNet-50 classification \n# \n# - new data generators\n\n# In[2]:\n\nimport os\nfrom datetime import datetime\nimport numpy as np\n\n\n# In[3]:\n\n# Project\nimport sys\nsys.path.append(os.path.join(os.path.abspath(os.path.dirname('.')), '..', 'common'))\n\nfrom resnet_keras122 import get_resnet50\nfrom training_utils import get_trainval_id_type_lists\n\nprint(\"\\n {} - Get train/val lists ...\".format(datetime.now()))\ntrain_id_type_list, val_id_type_list = get_trainval_id_type_lists()\n\nprint(\"\\n {} - Get ResNet-50 model ...\".format(datetime.now()))\nresnet = get_resnet50(opt='adadelta')\nresnet.summary()\n\nseed = 2017\nnp.random.seed(seed)\nsave_prefix='resnet_4_dense_adadelta_seed=%i' % seed\n\nfrom glob import glob\n\nweights_files = glob(\"weights/%s*.h5\" % save_prefix)\nbest_val_loss = 1e5\nbest_weights_filename = \"\"\nfor f in weights_files:\n index = os.path.basename(f).index('-')\n loss = float(os.path.basename(f)[index+1:-4])\n if best_val_loss > loss:\n best_val_loss = loss\n best_weights_filename = f\nprint(\"Best val loss weights: \", best_weights_filename)\n\n\nif len(best_weights_filename) > 0:\n # load weights to the model\n print(\"Load found weights: \", best_weights_filename)\n resnet.load_weights(best_weights_filename)\n\nfrom xy_providers import DataCache\ncache = DataCache(2000)\n\nif True:\n from training_utils import classification_train as train\n \n nb_epochs = 50\n batch_size = 4\n \n print(\"\\n {} - Start training ...\".format(datetime.now()))\n h = train(resnet, \n train_id_type_list, \n val_id_type_list, \n nb_epochs=nb_epochs,\n lrate_decay_f=None,\n batch_size=batch_size, \n xy_provider_cache=cache,\n seed=seed,\n save_prefix=save_prefix)\n\n\nfrom training_utils import classification_validate as validate\nfrom test_utils import classification_predict as predict\n\nbatch_size = 4\n\n\nprint(\"\\n {} - Start validation ...\".format(datetime.now()))\nvalidate(resnet, val_id_type_list, batch_size=batch_size, xy_provider_cache=cache)\n\n\nprint(\"\\n {} - Start predictions and write submission ...\".format(datetime.now()))\nfrom test_utils import get_test_id_type_list\ntest_id_type_list = get_test_id_type_list()\npredict(resnet, test_id_type_list, info=save_prefix, batch_size=batch_size)\n\n\n","sub_path":"scripts/resnet_11_classification_with_keras.py","file_name":"resnet_11_classification_with_keras.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"341861051","text":"from tclCommands.TclCommand import TclCommandSignaled\n\nimport collections\n\nimport gettext\nimport appTranslation as fcTranslate\nimport builtins\n\nfcTranslate.apply_language('strings')\nif '_' not in builtins.__dict__:\n _ = gettext.gettext\n\n\nclass TclCommandSubtractRectangle(TclCommandSignaled):\n \"\"\"\n Tcl shell command to subtract a rectangle from the given Geometry object.\n \"\"\"\n\n # array of all command aliases, to be able use old names for backward compatibility (add_poly, 
add_polygon)\n aliases = ['subtract_rectangle']\n\n description = '%s %s' % (\"--\", \"Subtract a rectangle from the given Geometry object. \"\n \"The coordinates are provided in X Y pairs.\")\n\n # Dictionary of types from Tcl command, needs to be ordered.\n # For positional arguments\n arg_names = collections.OrderedDict([\n ('name', str)\n ])\n\n # Dictionary of types from Tcl command, needs to be ordered.\n # For options like -optionname value\n option_types = collections.OrderedDict([\n\n ])\n\n # array of mandatory options for current Tcl command: required = {'name','outname'}\n required = ['name']\n\n # structured help for current command, args needs to be ordered\n help = {\n 'main': \"Subtract a rectangle from the given Geometry object. The coordinates are provided in X Y pairs.\\n\"\n \"If the number of coordinates is not even then the 'Incomplete coordinates' error is raised.\",\n 'args': collections.OrderedDict([\n ('name', 'Name of the Geometry object from which to subtract. Required.'),\n ('x0 y0', 'Bottom left corner coordinates.'),\n ('x1 y1', 'Top right corner coordinates.')\n ]),\n 'examples': ['subtract_rectangle geo_obj 8 8 15 15']\n }\n\n def execute(self, args, unnamed_args):\n \"\"\"\n execute current TCL shell command\n\n :param args: array of known named arguments and options\n :param unnamed_args: array of other values which were passed into command\n without -somename and we do not have them in known arg_names\n :return: None or exception\n \"\"\"\n if 'name' not in args:\n self.raise_tcl_error(\"%s:\" % _(\"No Geometry name in args. Provide a name and try again.\"))\n return 'fail'\n obj_name = args['name']\n\n if len(unnamed_args) != 4:\n self.raise_tcl_error(\"Incomplete coordinates. There are 4 required: x0 y0 x1 y1.\")\n return 'fail'\n\n x0 = float(unnamed_args[0])\n y0 = float(unnamed_args[1])\n x1 = float(unnamed_args[2])\n y1 = float(unnamed_args[3])\n\n try:\n obj = self.app.collection.get_by_name(str(obj_name))\n except Exception:\n return \"Could not retrieve object: %s\" % obj_name\n\n if obj is None:\n return \"Object not found: %s\" % obj_name\n\n obj.subtract_polygon([(x0, y0), (x1, y0), (x1, y1), (x0, y1)])\n","sub_path":"HSRWLaserTool_APP/FlatCAM_beta_8.994_sources/tclCommands/TclCommandSubtractRectangle.py","file_name":"TclCommandSubtractRectangle.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"181322892","text":"from typing import Iterable, List, Reversible, Tuple\r\nimport textwrap\r\n\r\nimport tcod\r\n\r\nimport color\r\n\r\n\r\nclass Message:\r\n def __init__(self, text: str, fg: Tuple[int, int, int]):\r\n self.plain_text = text # the message text\r\n self.fg = fg # the message color\r\n self.count = 1 # needed when the same message repeats several times\r\n\r\n @property\r\n def full_text(self) -> str:\r\n \"\"\"Return the full text of this message, including the count if necessary.\"\"\"\r\n if self.count > 1:\r\n return f\"{self.plain_text} (x{self.count})\"\r\n return self.plain_text\r\n\r\n# stores the list of messages\r\nclass MessageLog:\r\n def __init__(self) -> None:\r\n self.messages: List[Message] = []\r\n\r\n def add_message(\r\n self, text: str, fg: Tuple[int, int, int] = color.white, *, stack: bool = True,\r\n ) -> None:\r\n \"\"\"Add a message to the log.\r\n If `stack` is True then the message can stack with a previous message\r\n of the same text.\r\n \"\"\"\r\n if stack and self.messages and text == 
self.messages[-1].plain_text:\r\n self.messages[-1].count += 1\r\n else:\r\n self.messages.append(Message(text, fg))\r\n\r\n def render(\r\n self, console: tcod.Console, x: int, y: int, width: int, height: int,\r\n ) -> None:\r\n \"\"\"Render this log over the given area.\r\n `x`, `y`, `width`, `height` is the rectangular region to render onto\r\n the `console`.\r\n \"\"\"\r\n self.render_messages(console, x, y, width, height, self.messages)\r\n\r\n @staticmethod\r\n def wrap(string: str, width: int) -> Iterable[str]:\r\n \"\"\"Return a wrapped text message.\"\"\"\r\n for line in string.splitlines(): # Управляет новыми строками в сообщении.\r\n yield from textwrap.wrap(\r\n line, width, expand_tabs=True,\r\n )\r\n\r\n @classmethod\r\n def render_messages(\r\n cls,\r\n console: tcod.Console,\r\n x: int,\r\n y: int,\r\n width: int,\r\n height: int,\r\n messages: Reversible[Message],\r\n ) -> None:\r\n \"\"\"Отображает предоставленные сообщения начиная с последнего\r\n \"\"\"\r\n y_offset = height - 1\r\n\r\n for message in reversed(messages):\r\n for line in reversed(list(cls.wrap(message.full_text, width))): #clswrap.wrap делит текст на части для красивого отображения на экране \r\n console.print(x=x, y=y + y_offset, string=line, fg=message.fg)\r\n y_offset -= 1\r\n if y_offset < 0:\r\n return # Больше нет места для сообщений.\r\n","sub_path":"message_log.py","file_name":"message_log.py","file_ext":"py","file_size_in_byte":3011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"648485914","text":"from .transducer import Transducer\n\nDEFAULT_BLANK_FOREST = \"\\t\"\n\nclass ForestTrans(Transducer):\n\t\"\"\" Daughter class for 'forest' \"\"\"\n\n\tdef toUnsat(tree, labels):\n\t\t\n\t\tdef toUnsatRec(tree, idx, space):\n\t\t\tif not tree.children[idx]:\n\t\t\t\treturn [space*DEFAULT_BLANK_FOREST + \"[\", labels[idx], \"] \\n\"]\n\t\t\telse:\n\t\t\t\treturnStr = []\n\t\t\t\treturnStr.append(space*DEFAULT_BLANK_FOREST + \"[{\")\n\t\t\t\treturnStr.append(labels[idx])\n\t\t\t\treturnStr.append(\"} \\n\")\n\n\t\t\t\tfor c in tree.children[idx]:\n\t\t\t\t\treturnStr += toUnsatRec(tree, c, space + 1)\n\t\t\t\t\n\t\t\t\treturnStr.append(space*DEFAULT_BLANK_FOREST + \"]\\n\")\n\t\t\t\treturn returnStr\n\n\t\treturn [\"\\\\begin{forest}\\n\"] + toUnsatRec(tree, 0, 0) + [\"\\\\end{forest}\\n\"]","sub_path":"src/tree/transducer/forest.py","file_name":"forest.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"70792891","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom carsharing.models import CSUser\nfrom driver.models import Point, PointStatus\nimport requests\nimport json\n\nimport logging\nlogger = logging.getLogger('django')\n\ndef car5_start_order(order_id, cs_id):\n logger.info('car5_start_order')\n r = requests.post(\n \"https://car5.ru/car5/rs/proil.hook.php?session_id=b05e7a21-7a51-40cb-b84a-a56c073b0693\",\n data = {\n 'status': 'processing',\n 'order_id': order_id,\n }\n )\n if r.status_code != 200:\n logger.error(r.status_code)\n raise ValueError('Request failed')\n\n data = json.loads(r.content)\n if int(data[0].get('errcode')):\n raise ValueError(data)\n point = Point.objects.get(carsharing_id=cs_id, external_id=order_id)\n point.status=PointStatus.objects.get(name='processing')\n point.save()\n return True\n \n\n\ndef car5_cancel_order(order_id, cs_id, not_available, message='uncomfortably'):\n 
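# Editor's note: this helper and its siblings repeat the same request /\n    # status-check / errcode-parse dance against the car5 hook endpoint. A\n    # minimal consolidation sketch; the name _car5_post and the CAR5_HOOK_URL\n    # constant are illustrative assumptions, not part of the original module:\n    #\n    #   def _car5_post(payload):\n    #       r = requests.post(CAR5_HOOK_URL, data=payload)\n    #       if r.status_code != 200:\n    #           logger.error(r.status_code)\n    #           raise ValueError('Request failed')\n    #       data = json.loads(r.content)\n    #       if int(data[0].get('errcode')):\n    #           raise ValueError(data)\n    #       return data\n    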
logger.info('car5_cancel_order')\n r = requests.post(\n \"https://car5.ru/car5/rs/proil.hook.php?session_id=b05e7a21-7a51-40cb-b84a-a56c073b0693\",\n data = {\n 'message': message,\n 'order_id': order_id,\n }\n )\n if r.status_code != 200:\n logger.error(r.status_code)\n raise ValueError('Request failed')\n logger.info({\n 'message': message,\n 'order_id': order_id\n })\n try:\n data = json.loads(r.content)\n if int(data[0].get('errcode')):\n raise ValueError(data)\n try:\n if not_available:\n logger.info('Make order not_available with status processing')\n Point.objects.filter(carsharing_id=cs_id, external_id=order_id, status=PointStatus.objects.get(name='processing')).update(status=PointStatus.objects.get(name='not_available'))\n else:\n logger.info('Delete point with only status processing')\n Point.objects.get(carsharing_id=cs_id, external_id=order_id, status=PointStatus.objects.get(name='processing')).delete()\n except Exception as e:\n logger.error(e)\n raise ValueError(str(e))\n except Exception as e:\n logger.error(e)\n raise ValueError(str(e))\n\n return True\n\ndef car5_close_order(order_id, fuel_price, fuel_volume, cost, photos, cs_id):\n logger.info('car5_close_order')\n r = requests.post(\n \"https://car5.ru/car5/rs/proil.hook.php?session_id=b05e7a21-7a51-40cb-b84a-a56c073b0693\",\n data = {\n 'order_id': order_id,\n 'fuel_price': fuel_price,\n 'fuel_volume': fuel_volume,\n 'cost': cost,\n 'photos': \",\".join(photos)\n }\n )\n if r.status_code != 200:\n return False\n\n try:\n data = json.loads(r.content)\n if int(data[0].get('errcode')):\n return True\n except Exception as e:\n logger.error(e)\n\n return False\n\ndef car5_car_action(order_id, action):\n logger.info('car5_car_action {} {}'.format(order_id, action))\n r = requests.post(\n \"https://car5.ru/car5/rs/proil.hook.php?session_id=b05e7a21-7a51-40cb-b84a-a56c073b0693\",\n data = {\n 'order_id': order_id,\n 'cmd': action,\n }\n )\n if r.status_code != 200:\n return False\n\n try:\n data = json.loads(r.content)\n if int(data[0].get('errcode')):\n return True\n except Exception as e:\n logger.error(e)\n\n return False\n","sub_path":"carsharing_api/car5.py","file_name":"car5.py","file_ext":"py","file_size_in_byte":3522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"109998378","text":"from django.conf.urls import patterns, url\n\nfrom . 
import api as api\n\n\nurlpatterns = patterns('',\n    url(\n        regex=r'api/obstacles/(?P<pk>[0-9]+)/$',\n        view=api.ObstacleDetailView.as_view(),\n        name='api_obstacle_detail',\n    ),\n\n    url(\n        regex=r'api/obstacles/$',\n        view=api.ObstaclesView.as_view(),\n        name='api_obstacles_index',\n    ),\n\n    url(\n        regex=r'api/spots/(?P<pk>[0-9]+)/$',\n        view=api.SpotDetailView.as_view(),\n        name='api_spots_spot_detail',\n    ),\n\n    url(\n        regex=r'api/spots/$',\n        view=api.SpotsView.as_view(),\n        name='api_spots_index',\n    ),\n)\n","sub_path":"sherpa/apps/spots/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"455159305","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/3/18 17:55\n# @Author : Tom_tao\n# @Site : \n# @File : interval_task.py\n# @Software: PyCharm\n\nfrom selenium import webdriver\n\n# Absolute path to the chromedriver executable\ndriver_path = r\"D:\\chromedriver\\chromedriver.exe\"\n\nbrowser = webdriver.Chrome(executable_path=driver_path)\n\nbrowser.get('https://www.baidu.com/')\n\nprint((browser.page_source).encode('utf-8', 'ignore'))","sub_path":"ajax_spider_demo/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"518338077","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\nimport cgi,os\nprint (\"Content-type:text/html\\r\\n\\r\\n\")\nprint ('<html>')\nprint ('<head>')\nprint ('<title>Hello World - First CGI Program</title>')\nprint(\"\"\"\n\"\"\")\nprint ('</head>')\nprint ('<body>')\n\nprint(\"\"\"<form action=\"/cgi-bin/test2.py\" method=\"post\" enctype=\"multipart/form-data\">\n  <table>\n    <tr><td>File:</td><td><input type=\"file\" name=\"fileField\"></td></tr>\n    <tr><td>Directory:</td><td><input type=\"text\" name=\"dirField\"></td></tr>\n    <tr><td colspan=\"2\"><input type=\"submit\" value=\"Upload\"></td></tr>\n  </table>\n</form>\"\"\")\npost=cgi.FieldStorage()\nfile_dir = post[\"dirField\"].value\nresult = post[\"fileField\"]\nfile_name = os.path.basename(result.filename)\nfile = result.file.read()\nprint(file_dir)\n\nif not os.path.isdir(file_dir):\n    try:\n        os.makedirs(file_dir)\n    except:\n        print(\"Invalid file path\")\n\npath=os.path.join(file_dir,file_name)\n\nif os.path.exists(path):  # overwrite the existing file\n    try:\n        os.remove(path)\n        #file.save(path)\n        with open(path,\"wb\") as target:\n            target.write(file)\n    except:\n        print(\"Target is a directory\")\nelse:\n    with open(path,\"wb\") as target:\n        target.write(file)\n    #file.save(path)\n\n\nprint ('</body>')\nprint ('</html>')\n","sub_path":"cgi-bin/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"570619088","text":"import sys\n\ninput_file = sys.argv[1]\nprint(f'अजगर के साथ {input_file} चलाया जा रहा है')\n\neng_hindi_mapping = {\n\t'pass': 'जाने दो',\n\t'elif': 'नहीं अगर तो',\n\t'if': 'अगर',\n\t'else': 'नहीं तो',\n\t'or': 'या',\n\t'not': 'नहीं', \n\t'and': 'और',\n\t'in': 'अंदर',\n\t'del': 'हटा',\n\t'True': 'सच',\n\t'False': 'झूट',\n\t'None': 'अशक्त',\n\t'break': 'रुक जाओ',\n\t'def': 'परिभाषा',\n\t'return': 'अर्थ',\n\t'class': 'श्रेणी',\n\t'print': 'प्रिंट',\n\t'set': 'संग्रह',\n\t'pop': 'निकालो',\n\t'add': 'डालो',\n\t'extend': 'विस्तार',\n\t'while': 'जब तक',\n\t'hash': 'हैश',\n\t'__init__': '__स्थापना__',\n\t'self': 'स्वयं'\n}\n\n\nhindi_eng_mapping = {hindi: eng for eng, hindi in eng_hindi_mapping.items()}\n\n\ndef execute_hindi_file(input_file):\n\twith open(input_file, 'r') as f:\n\t\tfile_data = f.read()\n\n\tfor hin, eng in hindi_eng_mapping.items():\n\t\tfile_data = file_data.replace(hin, eng)\n\n\texec(file_data)\n\ndef convert_to_hindi_file(input_file):\n\twith open(input_file, 'r') as f:\n\t\tfile_data = f.read()\n\tfor eng, hin in hindi_eng_mapping.items():\n\t\tfile_data = file_data.replace(eng, hin)\n\toutput_file = input_file[:-2] + '_hindi.py'\n\twith open(output_file, 'w') as f:\n\t\tf.write(file_data)\n\nif __name__ == \"__main__\":\n\texecute_hindi_file(input_file)\n","sub_path":"ajagar.py","file_name":"ajagar.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"137197418","text":"from oscar.core.loading import get_model\nfrom oscar.apps.dashboard.catalogue.views import (\n    ProductListView as BaseProductListView\n)\n\nCategory = get_model('catalogue', 'Category')\n\n\nclass ProductListView(BaseProductListView):\n    def apply_search(self, queryset):\n        queryset = super().apply_search(queryset)\n\n        data = self.form.cleaned_data\n\n        if data.get('product_class'):\n            queryset = queryset.filter(product_class=data['product_class'])\n\n        if data.get('shipping_class'):\n            queryset = queryset.filter(shipping_class=data['shipping_class'])\n\n        if data.get('category'):\n            categories = Category.get_tree(data['category'])\n            queryset = queryset.filter(categories__in=categories)\n\n        return queryset\n","sub_path":"server/dashboard/catalogue/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}
+{"seq_id":"51242151","text":"import xml.etree.ElementTree as ET\nfrom redisearch import Client, AutoCompleter, Suggestion\n\nclient = Client('category', 'redis', 6379)\nac = AutoCompleter('ac_cat', conn=client.redis)\n\ndef main():\n    with open('data/CategoriesList.xml') as xml_file:\n        # create element 
tree object\n tree = ET.parse(xml_file)\n\n # get root element\n root = tree.getroot()\n print(\"root.tag is \", root.tag)\n cat_cntr = 0\n for child in root:\n print(\"child tag is \" + child.tag)\n print(\"child attribute is \" + str(child.attrib))\n for cat in root.findall('Response/CategoriesList/Category'):\n # print(\"starting in xml file\")\n # print(\"cat.tag is \" + str(cat.tag))\n # print(\"cat.attribute is \" + str(cat.attrib))\n cat_cntr += 1\n cat_id = cat.attrib['ID']\n cat_score = cat.attrib['Score']\n # print(\"ID is \", str(cat_id))\n for cat_child in cat:\n # print(\"cat_child is \" + str(cat_child))\n if cat_child.tag == 'Name' and cat_child.attrib['langid'] == '1':\n cat_name = cat_child.attrib['Value']\n # print(\"cat_name=\" + cat_name)\n elif cat_child.tag == 'ParentCategory' and cat_id != \"1\":\n parent_cat_id = cat_child.attrib['ID']\n # print(\"parent_cat_id is \" + parent_cat_id)\n for parent_child in cat_child:\n # print(\"parent_child is \" + str(parent_child))\n for name in parent_child:\n # print(\"name under parent child is \" + str(name))\n if name.tag == 'Name' and name.attrib['langid'] == '1':\n parent_cat_name = name.attrib['Value']\n client.add_document(\"category:\" + str(cat_id), ID=str(cat_id), lowpic=str(cat.attrib['LowPic']),\n thumbpic=str(cat.attrib['ThumbPic']),name=cat_name,\n parentcatid=parent_cat_id, parentcatname=parent_cat_name)\n ac.add_suggestions(Suggestion(cat_name, 1.0))\n if cat_cntr % 1000 == 0:\n print(str(cat_cntr) + \" categories loaded\")\n\n xml_file.close()\n print(str(cat_cntr) + \" categories loaded\")\n\n\nif '__main__' == __name__:\n main()\n","sub_path":"src/categoryImport.py","file_name":"categoryImport.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"191908116","text":"from manipulation.collision.collision_primitives import is_valid_point\nfrom manipulation.inverse_reachability.inverse_reachability import *\nfrom manipulation.primitives.placements import *\nfrom motion.single_query import *\nfrom openravepy import planning_error\nfrom primitives.contacts import get_contacts, manip_trans_from_pose_contact, approach_vector_from_pose_contact\nfrom misc.functions import irange\nfrom manipulation.primitives.utils import Pose\nfrom manipulation.primitives.transforms import normalize, length, quat_from_pose, pose_from_quat_point, \\\n quat_transform_point, point_from_pose, quat_inv\nfrom pick_and_place import query_paps\n\n# TODO - make push basis given available pushes and plan using that (ie push x, then y, etc)\n# TODO - check that the trajectory actually stays on the table\ndef linear_push_traj(oracle, object_name, initial_pose, goal_point, max_failures=LINEAR_PUSH_MAX_FAILURES):\n initial_quat = quat_from_pose(initial_pose.value)\n initial_point = point_from_pose(initial_pose.value)\n total_distance = length(goal_point.value-initial_point)\n steps = int(ceil(total_distance/PUSH_MAX_DISTANCE)+1)\n distances = np.linspace(0., total_distance, steps)\n direction = normalize(goal_point.value-initial_point)\n poses = [initial_pose] + [Pose(pose_from_quat_point(initial_quat, initial_point+d*direction)) for d in distances[1:]]\n contacts = get_contacts(oracle, object_name, quat_transform_point(quat_inv(initial_quat), direction))\n pushes = []\n for start_pose, end_pose in pairs(poses):\n failures = count()\n cycled_contacts = cycle(randomize(contacts))\n while next(failures) < max_failures:\n contact = next(cycled_contacts)\n 
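# Editor's note: bounded retry over a shuffled, cycled sequence of\n            # contacts; each pose pair gets max_failures sampling attempts before\n            # the while's else clause gives up on the whole trajectory. The same\n            # pattern in isolation (names purely illustrative):\n            #\n            #   candidates = cycle(randomize(options))\n            #   failures = count()\n            #   while next(failures) < max_failures:\n            #       if try_sample(next(candidates)):\n            #           break\n            #   else:\n            #       return None  # retry budget exhausted\n            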
push = Push(oracle.get_geom_hash(object_name), start_pose, end_pose, contact)\n if push.sample(oracle, object_name):\n pushes.append(push)\n break\n else:\n return None\n return pushes\n\n\"\"\"\ndef get_symmetrical_push_trajs(oracle, body_name, push_traj):\n symmetrical_push_trajs = []\n # Assuming can push in any direction, can just rotate the contacts and objects in place\n for grasp in get_grasps(oracle, body_name):\n if grasp == pap.grasp: continue\n trans = np.dot(np.linalg.inv(pap.grasp.grasp_trans), grasp.grasp_trans) # M*G = P | M = P*G^-1 | P*(G^-1*G') = P'\n if np.allclose(point_from_trans(trans), unit_point()): # NOTE - signifies only rotation of the object\n pose = Pose(pose_from_trans(np.dot(trans_from_pose(pap.pose.value), trans)))\n new_pap = copy.copy(pap)\n new_pap.pose = pose\n new_pap.grasp = grasp\n symmetrical_paps.append(new_pap)\n return symmetrical_push_trajs\n\"\"\"\n\n#################################################################\n\ndef sample_approach_config(oracle, base_trans, check_base=CHECK_BASE_REACHABLE):\n if not is_valid_point(oracle.robot, point_from_trans(base_trans)): return None\n set_trans(oracle.robot, base_trans)\n set_config(oracle.robot, oracle.default_left_arm_config, get_arm_indices(oracle))\n if robot_collision(oracle, check_self=False): return None\n approach_config = oracle.get_robot_config()\n if check_base and not oracle.grow_base_roadmap(approach_config): return None\n return approach_config\n\ndef sample_contact_config(oracle, manip_trans):\n grasp_arm_config = inverse_kinematics(oracle, manip_trans)\n if grasp_arm_config is None: return None\n set_config(oracle.robot, grasp_arm_config, get_arm_indices(oracle))\n return oracle.get_robot_config()\n\ndef sample_vector_traj(oracle, approach_vector):\n traj = vector_traj(oracle, approach_vector)\n if traj is None: return None\n set_config(oracle.robot, traj.end(), get_arm_indices(oracle))\n return traj, oracle.get_robot_config()\n\ndef sample_arm_traj(oracle, approach_config):\n #return manip_traj(oracle, approach_config.value[get_arm_indices(oracle)])\n return motion_plan(oracle.env, CSpace.robot_arm(get_manipulator(oracle.robot)), approach_config.value[get_arm_indices(oracle)])\n\nclass Push(object):\n def __init__(self, geom_hash, start_pose, end_pose, contact):\n self.geom_hash = geom_hash\n self.start_pose = start_pose\n self.end_pose = end_pose\n self.contact = contact\n self.sampled = False\n\n def sample_push(self, oracle, object_name):\n self.approach_config = sample_approach_config(oracle, self.base_trans)\n if self.approach_config is None: return False\n oracle.set_pose(object_name, self.start_pose)\n self.start_contact_config = sample_contact_config(oracle, manip_trans_from_pose_contact(self.start_pose, self.contact))\n if self.start_contact_config is None: return False\n grab(oracle, object_name)\n self.push_traj = workspace_traj(oracle, point_from_pose(self.end_pose.value)-point_from_pose(self.start_pose.value))\n release(oracle, object_name)\n if self.push_traj is None: return False\n self.end_contact_config = Config(self.start_contact_config.value.copy())\n self.end_contact_config.value[get_arm_indices(oracle)] = self.push_traj.end()\n return True\n\n def sample_setup(self, oracle, object_name, object_pose, contact_config):\n oracle.set_pose(object_name, object_pose)\n oracle.set_robot_config(contact_config)\n result = sample_vector_traj(oracle, approach_vector_from_pose_contact(object_pose, self.contact))\n if result is None: return None\n vector_traj, vector_config = 
result\n arm_traj = sample_arm_traj(oracle, self.approach_config)\n if arm_traj is None: return None\n return vector_config, (vector_traj, arm_traj)\n\n # TODO - seems like having trouble sampling bases. Make a distribution that is for pushing\n def sample(self, oracle, object_name, max_failures=PUSH_MAX_FAILURES):\n if self.sampled: return\n t0 = time()\n self.sampled = True\n self.successful = False\n with oracle.env:\n with oracle.robot:\n with oracle.body_saver(object_name):\n #mean_pose = pose_interpolate(self.start_pose.value, self.end_pose.value)\n base_iterator = custom_base_iterator(oracle, [(manip_trans_from_object_trans(\n trans_from_pose(self.start_pose.value), self.contact.grasp_trans), get_trans(oracle.robot), tuple())])\n for _ in irange(max_failures):\n #oracle.set_pose(object_name, None) # Probably don't need this\n try:\n self.base_trans, _ = next(base_iterator)\n except (StopIteration, ValueError, planning_error): # ValueError: low >= high because of an empty sequence\n break\n if not self.sample_push(oracle, object_name): continue\n result = self.sample_setup(oracle, object_name, self.end_pose, self.end_contact_config) # Postpush seems to be harder to sample than prepush\n if result is None: continue\n self.end_vector_config, self.end_trajs = result\n result = self.sample_setup(oracle, object_name, self.start_pose, self.start_contact_config)\n if result is None: continue\n self.start_vector_config, self.start_trajs = result\n self.start_trajs = tuple(reverse_trajectories(self.start_trajs))\n self.successful = True\n break\n self.sample_time = time() - t0\n return self.successful\n def smooth(self, oracle, object_name): # TODO\n pass\n def __repr__(self):\n return self.__class__.__name__ + str_object((self.start_pose, self.end_pose, self.contact))\n\n#################################################################\n\n# TODO - connect to nearest pose that will complete the roadmap\ndef get_push_trajs(oracle, body_name, initial_pose, goal_point, max_failures=LINEAR_PUSH_MAX_FAILURES):\n geom_hash = oracle.get_geom_hash(body_name)\n goal_pose = first(lambda pose: np.allclose(point_from_pose(pose.value), goal_point.value) and\n oracle.pushes[geom_hash](initial_pose, pose) is not None, oracle.pushes[geom_hash])\n #print goal_pose, initial_pose not in oracle.pushes[geom_hash], initial_pose\n #print oracle.pushes[geom_hash].vertices.values()\n #print filter(lambda pose: np.allclose(point_from_pose(pose.value), goal_point.value), oracle.pushes[geom_hash])\n if goal_pose is None or initial_pose not in oracle.pushes[geom_hash]: # NOTE - do not need later check if ensuring the traj is good\n with oracle.state_saver():\n oracle.set_all_object_poses({body_name: initial_pose})\n push_traj = linear_push_traj(oracle, body_name, initial_pose, goal_point, max_failures=max_failures)\n if push_traj is None: return []\n for push in push_traj:\n oracle.pushes[geom_hash].connect(push.start_pose, push.end_pose, push)\n goal_pose = push_traj[-1].end_pose\n return [oracle.pushes[geom_hash](initial_pose, goal_pose)[1]]\n\ndef query_from_edge_push_trajs(oracle, body_name, region_name, goal_point):\n geom_hash = oracle.get_geom_hash(body_name)\n for pose in filter(lambda p: oracle.on_edge(region_name, body_name, p), # and \\ oracle.pushes[geom_hash](p, goal_point) is not None, # NOTE - goal_point not a valid arg!!!!\n oracle.pushes[oracle.get_geom_hash(body_name)]):\n pap, _ = next(query_paps(oracle, body_name, poses=[pose], num_grasps=4, max_failures=15), (None, None))\n if pap is None: continue\n 
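# Editor's note: this generator lazily yields (pick-and-place,\n        # push-trajectory) pairs; a caller would typically take the first\n        # feasible pair, e.g. (illustrative only):\n        #\n        #   pair = next(query_from_edge_push_trajs(oracle, name, region, goal), None)\n        #   if pair is not None:\n        #       pap, push_traj = pair\n        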
push_trajs = get_push_trajs(oracle, body_name, pose, goal_point)\n if len(push_trajs) == 0: continue\n for push_traj in push_trajs:\n yield pap, push_traj\n\n for pose in random_edge_placements(oracle, body_name, region_name, z=goal_point.value[2], bias_point=goal_point.value):\n pap, _ = next(query_paps(oracle, body_name, poses=[pose], num_grasps=4, max_failures=15), (None, None))\n if pap is None: continue\n #for new_pap in (get_symmetrical_paps(oracle, body_name, pap) if SYMMETRICAL_PAPS else []):\n # oracle.add_pap(body_name, new_pap)\n # print new_pap\n push_trajs = get_push_trajs(oracle, body_name, pose, goal_point)\n if len(push_trajs) == 0: continue\n for push_traj in push_trajs:\n yield pap, push_traj\n\ndef query_to_edge_push_trajs(oracle, body_name, region_name, start_pose, grasps):\n geom_hash = oracle.get_geom_hash(body_name)\n start_point = point_from_pose(start_pose.value)\n for pose in filter(lambda p: oracle.on_edge(region_name, body_name, p) and \\\n oracle.pushes[geom_hash](start_pose, p) is not None,\n oracle.pushes[oracle.get_geom_hash(body_name)]):\n pap, _ = next(query_paps(oracle, body_name, poses=[pose], grasps=grasps, max_failures=15), (None, None))\n if pap is None: continue\n push_trajs = get_push_trajs(oracle, body_name, start_pose, Point(point_from_pose(pose.value)))\n if len(push_trajs) == 0: continue\n for push_traj in push_trajs:\n yield pap, push_traj\n\n for pose in random_edge_placements(oracle, body_name, region_name, z=start_point[2], use_quat=quat_from_pose(start_pose.value), bias_point=start_point):\n pap, _ = next(query_paps(oracle, body_name, poses=[pose], grasps=grasps, max_failures=15), (None, None))\n if pap is None: continue\n push_trajs = get_push_trajs(oracle, body_name, start_pose, Point(point_from_pose(pose.value)))\n if len(push_trajs) == 0: continue\n for push_traj in push_trajs:\n yield pap, push_traj\n","sub_path":"openrave_wrapper/manipulation/push.py","file_name":"push.py","file_ext":"py","file_size_in_byte":11118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"24752258","text":"just_len = 60\nimport re\nimport collections\n\nNUM = r'(?P\\d+)'\nPLUS = r'(?P\\+)'\nTIMES = r'(?P\\*)'\nLPAREN = r'(?P\\()'\nRPAREN = r'(?P\\))'\nWS = r'(?P\\s+)'\n\nmaster_pattern = re.compile('|'.join((NUM, PLUS, TIMES, LPAREN, RPAREN, WS)))\nToken = collections.namedtuple('Token', ['type', 'value'])\n\ndef generate_tokens(pattern, text):\n scanner = pattern.scanner(text)\n for m in iter(scanner.match, None):\n token = Token(m.lastgroup, m.group())\n\n if token.type != 'WS':\n yield token\n\nclass ExpressionEvaluator:\n\n def parse(self, text):\n self.tokens = generate_tokens(master_pattern, text)\n self.current_token = None\n self.next_token = None\n self._advance()\n return self.expr()\n\n def _advance(self):\n self.current_token, self.next_token = self.next_token, next(self.tokens, None)\n\n def _accept(self, token_type):\n\n if self.next_token and self.next_token.type == token_type:\n self._advance()\n return True\n else:\n return False\n\n def _expect(self, token_type):\n if not self._accept(token_type):\n raise SyntaxError('Expected ' + token_type)\n\n def expr(self):\n expr_value = self.term()\n\n \n while self._accept('PLUS'):\n \n op = self.current_token.type\n\n right = self.term()\n if op == 'PLUS':\n expr_value += right\n else:\n raise SyntaxError('Should not arrive here ' + op)\n\n return expr_value\n\n def term(self):\n \n term_value = self.factor()\n\n \n while self._accept('TIMES'):\n 
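# Editor's note: term() binds '*' tighter than expr() binds '+',\n            # i.e. the grammar implemented by this recursive-descent parser is:\n            #     expr   ::= term { '+' term }\n            #     term   ::= factor { '*' factor }\n            #     factor ::= NUM | '(' expr ')'\n            # so ExpressionEvaluator().parse('2 + 3 * 4') returns 14, not 20.\n            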
op = self.current_token.type\n\n if op == 'TIMES':\n term_value *= self.factor()\n else:\n raise SyntaxError('Should not arrive here ' + op)\n\n return term_value\n\n def factor(self):\n \n\n if self._accept('NUM'):\n return int(self.current_token.value)\n\n elif self._accept('LPAREN'):\n \n expr_value = self.expr()\n\n \n self._expect('RPAREN')\n\n \n return expr_value\n else:\n raise SyntaxError('Expect NUMBER or LPAREN')\n\ne = ExpressionEvaluator()\na = input(\"Enter the expression:\")\nprint('parse '+a.ljust(just_len),\n e.parse(a))\n\n","sub_path":"Advanced System Software/lab/programs/w52.py","file_name":"w52.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"183799240","text":"from PIL import Image\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\n\n\n\ndef get_average_color_of_frame(file, offset=(), size=(40, 40)):\n \"\"\"\n Given an open file of an image, crop to offset and size \n and then find the average color of cropped image.\n \"\"\"\n image = Image.open(file) # open image\n\n width = image.size[0]\n height = image.size[1]\n\n if not offset: \n offset = (\n width // 2 - size[0] // 2,\n height // 2 - size[1] // 2\n )\n\n image = image.crop((*offset, offset[0]+size[0],offset[1]+size[1]))\n\n im2arr = np.array(image)\n mean = [np.mean(im2arr[:,:,0]), np.mean(im2arr[:,:,1]), np.mean(im2arr[:,:,2])]\n return mean\n\n\ndef rgb_to_threat(im_rgb, colormap='plasma'):\n colmap = cm.get_cmap(plt.get_cmap(colormap))\n colmap = colmap.colors\n for i in range(len(colmap)): # convert RGB percentages to values\n for j in range(len(colmap[i])):\n colmap[i][j] = round(colmap[i][j]*255)\n match = best_match(im_rgb, colmap)\n threat_value = colmap.index(match)\n return threat_value # threat value is equal to the index of the closest match of im_rgb in the colmap array (0-255)\n\n\ndef distance(color1, color2):\n return math.sqrt(sum([(e1 - e2) ** 2 for e1, e2 in zip(color1, color2)]))\n\n\ndef best_match(sample, colors):\n by_distance = sorted(colors, key=lambda c: distance(c, sample))\n return by_distance[0]\n\n\nif __name__ == \"__main__\":\n color_val = get_average_color_of_frame('test.png')\n print(color_val)\n threat = rgb_to_threat(color_val, 'plasma')\n print(threat)\n","sub_path":"IPAS_easy/image_processing.py","file_name":"image_processing.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"136796194","text":"import sys\nimport xml.etree.ElementTree as ET\n\nclass Event:\n\tdef __init__(self, answer, printText, nextEvents):\n\t\tself.__answer = answer\n\t\tself.__printText = printText\n\t\tself.__nextEvents = nextEvents\n\t\tif answer is None:\n\t\t\tself.run()\n\tdef getAnswer(self):\n\t\treturn self.__answer\n\tdef run(self):\n\t\tif self.__nextEvents is None:\n\t\t\tprint(self.__printText)\n\t\telse:\n\t\t\tresponse = input(self.__questionText())\n\t\t\tfound = False\n\t\t\tfor event in self.__nextEvents:\n\t\t\t\tif event.getAnswer() == response:\n\t\t\t\t\tevent.run()\n\t\t\t\t\tfound = True\n\t\t\tif found == False:\n\t\t\t\tprint(\"Invalid Answer.\")\n\t\t\t\tself.run()\n\tdef __questionText(self):\n\t\ttext = self.__printText\n\t\tlength = len(self.__nextEvents)\n\t\tif length > 0:\n\t\t\ttext += \" [Choose \"\n\t\t\tfor i in range(length):\n\t\t\t\tif i > 0:\n\t\t\t\t\tif i == length - 1:\n\t\t\t\t\t\ttext += \" or 
\"\n\t\t\t\t\telse:\n\t\t\t\t\t\ttext += \", \"\n\t\t\t\ttext += self.__nextEvents[i].getAnswer();\n\t\t\ttext += \"]:\\n\"\n\t\treturn text\n\nclass GameXmlParser:\n\tdef parseFile(self):\n\t\ttree = ET.parse(sys.argv[1])\n\t\troot = tree.getroot()\n\t\tself.__parseEvent(root)\n\tdef __parseEvent(self, event):\n\t\tnextEvents = event.find(\"NextEvents\")\n\t\tnextEventsList = []\n\t\tif nextEvents is not None:\n\t\t\tfor nextEvent in nextEvents.findall('Event'):\n\t\t\t\tnextEventsList.append(self.__parseEvent(nextEvent))\n\t\telse:\n\t\t\tnextEventsList = None\n\t\tanswer = event.find(\"Answer\")\n\t\tif answer is not None:\n\t\t\tanswer = answer.text\n\t\tprintText = event.find(\"PrintText\")\n\t\tif printText is not None:\n\t\t\tprintText = printText.text\n\t\treturn Event(answer, printText, nextEventsList)\n\nparser = GameXmlParser()\nparser.parseFile()\n\nif False:\n\tEvent(\n\t\tNone,\n\t\t\"Greetings Adventurers! Where will you be questing today?\",\n\t\t[\n\t\t\tEvent(\n\t\t\t\t\"Dungeon\",\n\t\t\t\t\"What adventure awaits you in the dungeon?\",\n\t\t\t\t[\n\t\t\t\t\tEvent(\n\t\t\t\t\t\t\"Dragon\",\n\t\t\t\t\t\t\"Fighting a dragon, eh? Better take some fire-resistant armor and your dragon-slaying sword!\",\n\t\t\t\t\t\tNone \n\t\t\t\t\t),\n\t\t\t\t\tEvent(\n\t\t\t\t\t\t\"Giant Spider\",\n\t\t\t\t\t\t\"Giant spiders can be tricky. Best take your flame blade and lots of antidotes.\",\n\t\t\t\t\t\tNone\n\t\t\t\t\t),\n\t\t\t\t\tEvent(\n\t\t\t\t\t\t\"Hydra\",\n\t\t\t\t\t\t\"You'll want to take lots of grenades so you can take out all heads in one fell swoop!\",\n\t\t\t\t\t\tNone\n\t\t\t\t\t)\n\t\t\t\t]\n\t\t\t),\n\t\t\tEvent(\n\t\t\t\t\"Forbidden Tomb\", \n\t\t\t\t\"What could you want with that scary place?\", \n\t\t\t\t[\n\t\t\t\t\tEvent(\n\t\t\t\t\t\t\"Treasure\",\n\t\t\t\t\t\t\"Be sure to share the wealth when you get back!\",\n\t\t\t\t\t\tNone \n\t\t\t\t\t),\n\t\t\t\t\tEvent(\n\t\t\t\t\t\t\"Power\",\n\t\t\t\t\t\t\"Get strong enough and I just might hire you for a job!\",\n\t\t\t\t\t\tNone \n\t\t\t\t\t),\n\t\t\t\t\tEvent(\n\t\t\t\t\t\t\"Mystery\",\n\t\t\t\t\t\t\"I wonder what's down there sometimes, myself.\",\n\t\t\t\t\t\tNone \n\t\t\t\t\t)\n\t\t\t\t]\n\t\t\t),\n\t\t\tEvent(\n\t\t\t\t\"Chrystal Palace\", \n\t\t\t\t\"Ooooh, you're sure to find vast riches in there. Is there anything specifically you'll be looking for?\", \n\t\t\t\t[\n\t\t\t\t\tEvent(\n\t\t\t\t\t\t\"Jewels\",\n\t\t\t\t\t\t\"My daughter's getting married in a month. I'd be happy to buy some jewels from you if you find any!\",\n\t\t\t\t\t\tNone \n\t\t\t\t\t),\n\t\t\t\t\tEvent(\n\t\t\t\t\t\t\"Silver\",\n\t\t\t\t\t\t\"Silver's effective against the undead. 
You should definitely get some silver weapons crafted if you find silver!\",\t\t\n\t\t\t\t\t\tNone \n\t\t\t\t\t),\n\t\t\t\t\tEvent(\n\t\t\t\t\t\t\"Gold\",\n\t\t\t\t\t\t\"Hehehe, you'll have to sure the wealth when you return!\",\t\t\n\t\t\t\t\t\tNone \n\t\t\t\t\t)\n\t\t\t\t]\n\t\t\t)\n\t\t] \n\t)\n\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"184533880","text":"mod = 1000000007\nn = 10 ** 8\nm = n\nterm = pow(2, n * m, mod)\nresult = term\ntwopow = pow(4, n * mod - 2 * n, mod)\nfor k in range(1, n + 1):\n if k % 1000000 == 0:\n print(k)\n term = term * (m - 2 * k + 1) * (m - 2 * k + 2)\n term = term * pow(k, 2 * mod - 4, mod) * twopow\n term %= mod\n result += term\n result %= mod\ninput(result)","sub_path":"Problems 701-800/p743_WindowIntoAMatrix.py","file_name":"p743_WindowIntoAMatrix.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"140322513","text":"import math, heapq\r\nfrom tqdm import tqdm\r\nfrom time import clock\r\n\r\nstarttime = clock()\r\n# For n = p_1^{a_1}\\cdots p_k^{a_k}, #divisors = \\prod (a_i+1). So to get something with 2^500500 divisors, a_i = -1+2^l_i\r\n# for \\sum l_i = 500500.\r\n\r\ndef buildPrimeList(upper):\r\n #build a list of primes\r\n test = [1]*(upper+1)\r\n primes = []\r\n for n in tqdm(range(2,upper+1)):\r\n if test[n] == 1:\r\n primes.append(n)\r\n for k in range(2*n,upper+ 1,n):\r\n test[k] = 0\r\n\r\n return primes\r\n\r\n# New idea: keep a record of all the prime factors available and the relative cost of increasing their exponent.\r\ndef problem500(goalsum):\r\n if goalsum > 500500:\r\n print(\"Adjust parameters! goalsum too big.\")\r\n return 0\r\n unused = buildPrimeList(7376507)[:0:-1]\r\n current = [[4,2,1]] #data format represents p^(-1+2^a) as [cost of incrementing a, prime, a where current exponent is 2^a-1]\r\n total = 1\r\n #print(current[0][0],unused[-1])\r\n for t in tqdm(range(1,goalsum)):\r\n if current[0][0] > unused[-1]:\r\n #add a new prime\r\n current.append([unused[-1]**2,unused.pop(),1])\r\n else:\r\n #increment our current prime\r\n current[0] = [current[0][0]**2,current[0][1],current[0][2]+1]\r\n #resort current\r\n current.sort()\r\n\r\n for entry in current:\r\n total = (total * pow(entry[1],-1+2**entry[2],500500507)) % 500500507\r\n return total\r\n\r\n#print(problem500(500500)) #correct, very long (more than an hour). need to change data type?\r\n\r\ndef problem500h(goalsum):\r\n if goalsum > 500500:\r\n print(\"Adjust parameters! goalsum too big.\")\r\n return 0\r\n unused = buildPrimeList(7376507)[:0:-1]\r\n current = [[4,2,1]] #data format represents p^(-1+2^a) as [cost of incrementing a, prime, a where current exponent is 2^a-1]\r\n heapq.heapify(current)\r\n #print(current)\r\n total = 1\r\n #print(current[0][0],unused[-1])\r\n for t in tqdm(range(1,goalsum)):\r\n if current[0][0] > unused[-1]:\r\n #add a new prime\r\n heapq.heappush(current,[unused[-1]**2,unused.pop(),1])\r\n else:\r\n #increment our current prime\r\n temp = heapq.heappop(current)\r\n #print(temp)\r\n heapq.heappush(current,[temp[0]**2,temp[1],temp[2]+1])\r\n\r\n for entry in current:\r\n total = (total * pow(entry[1],-1+2**entry[2],500500507)) % 500500507\r\n return total\r\n\r\nprint(problem500h(500500)) #holy shit heaps are awesome. 
0s for heap implementation.\r\n","sub_path":"500.py","file_name":"500.py","file_ext":"py","file_size_in_byte":2557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"329631216","text":"# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport multiprocessing\nimport threading\nfrom multiprocessing.pool import ThreadPool\n\n\nclass SubprocPool:\n \"\"\"Singleton for managing multiprocessing.Pool instances.\n\n Subprocesses (including multiprocessing.Pool workers) can inherit locks in poorly written\n libraries (eg zlib) if other threads in the parent process happen to be holding them at the\n moment the worker is fork()'ed. Thus it is important to create any subprocesses BEFORE\n starting any threads, or they may deadlock mysteriously when sent a particular piece of work.\n\n This is accomplished in pants by these initializing pools early, when creating the RunTracker.\n\n However, in tests, RunTrackers are created repeatedly, as part of creating Contexts that\n are used briefly and discarded. Creating a new subprocess pool every time is expensive, and will\n lead to os.fork failing once too many processes are spawned.\n\n To avoid this, the pools themselves are kept in this singleton and new RunTrackers re-use them.\n \"\"\"\n\n _pool = None\n _lock = threading.Lock()\n _num_processes = multiprocessing.cpu_count()\n\n @classmethod\n def set_num_processes(cls, num_processes):\n cls._num_processes = num_processes\n\n @classmethod\n def foreground(cls):\n with cls._lock:\n if cls._pool is None:\n cls._pool = ThreadPool(processes=cls._num_processes)\n return cls._pool\n\n @classmethod\n def shutdown(cls, force):\n with cls._lock:\n old = cls._pool\n cls._pool = None\n\n if old:\n if force:\n old.terminate()\n else:\n old.close()\n old.join()\n","sub_path":"src/python/pants/base/worker_pool.py","file_name":"worker_pool.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"383858602","text":"#! 
python3\n# cpFileToNewFolder.py - move the order type file to another new folder\n\nimport shutil\nimport os\nimport re\n\ndef cpFileToFolder(folder):\n fileRegex = re.compile(r'.*\\.txt')\n folder = os.path.abspath(folder)\n os.makedirs('%s_new' % folder)\n newFolder = folder + '_' + 'new'\n for foldernames, subfolders, filenames in os.walk(folder):\n for filename in filenames:\n newfile = fileRegex.search(filename)\n if newfile == None:\n continue\n newfile = newfile.group()\n newfile = os.path.join(foldernames, newfile)\n # print(newfile)\n shutil.copy(newfile, newFolder)\n\n\ncpFileToFolder('data_bak')","sub_path":"act9/cpFileToNewFolder.py","file_name":"cpFileToNewFolder.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"326728067","text":"# -*- coding: utf-8 -*-\r\n\r\nROSTER = {}\r\n\r\ndef get_roster_auto(cljid):\r\n time.sleep(6)\r\n get_roster(cljid)\r\n\r\ndef get_roster(cljid):\r\n packet = IQ(CLIENTS[cljid], 'get')\r\n packet.addElement('query', 'jabber:iq:roster')\r\n packet.addCallback(roster_result_handler, cljid)\r\n reactor.callFromThread(packet.send, cljid)\r\n\r\ndef roster_result_handler(cljid, x):\r\n if x['type']=='result':\r\n if cljid in ROSTER.keys():\r\n ROSTER[cljid].clear()\r\n query = element2dict(x)['query']\r\n query = [i.attributes for i in query.children if i.__class__==domish.Element]\r\n if not cljid in ROSTER:\r\n ROSTER[cljid] = {}\r\n for c in query:\r\n try: ROSTER[cljid][c['jid']]=c['subscription']\r\n except: pass\r\n\r\ndef hnd_roster_del(t, s, p):\r\n if p:\r\n roster_del(p, s[3])\r\n reply(t, s, u'ok')\r\n \r\ndef roster_del(jid, cljid):\r\n q = domish.Element(('jabber:client', 'iq'))\r\n q['type'] = 'set'\r\n q['id'] = str(random.randrange(1,999))\r\n query = q.addElement('query', 'jabber:iq:roster')\r\n i = query.addElement('item')\r\n i['jid'] = jid\r\n i['subscription'] = 'remove'\r\n reactor.callFromThread(dd, q, CLIENTS[cljid])\r\n\r\ndef hnd_roster_add(t, s, p):\r\n if p:\r\n roster_add(p, s[3])\r\n reply(t, s, u'ok')\r\n\r\ndef roster_add(jid, cljid):\r\n p = domish.Element(('jabber:client', 'presence'))\r\n p['to'] = jid\r\n p['type'] = 'subscribe'\r\n reactor.callFromThread(dd, p, CLIENTS[cljid])\r\n\r\ndef hnd_roster_all(t, s, p):\r\n global ROSTER\r\n get_roster(s[3])\r\n time.sleep(1)\r\n if not s[3] in ROSTER:\r\n reply(t, s, u'Пусто!')\r\n return\r\n n=len(ROSTER[s[3]])\r\n reply(t, s, u'Всего контактов '+str(n)+':\\n'+'\\n'.join(ROSTER[s[3]].keys()))\r\n\r\nregister_command_handler(hnd_roster_del, 'roster_del', ['все'], 100, 'Удаляет контакты из ростера бота.', 'roster_del ', ['roster_del any@jid.ru'])\r\nregister_command_handler(hnd_roster_add, 'roster_add', ['все'], 100, 'Добавляет контакты в ростер бота.', 'roster_add ', ['roster_add any@jid.ru'])\r\nregister_command_handler(hnd_roster_all, 'roster_all', ['все'], 100, 'Выводит контакты из ростера бота.', 'roster_all', ['roster_all'])\r\nregister_stage0_init(get_roster_auto)\r\n","sub_path":"plugins/roster_plugin.py","file_name":"roster_plugin.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"233407763","text":"\t#for loop\n\nfor x in range(5, 12, 4): #'5' start no. '12' stop no. 
'4' increment no.\n\tprint (\"Moses\")\n\n\n\t#while loop\nbutter = 5\n\nwhile butter < 12:\n\tprint (butter)\n\tbreak;\n","sub_path":"py-progs/range-while.py","file_name":"range-while.py","file_ext":"py","file_size_in_byte":172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"638120717","text":"from unittest import TestCase\n\nfrom mock import MagicMock, patch, call\nfrom nose.tools import raises, eq_\n\nfrom managers.user import UserManager\nfrom tests.mocked_manager import MockManager, mocked_git, mocked_path\n\n\nclass TestUserManager(TestCase):\n @raises(ValueError)\n def test_create_user_with_no_key(self):\n with patch.multiple('managers.manager',\n Git=MagicMock(),\n Path=MagicMock()):\n users = UserManager('~/path/to/admin/gitolite/repo')\n users.create('test_username')\n\n def test_create_user_succesfully(self):\n mocked_user = MagicMock(return_value='test_username')\n\n UserManager.__bases__ = (MockManager, )\n with patch.multiple('managers.user', User=mocked_user,\n Manager=MagicMock()):\n users = UserManager('~/path/to/admin/gitolite/repo')\n\n eq_('test_username', users.create('test_username', 'key_path'))\n mocked_user.assert_called_once_with(mocked_path, mocked_git,\n 'test_username', keys=['key_path'])\n\n def test_get_user(self):\n mocked_user = MagicMock()\n mocked_user.get_by_name.return_value = 'test_user'\n\n UserManager.__bases__ = (MockManager, )\n with patch.multiple('managers.user', User=mocked_user):\n users = UserManager('~/path/to/admin/gitolite/repo')\n\n eq_('test_user', users.get('test_user'))\n mocked_user.get_by_name.assert_called_once_with('test_user',\n mocked_path,\n mocked_git)\n\n def test_get_all_users(self):\n mocked_key_dir = MagicMock()\n mocked_file = MagicMock()\n mocked_dir = MagicMock()\n mocked_re = MagicMock()\n\n mocked_user = MagicMock()\n mocked_user.get_by_name.return_value = 'test_user'\n\n mocked_path.return_value = mocked_key_dir\n mocked_dir.isdir.return_value = True\n mocked_file.isdir.return_value = False\n mocked_file.__str__ = lambda x: 'ok_file'\n\n mocked_re.compile().findall.return_value = ['file1.pub']\n\n mocked_key_dir.walk.return_value = [mocked_file, mocked_dir]\n\n UserManager.__bases__ = (MockManager, )\n with patch.multiple('managers.user', User=mocked_user,\n Path=mocked_path, re=mocked_re):\n users = UserManager('~/path/to/admin/gitolite/repo')\n\n eq_(users.all(), ['test_user'])\n mocked_path.has_calls([call(mocked_path, 'keydir')])\n eq_(mocked_key_dir.walk.call_count, 1)\n eq_(mocked_dir.isdir.call_count, 1)\n eq_(mocked_file.isdir.call_count, 1)\n\n mocked_re.compile.has_calls([call('(\\w.pub)')])\n mocked_re.compile('\\w.pub').findall.assert_called_once_with('ok_file')\n\n mocked_user.get_by_name.assert_called_once_with('file1', mocked_path,\n mocked_git)\n","sub_path":"tests/managers/test_user.py","file_name":"test_user.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"35059978","text":"import random\nfrom copy import deepcopy\nfrom dataclasses import dataclass, asdict\nfrom numbers import Number\nfrom typing import Dict, Any, List, cast, Tuple, Callable, Optional, TypeVar, Type\n\nfrom solai_evolutionary_algorithm.evaluation.simulation.simulation_queue import CharacterConfig, AbilityConfig\nfrom solai_evolutionary_algorithm.evolution.evolution_types import Individual\nfrom solai_evolutionary_algorithm.evolution.generation_evolver import Mutation\nfrom 
solai_evolutionary_algorithm.utils.character_id import create_character_id\n\n\ndef __clamp(value: Number, value_range: Tuple[Number, Number]):\n \"\"\"\n range is a list of two numbers: [min, max]\n \"\"\"\n return max(value_range[0], min(value_range[1], value))\n\n\nPropertyMutator = Callable[[Any, Tuple[float, float], Tuple[Any, Any]], Any]\n\n\ndef mutate_float_property(\n value: float,\n mutation_factor_range: Tuple[float, float],\n value_range: Tuple[float, float]\n) -> float:\n mutation_factor = random.uniform(\n mutation_factor_range[0], mutation_factor_range[1])\n new_radius = mutation_factor * value\n new_radius_bound = __clamp(new_radius, value_range)\n return cast(float, new_radius_bound)\n\n\ndef mutate_int_property(\n value: int,\n mutation_factor_range: Tuple[float, float],\n value_range: Tuple[int, int]\n) -> int:\n return int(mutate_float_property(value, mutation_factor_range, value_range))\n\n\ndef mutate_bool_property(\n value: bool,\n mutation_factor_range: Tuple[float, float],\n value_range: Tuple[bool, bool],\n) -> bool:\n \"\"\"\n Returns the opposite value, ignoring mutation_factor_range and value_range.\n Bool properties should in general be converted to a number that can be converted to a bool when evaluated\n \"\"\"\n return not value\n\n\n# TypeVar(\"PropertyType\", Type[float], Type[int], Type[bool])\nPropertyType = Any\n\n\n@dataclass(frozen=True)\nclass PropertyMutationData:\n property_type: PropertyType\n probability: float # in the range [0, 1]\n mutation_factor_range: Tuple[float, float]\n value_range: Optional[Tuple[Any, Any]] = None\n\n\ndef mutate_property(value: PropertyType, mutation_data: PropertyMutationData) -> PropertyType:\n if mutation_data.property_type == float:\n return mutate_float_property(value, mutation_data.mutation_factor_range, mutation_data.value_range)\n elif mutation_data.property_type == int:\n return mutate_int_property(value, mutation_data.mutation_factor_range, mutation_data.value_range)\n elif mutation_data.property_type == bool:\n return mutate_bool_property(value, mutation_data.mutation_factor_range, mutation_data.value_range)\n else:\n raise ValueError(\n f\"Mutation property of type {mutation_data.property_type} is not supported\")\n\n\ndef mutate_property_with_probability(value: PropertyType, mutation_data: PropertyMutationData):\n if random.random() < mutation_data.probability:\n return mutate_property(value, mutation_data)\n else:\n return value\n\n\ndef mutate_ability(\n ability: AbilityConfig,\n properties_probability: Dict[str, float],\n mutation_factor_range: Tuple[float, float],\n melee_property_ranges: Dict[str, Tuple[Any, Any]],\n projectile_property_ranges: Dict[str, Tuple[Any, Any]],\n) -> AbilityConfig:\n mutateable_properties_with_type = {\n \"radius\": float,\n \"distanceFromChar\": float,\n \"speed\": float,\n \"startupTime\": int,\n \"activeTime\": int,\n \"executionTime\": int,\n \"endlagTime\": int,\n \"rechargeTime\": int,\n \"damage\": float,\n \"baseKnockback\": float,\n \"knockbackRatio\": float,\n \"knockbackPoint\": float,\n \"knockbackTowardPoint\": bool\n }\n\n property_mutators_by_type: Dict[Any, PropertyMutator] = {\n float: mutate_float_property,\n int: mutate_int_property,\n bool: mutate_bool_property\n }\n\n use_property_ranges: Dict[str, Tuple[Any, Any]\n ] = projectile_property_ranges if ability['type'] == 'PROJECTILE' else melee_property_ranges\n\n def mutate_ability_property(prop_name: str) -> Any:\n probability = properties_probability[prop_name]\n value = ability[prop_name]\n\n if 
random.random() < probability:\n            prop_type = mutateable_properties_with_type[prop_name]\n            mutate_func = property_mutators_by_type[prop_type]\n            value_range = use_property_ranges[prop_name]\n            return mutate_func(value, mutation_factor_range, value_range)\n        else:\n            return value\n\n    mutated_properties = {\n        prop_name: mutate_ability_property(prop_name)\n        for prop_name in mutateable_properties_with_type\n    }\n\n    mutated_ability: AbilityConfig = {\n        'name': ability['name'],\n        'type': ability['type'],\n        **mutated_properties\n    }\n    return mutated_ability\n\n\ndef mutate_character(\n    char: CharacterConfig,\n    character_property_data: Dict[str, PropertyMutationData],\n    melee_property_data: Dict[str, PropertyMutationData],\n    projectile_property_data: Dict[str, PropertyMutationData]\n):\n    char_mutated_props = {\n        prop_name: mutate_property_with_probability(\n            value=char[prop_name],\n            mutation_data=prop_data\n        )\n        for prop_name, prop_data in character_property_data.items()\n    }\n\n    abilities_mutated_props = [\n        {\n            prop_name: mutate_property_with_probability(\n                value=ability[prop_name],\n                mutation_data=prop_data\n            )\n            for prop_name, prop_data in (\n                melee_property_data if ability['type'] == 'MELEE' else projectile_property_data\n            ).items()\n        }\n        for ability in char['abilities']\n    ]\n\n    mutated_char: CharacterConfig = {\n        **char,\n        'characterId': create_character_id(),\n        **char_mutated_props,\n        'abilities': [\n            {\n                **ability,\n                **mutated_ability_props\n            }\n            for ability, mutated_ability_props in zip(char['abilities'], abilities_mutated_props)\n        ]\n    }\n    return mutated_char\n\n\n@dataclass\nclass PropertiesMutation(Mutation):\n    character_property_data: Dict[str, PropertyMutationData]\n    melee_property_data: Dict[str, PropertyMutationData]\n    projectile_property_data: Dict[str, PropertyMutationData]\n\n    def __call__(self, original_genome: Individual) -> Individual:\n        char_config = cast(CharacterConfig, original_genome)\n        mutated_char_config: CharacterConfig = mutate_character(\n            char_config,\n            character_property_data=self.character_property_data,\n            melee_property_data=self.melee_property_data,\n            projectile_property_data=self.projectile_property_data\n        )\n\n        return mutated_char_config\n\n    def serialize(self) -> str:\n        # Returns the dataclass fields rendered as a string.\n        return str(asdict(self))\n\n\ndef print_mutations(\n    char_config,\n    mutated_char_config,\n    character_property_data,\n    melee_property_data,\n    projectile_property_data\n):\n    def print_change(prop_name: str, orig_value, new_value, begin_with: str = \"\"):\n        if orig_value == new_value:\n            print(f\"{begin_with}{prop_name}: {new_value}\")\n        else:\n            print(f\"{begin_with}{prop_name}: {orig_value} -> {new_value}\")\n\n    for char_prop in character_property_data:\n        print_change(\n            char_prop, char_config[char_prop], mutated_char_config[char_prop])\n\n    for i, (mutated_ability, orig_ability) in enumerate(zip(mutated_char_config['abilities'], char_config['abilities'])):\n        print(f\"ability{i}: {mutated_ability['name']}\")\n        for ability_prop in melee_property_data:\n            print_change(\n                ability_prop, orig_ability[ability_prop], mutated_ability[ability_prop], begin_with=\"\\t\")\n","sub_path":"solai_evolutionary_algorithm/mutations/properties_mutation.py","file_name":"properties_mutation.py","file_ext":"py","file_size_in_byte":7864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"269416675","text":"from collections import namedtuple\nfrom utils import writeln, write_blank_if, log\nfrom template import Templates\n\nimport os\n\nFilename = namedtuple('Filename', 
['cc', 'h'])\n\n# Creates a new file in the specified path. The directories are created in the\n# \"mkdir -p\" fashion.\ndef open_file(path):\n assert(path.count('\\\\') == 0)\n\n dir_list = path.split('/')[0:-1]\n dir = \"/\".join(dir_list)\n if not os.path.exists(dir):\n os.makedirs(dir)\n assert(os.path.exists(dir))\n\n return open(path, \"w\")\n\n# Builds output file paths.\n#\n# TODO: need to figure out whether this function must merge path\n# - 'proto_name' may have a common prefix with 'out_path'...\ndef get_cpp_file_paths(file_ast, out_path):\n assert(out_path)\n if out_path[-1] != \"/\" and out_path[-1] != \"\\\\\":\n out_path += \"/\"\n\n parts = file_ast.cpp_include_path().split('.')\n assert(parts[-1] == \"h\")\n parts.pop(-1)\n return Filename(out_path + \".\".join(parts) + \".cc\",\n out_path + \".\".join(parts) + \".h\")\n\ndef cpp_arg_type(proto_type):\n if proto_type == \"string\" or proto_type == \"bytes\":\n return \"const std::string&\"\n elif proto_type == \"wstring\":\n return \"const std::wstring&\"\n elif proto_type[-2:] == \"32\" or proto_type[-2:] == \"64\":\n return proto_type + \"_t\"\n else:\n return proto_type.replace(\".\", \"::\")\n\ndef cpp_impl_type(proto_type):\n if proto_type == \"string\" or proto_type == \"bytes\":\n return \"std::string\"\n elif proto_type == \"wstring\":\n return \"std::wstring\"\n elif proto_type[-2:] == \"32\" or proto_type[-2:] == \"64\":\n return proto_type + \"_t\"\n else:\n return proto_type.replace(\".\", \"::\")\n\n#\n# File\n#\nclass File:\n def __init__(self, *args, **kwargs):\n super(File, self).__init__(*args, **kwargs)\n\n def generate(self, out_path):\n log(0, \"Generating C++ code for \" + self.path)\n fname = get_cpp_file_paths(self, out_path)\n self.generate_header(fname.h)\n self.generate_source(fname.cc)\n\n file = open_file(args.cpp_out + \"/infra.h\")\n writeln(file, Templates.infra)\n\n def count_extends(self):\n count = len(self.extends)\n for _, msg in self.messages.items():\n count += msg.count_extends()\n return count\n\n def generate_header(self, fname):\n file = open_file(fname)\n\n writeln(file, \"#pragma once\\n\")\n writeln(file, \"#include \")\n writeln(file, \"#include \")\n writeln(file, \"#include \")\n writeln(file, \"#include \")\n writeln(file, \"#include \")\n writeln(file, \"#include \")\n writeln(file, \"\")\n\n # Generate and print forward type declarations bunched by their namespace\n self.generate_forward_imported_declarations_header(file)\n\n # Top-level enums.\n for _, enum in self.enums.items():\n enum.generate_declaration(file)\n if len(self.enums.keys()) > 0:\n writeln(file, \"\")\n\n extend_count = self.count_extends()\n\n # Messages.\n for _, msg in self.messages.items():\n msg.generate_header(file, self.namespace)\n\n # Extension declarations\n if len(self.extends) > 0:\n writeln(file, \"// Extensions\")\n for _, extend in self.extends.items():\n extend.generate_extend_declarations(file, 0)\n writeln(file, \"\")\n\n for ns in reversed(self.namespace.split(\".\")):\n writeln(file, \"} // \" + ns)\n\n # Extension helpers\n if extend_count > 0:\n writeln(file, \"\")\n writeln(file, \"namespace proto_ng { namespace detail {\")\n for _, extend in self.extends.items():\n extend.generate_extend_helpers(file)\n for _, msg in self.messages.items():\n msg.generate_extend_helpers(file)\n writeln(file, \"} }\")\n writeln(file, \"\")\n\n # Hashing and equivalence support\n for _, msg in self.messages.items():\n msg.generate_hasher(file)\n msg.generate_equivalence(file)\n\n def 
generate_forward_imported_declarations_header(self, file):\n # First build sets of the enums/messages used by this File.\n fdecls = {}\n for name, file_ast in self.imports.items():\n if not file_ast.namespace in fdecls:\n fdecls[file_ast.namespace] = set()\n file_ast.generate_forward_imported_declarations(\n self.imported_type_names,\n fdecls[file_ast.namespace])\n\n # Now print them in \"imported\" batches.\n for ns in sorted(fdecls.keys()):\n decl_set = fdecls[ns]\n writeln(file, \"// Forward declarations from \" + ns)\n for part in ns.split(\".\"):\n writeln(file, \"namespace \" + part + \" {\")\n\n for decl in sorted(decl_set):\n writeln(file, decl)\n\n for part in reversed(ns.split(\".\")):\n writeln(file, \"} // \" + part)\n writeln(file, \"\")\n\n for ns in self.namespace.split(\".\"):\n writeln(file, \"namespace \" + ns + \" {\")\n writeln(file, \"\")\n\n def generate_source(self, fname):\n file = open_file(fname)\n\n writeln(file, Templates.impl)\n\n # Include directives. At this point we need every generated type that comes from\n # every \"import\" statement.\n writeln(file, \"#include <\" + self.cpp_include_path() + \">\")\n for _, file_ast in self.imports.items():\n writeln(file, \"#include <\" + file_ast.cpp_include_path() + \">\")\n writeln(file, \"\")\n\n for ns in self.namespace.split(\".\"):\n writeln(file, \"namespace \" + ns + \" {\")\n writeln(file, \"\")\n\n # Messages\n for _, msg in self.messages.items():\n msg.generate_source(file, self.namespace)\n\n # Enums\n for _, enum in self.enums.items():\n enum.generate_definition(file)\n\n for ns in reversed(self.namespace.split(\".\")):\n writeln(file, \"} // \" + ns)\n\n # Finally generate extension objects. It's easier to declare these in a FQ fashion.\n for _, extend in self.extends.items():\n extend.generate_extend_definition(file)\n for _, msg in self.messages.items():\n msg.generate_extend_definition(file)\n\n def generate_forward_imported_declarations(self, imported_set, decl_set):\n for _, enum in self.enums.items():\n enum.generate_forward_declaration(imported_set, decl_set)\n\n for _, msg in self.messages.items():\n msg.generate_forward_imported_declarations(imported_set, decl_set)\n\n\n#\n# Enum\n#\nclass Enum:\n def __init__(self, *args, **kwargs):\n super(Enum, self).__init__(*args, **kwargs)\n\n def generate_forward_declaration(self, imported_set, decl_set):\n assert(self.impl_cpp_type)\n\n if self.fq_name in imported_set:\n decl_set.add(\"enum \" + self.impl_cpp_type.split(\"::\")[-1] + \" : int;\")\n\n # Decorate scoped enums so that their values remain unique.\n def decorate(self, value):\n if self.is_package_global:\n return value\n return self.impl_cpp_type + \"_\" + value\n\n def generate_declaration(self, file):\n writeln(file, \"enum \" + self.impl_cpp_type + \" : int {\")\n for id, value in self.values.items():\n writeln(file, self.decorate(value) + \" = \" + str(id) + \",\", 1)\n writeln(file, \"};\")\n writeln(file, \"std::ostream& operator<<(std::ostream&, \" + self.impl_cpp_type + \");\")\n\n def generate_shortcut_declarations(self, file, indent):\n writeln(file, \"// Enum: \" + self.fq_name, indent)\n writeln(file, \"using \" + self.name() + \" = \" + self.impl_cpp_type + \";\", indent)\n for id, value in self.values.items():\n writeln(file,\n \"const static \" + self.impl_cpp_type + \" \" + value + \" = \"\n + self.impl_cpp_type + \"_\" + value + \";\",\n indent)\n writeln(file, \"\")\n\n def generate_definition(self, file):\n writeln(file, \"// Enum: \" + self.fq_name)\n writeln(file, 
\"std::ostream& operator<<(std::ostream& st, \" + self.impl_cpp_type + \" val) {\")\n writeln(file, \"switch (val) {\", 1)\n for id, value in self.values.items():\n writeln(file, \"case \" + self.cpp_value_prefix() + value + ': // [' + str(id) + ']' , 1)\n writeln(file, 'st << \"' + value + '\";', 2)\n writeln(file, 'break;', 2)\n writeln(file, \"default:\", 1)\n writeln(file, 'st << \"\";', 2)\n writeln(file, \"}\", 1)\n writeln(file, \"return st;\", 1)\n writeln(file, \"}\")\n writeln(file, \"\")\n\n def initializer(self):\n # proto3\n if 0 in self.values.keys():\n return self.ns + \"::\" + self.decorate(self.values[0])\n\n # proto2 - ToDo: this should be an ordered list to preserver semantics!\n assert(len(self.values) > 0)\n if args.with_warnings:\n log(0, \"Warning: changing semantics for \" + self.fq_name + \" usage!\")\n return self.ns + \"::\" + self.decorate(str(list(self.values.values())[0]))\n\n\n#\n# Message\n#\nclass Message:\n def __init__(self, *args, **kwargs):\n super(Message, self).__init__(*args, **kwargs)\n\n def generate_header(self, file, ns, indent = 0):\n forwards = self.generate_forward_declarations(file)\n if forwards > 0:\n writeln(file, \"\")\n\n # Enums\n if len(self.enums) > 0:\n writeln(file, \"// Enums\")\n for _, enum in self.enums.items():\n enum.generate_declaration(file)\n if len(self.enums) > 0:\n writeln(file, \"\")\n\n # Start the C++ class.\n writeln(file, \"class \" + self.impl_cpp_type + \" {\", indent)\n writeln(file, \"public:\", indent)\n\n # Construction and assignment\n writeln(file, \"// Construction and assignment\", indent + 1)\n writeln(file, self.impl_cpp_type + \"();\", indent + 1)\n writeln(file, self.impl_cpp_type + \"(const \" + self.impl_cpp_type + \"&);\",\n indent + 1)\n writeln(file, self.impl_cpp_type + \"(\" + self.impl_cpp_type + \"&&);\",\n indent + 1)\n writeln(file, self.impl_cpp_type + \"& \" + \"operator=(const \" + \\\n self.impl_cpp_type + \"&);\",\n indent + 1)\n writeln(file, self.impl_cpp_type + \"& \" + \"operator=(\" + \\\n self.impl_cpp_type + \"&&);\",\n indent + 1)\n writeln(file, \"~\" + self.impl_cpp_type + \"();\", indent + 1)\n writeln(file, \"\")\n\n # Generally accessible, common API\n writeln(file, \"// Common API\", indent + 1)\n writeln(file,\n \"static const \" + self.impl_cpp_type + \"& \" + \"default_instance();\",\n indent + 1)\n writeln(file, \"void Clear();\", indent + 1)\n writeln(file, \"bool ParseFromString(const std::string& input_data);\", indent + 1)\n writeln(file, \"std::string SerializeAsString() const;\", indent + 1)\n writeln(file, 'std::string DebugString(std::string prefix = \"\") const;', indent + 1)\n writeln(file, \"std::string ShortDebugString() const;\", indent + 1)\n writeln(file, \"\")\n\n # Equality\n writeln(file, \"// This type is Regular and totally ordered.\", indent + 1)\n writeln(file, \"bool operator<(const \" + self.impl_cpp_type + \"&) const;\", indent + 1)\n writeln(file,\n \"friend bool operator>=(const \" + self.impl_cpp_type + \"& a, \" +\n \"const \" + self.impl_cpp_type + \"& b) { return !(a < b); }\",\n indent + 1)\n writeln(file,\n \"friend bool operator>(const \" + self.impl_cpp_type + \"& a, \" +\n \"const \" + self.impl_cpp_type + \"& b) { return b < a; }\",\n indent + 1)\n writeln(file,\n \"friend bool operator<=(const \" + self.impl_cpp_type + \"& a, \" +\n \"const \" + self.impl_cpp_type + \"& b) { return !(b > a); }\",\n indent + 1)\n writeln(file,\n \"friend bool operator==(const \" + self.impl_cpp_type + \"& a, \" +\n \"const \" + self.impl_cpp_type + 
\"& b) { return !(a > b) && !(a < b); }\",\n indent + 1)\n writeln(file,\n \"friend bool operator!=(const \" + self.impl_cpp_type + \"& a, \" +\n \"const \" + self.impl_cpp_type + \"& b) { return !(a == b); }\",\n indent + 1)\n writeln(file, \"\")\n\n # Extension support\n if self.min_extension_id:\n writeln(file, \"// Extension API (the base type's part)\", indent + 1)\n writeln(file, \"template\", indent + 1)\n writeln(file, \"bool HasExtension(Extension ext) const {\", indent + 1)\n writeln(file, \"return _HasField(::proto_ng::detail::ResolveField(ext));\", indent + 2)\n writeln(file, \"}\", indent + 1)\n\n writeln(file, \"template\", indent + 1)\n writeln(file,\n \"typename ::proto_ng::detail::Helper::mutable_ptr \" +\n \"MutableExtension(Extension ext) {\",\n indent + 1)\n writeln(file, \"if (!HasExtension(ext)) return nullptr;\", indent + 2)\n writeln(file,\n \"return reinterpret_cast::mutable_ptr>(\",\n indent + 2)\n writeln(file, \"_GetField(::proto_ng::detail::ResolveField(ext)));\", indent + 3)\n writeln(file, \"}\", indent + 1)\n\n writeln(file, \"template\", indent + 1)\n writeln(file,\n \"typename ::proto_ng::detail::Helper::ref \" +\n \"GetExtension(Extension ext) const {\",\n indent + 1)\n writeln(file, \"assert(HasExtension(ext));\", indent + 2)\n writeln(file,\n \"auto ptr = reinterpret_cast::ptr>(\",\n indent + 2)\n writeln(file, \"_GetField(::proto_ng::detail::ResolveField(ext)));\", indent + 3)\n writeln(file,\n \"return *ptr;\", indent + 2)\n\n writeln(file, \"}\", indent + 1)\n\n writeln(file, \"bool _HasField(int id) const { return false; }\", indent + 1)\n writeln(file, \"void* _GetField(int id) { return nullptr; }\", indent + 1)\n writeln(file, \"const void* _GetField(int id) const { return nullptr; }\", indent + 1)\n writeln(file, \"\")\n\n # Extensions\n if len(self.extends) > 0:\n writeln(file, \"// Extensions\", indent + 1)\n for _, extend in self.extends.items():\n extend.generate_extend_declarations(file, indent + 1)\n writeln(file, \"\")\n\n # Aliases for sub-messages\n if len(self.messages) > 0:\n writeln(file, \"// Sub-messages\", 1)\n for _, sub_msg in self.messages.items():\n writeln(file,\n \"using \" + sub_msg.name() + \" = \" + sub_msg.impl_cpp_type + \";\",\n 1)\n write_blank_if(file, self.messages)\n\n # Aliases for sub-enums.\n for _, enum in self.enums.items():\n enum.generate_shortcut_declarations(file, 1)\n\n # Fields\n for id, field in self.fields.items():\n field.generate_accessor_declarations(file, indent + 1)\n\n # Implementation\n writeln(file, \" private:\", indent)\n writeln(file, \"struct Representation;\", indent + 1)\n writeln(file, \"std::unique_ptr rep_;\", indent + 1)\n\n writeln(file, \"};\\n\", indent)\n\n # Sub-messages\n #\n # The header cannot be generated in the normal/native DFS style as C++ does not\n # allow forward declarations Outer::Inner. 
So, just pre-order DFS to flatten out\n # the tree.\n for _, sub_msg in self.messages.items():\n sub_msg.generate_header(file, ns)\n\n def generate_hasher(self, file, indent = 0):\n with_hashing = False\n for _, field in self.fields.items():\n for name, value in field.options.items():\n if name.find(\"include_in_hash\") >= 0:\n with_hashing = True\n if not with_hashing: return\n\n writeln(file, \"namespace std {\", indent)\n writeln(file, \"// Hashing for \" + self.fq_name)\n writeln(file, \"template<>\", indent)\n writeln(file, \"struct hash<\" + self.fq_cpp_ref() + \"> {\", indent)\n writeln(file, \"using argument_type = \" + self.fq_cpp_ref() + \";\", indent + 1)\n writeln(file, \"using result_type = std::size_t;\", indent + 1)\n writeln(file, \"size_t operator()(const argument_type& arg) const noexcept {\", indent + 1)\n writeln(file, \"size_t hash = 0;\", indent + 2)\n\n for _, field in self.fields.items():\n for name, value in field.options.items():\n if name.find(\"include_in_hash\") >= 0:\n writeln(file, \"::proto_ng::hash_combine(hash, arg.\" + field.name + \"());\",\n indent + 2)\n break\n writeln(file, \"return hash;\", indent + 2)\n writeln(file, \"}\", indent + 1)\n\n writeln(file, \"};\", indent)\n writeln(file, \"} // std\", indent)\n writeln(file, \"\")\n\n def generate_equivalence(self, file, indent = 0):\n with_hashing = False\n for _, field in self.fields.items():\n for name, value in field.options.items():\n if name.find(\"include_in_equivalence\") >= 0:\n with_hashing = True\n if not with_hashing: return\n\n writeln(file, \"namespace std {\", indent)\n writeln(file, \"// Equivalence for \" + self.fq_name)\n writeln(file, \"template<>\", indent)\n writeln(file, \"struct equal_to<\" + self.fq_cpp_ref() + \"> {\", indent)\n writeln(file, \"using result_type = bool;\", indent + 1)\n writeln(file, \"using first_argument_type = \" + self.fq_cpp_ref() + \";\", indent + 1)\n writeln(file, \"using second_argument_type = \" + self.fq_cpp_ref() + \";\", indent + 1)\n writeln(file,\n \"size_t operator()(const %s& a, const %s& b) const noexcept {\" %\n (self.fq_cpp_ref(), self.fq_cpp_ref()),\n indent + 1)\n\n for _, field in self.fields.items():\n for name, value in field.options.items():\n if name.find(\"include_in_equivalence\") >= 0:\n writeln(file, \"if (a.\" + field.name + \"() != b.\" + field.name + \"()) return false;\",\n indent + 2)\n break\n writeln(file, \"return true;\", indent + 2)\n writeln(file, \"}\", indent + 1)\n\n writeln(file, \"};\", indent)\n writeln(file, \"} // std\", indent)\n writeln(file, \"\")\n\n def generate_forward_declarations(self, file):\n # Forward declarations for sub-messages and enums.\n forwards = 0\n for _, sub_msg in self.messages.items():\n writeln(file, \"class \" + sub_msg.impl_cpp_type + \";\")\n forwards += sub_msg.generate_forward_declarations(file)\n\n # Forward declarations for the implicitly declared (forward-declared) local messages.\n for _, field in self.fields.items():\n if field.is_forward_decl:\n writeln(file, \"class \" + \"_\".join(field.raw_type.split(\".\")) + \";\")\n forwards += 1\n\n # Enums\n for _, enum in self.enums.items():\n writeln(file, \"enum \" + enum.impl_cpp_type.split(\"::\")[-1] + \" : int;\")\n forwards += 1\n return forwards\n\n def generate_forward_imported_declarations(self, imported_set, decl_set):\n assert(self.impl_cpp_type)\n if self.fq_name in imported_set:\n decl_set.add(\"class \" + self.impl_cpp_type.split(\"::\")[-1] + \";\")\n\n for _, enum in self.enums.items():\n 
enum.generate_forward_declaration(imported_set, decl_set)\n\n # (Sub)Messages\n for _, sub_msg in self.messages.items():\n sub_msg.generate_forward_imported_declarations(imported_set, decl_set)\n\n def count_extends(self):\n count = len(self.extends)\n for _, msg in self.messages.items():\n count += msg.count_extends()\n return count\n\n def generate_extend_declarations(self, file, indent):\n assert(self.is_extend)\n\n for id, field in self.fields.items():\n # Deal with C++ scopes - namespace-based things are \"extern\" while class-based\n # things are static.\n if self.is_file_scope():\n prefix = \"extern\"\n else:\n prefix = \"static\"\n writeln(file,\n prefix + \" const struct \" + field.name + \"_t { int id; } \" +\n field.name + \";\",\n indent)\n\n def generate_extend_helpers(self, file):\n if self.is_extend:\n for _, field in self.fields.items():\n field.generate_extend_helpers(file)\n return\n\n for _, extend in self.extends.items():\n extend.generate_extend_helpers(file)\n for _, msg in self.messages.items():\n msg.generate_extend_helpers(file)\n\n def generate_extend_definition(self, file):\n if self.is_extend:\n for id, field in self.fields.items():\n writeln(file,\n \"const ::\" + field.parent.cpp_extend_namespace() + \"::\" + field.name + \"_t \" +\n field.parent.cpp_extend_namespace() + \"::\" +\n field.name + \"{\" + str(id) + \"};\")\n\n for _, extend in self.extends.items():\n extend.generate_extend_definition(file)\n for _, msg in self.messages.items():\n msg.generate_extend_definition(file)\n\n def generate_source(self, file, ns):\n # Implementation\n writeln(file, \"//\")\n writeln(file, \"// \" + self.fq_name)\n writeln(file, \"//\")\n writeln(file, \"struct \" + self.impl_cpp_type + \"::Representation {\")\n for id, field in self.fields.items():\n field.generate_implementation_definition(file)\n writeln(file, \"\")\n if len(self.fields) > 0:\n writeln(file,\n \"std::bitset<\" + str(sorted(self.fields.keys())[-1] + 1) + \"> _Presence;\",\n 1)\n writeln(file, \"};\\n\")\n\n # Construction, copying and assignment\n writeln(file, self.impl_cpp_type + \"::\" + self.impl_cpp_type +\n \"() : rep_(std::make_unique<Representation>()) {}\")\n writeln(file,\n self.impl_cpp_type + \"::\" + self.impl_cpp_type +\n \"(const \" + self.impl_cpp_type + \"& arg) : rep_(new Representation(*arg.rep_)) {}\")\n writeln(file, self.impl_cpp_type + \"::\" + self.impl_cpp_type +\n \"(\" + self.impl_cpp_type + \"&&) = default;\")\n writeln(file,\n self.impl_cpp_type + \"& \" + self.impl_cpp_type + \"::operator=(\" +\n \"const \" + self.impl_cpp_type + \"& arg) { \")\n writeln(file, \"if (this != &arg) *rep_ = *arg.rep_;\", 1)\n writeln(file, \"return *this;\", 1)\n writeln(file, \"}\")\n writeln(file, self.impl_cpp_type + \"& \" + self.impl_cpp_type + \"::operator=(\" +\n self.impl_cpp_type + \"&&) = default;\")\n writeln(file, self.impl_cpp_type + \"::~\" + self.impl_cpp_type + \"() = default;\")\n writeln(file, \"\")\n\n writeln(file, \"const \" + self.impl_cpp_type + \"& \" + self.impl_cpp_type + \"::default_instance() {\")\n writeln(file, \"static \" + self.impl_cpp_type + \" obj;\", 1)\n writeln(file, \"return obj;\", 1)\n writeln(file, \"}\")\n writeln(file, \"\")\n\n writeln(file, \"void \" + self.impl_cpp_type + \"::Clear() {\")\n writeln(file, \"*this = default_instance();\", 1)\n writeln(file, \"rep_->_Presence.reset();\", 1)\n writeln(file, \"}\")\n writeln(file, \"\")\n\n # The key comparison operator on which Regular semantics are built\n writeln(file,\n \"bool \" + self.impl_cpp_type + 
\"::operator<(const \" + self.impl_cpp_type +\n \"& arg) const {\")\n for _, field in self.fields.items():\n field.generate_less_check(file, 1)\n writeln(file, \"return false;\", 1)\n writeln(file, \"}\")\n writeln(file, \"\")\n\n # Debug helper functions\n writeln(file,\n \"std::string \" + self.impl_cpp_type + \"::DebugString(std::string prefix) const {\")\n writeln(file, \"std::stringstream ss;\", 1)\n writeln(file, \"\")\n for _, field in self.fields.items():\n field.generate_debug_output(file, 1)\n writeln(file, \"return ss.str();\", 1)\n writeln(file, \"}\")\n writeln(file, \"\")\n\n # Field accessors for the given message\n for id, field in self.fields.items():\n field.generate_accessor_definitions(file)\n\n # Enums\n for _, enum in self.enums.items():\n enum.generate_definition(file)\n\n # Sub-messages\n #\n # The header cannot be generated in the normal/native DFS style as C++ does not\n # allow forward declarations Outer::Inner. So, just pre-order DFS to flatten out\n # the tree.\n for _, sub_msg in self.messages.items():\n sub_msg.generate_source(file, ns)\n\n\n#\n# Field\n#\nclass Field:\n def __init__(self, *args, **kwargs):\n super(Field, self).__init__(*args, **kwargs)\n\n # Returns the full type of the field's accessor respecting the \"repeated\" tag's presence.\n def cpp_type_ref(self):\n def repeated(type):\n if self.is_map:\n return \"std::map<\" + type + \">\"\n elif self.is_repeated:\n return \"std::vector<\" + type + \">\"\n return type\n return repeated(self.base_cpp_type_ref())\n\n # Returns the C++ type of the field disregarding the \"repeated\" tag's presence.\n def base_cpp_type_ref(self):\n if self.is_builtin:\n assert(not self.resolved_type)\n return cpp_impl_type(self.raw_type)\n\n if self.is_map:\n if self.resolved_type:\n return cpp_impl_type(self.raw_type) + \", \" + self.mapped_type\n else:\n return cpp_impl_type(self.raw_type) + \", \" + cpp_impl_type(self.mapped_type)\n\n assert(self.resolved_type)\n if self.is_fq_ref:\n return self.resolved_type.fq_cpp_ref()\n else:\n return self.resolved_type.impl_cpp_type\n\n def generate_extend_helpers(self, file):\n # TODO(Oleg): get base types working here.\n if not self.resolved_type:\n return\n\n writeln(file, \"// [\" + str(self.id) + \"] \" + self.name + \" : \" + self.resolved_type.fq_name)\n writeln(file, \"template<>\")\n writeln(file,\n \"inline int ResolveField(::\" + self.parent.cpp_extend_namespace() + \"::\" +\n self.name + \"_t) { return \" + str(self.id) + \"; }\")\n\n writeln(file, \"template<>\")\n writeln(file,\n \"struct Helper<::\" + self.parent.cpp_extend_namespace() + \"::\" + self.name + \"_t> {\")\n writeln(file, \"using ref = const ::\" + self.resolved_type.fq_cpp_ref() + \"&;\", 1)\n writeln(file, \"using ptr = const ::\" + self.resolved_type.fq_cpp_ref() + \"*;\", 1)\n writeln(file, \"using mutable_ptr = ::\" + self.resolved_type.fq_cpp_ref() + \"*;\", 1)\n writeln(file, \"};\")\n writeln(file, \"\")\n\n def initializer(self):\n assert(self.is_enum)\n return self.resolved_type.initializer()\n\n if type(self.resolved_type.parent) == File:\n return self.resolved_type.initializer()\n\n return self.resolved_type.parent.impl_cpp_type + \"::\" + \\\n self.resolved_type.initializer()\n\n def generate_accessor_declarations(self, file, indent):\n writeln(file, \"// [\" + str(self.id) + \"] \" + self.name, indent)\n if self.is_builtin and not self.is_container():\n # These accessors take built-in args by value.\n writeln(file,\n self.cpp_type_ref() + \" \" + self.name + \"() const;\",\n indent)\n 
writeln(file,\n \"void set_\" + self.name + \"(\" + self.cpp_type_ref() + \");\",\n indent)\n elif self.is_enum and not self.is_container():\n # This one must deal with scopes, but the accessors work as built-ins.\n writeln(file,\n self.cpp_type_ref() + \" \" + self.name + \"() const;\",\n indent)\n writeln(file,\n \"void set_\" + self.name + \"(\" + self.cpp_type_ref() + \");\",\n indent)\n else:\n # These are sub-messages/containers and, thus, have reference-based accessors.\n writeln(file,\n \"const \" + self.cpp_type_ref() + \"& \" + self.name + \"() const;\",\n indent)\n writeln(file,\n self.cpp_type_ref() + \"& \" + self.name + \"();\",\n indent)\n if not args.omit_deprecated:\n writeln(file,\n \"/* deprecated */ \" + self.cpp_type_ref() + \"* mutable_\" + self.name + \"();\",\n indent)\n\n if not self.is_container():\n writeln(file, \"bool has_\" + self.name + \"() const;\", indent)\n writeln(file, \"void clear_\" + self.name + \"();\", indent)\n\n if self.is_container() and not args.omit_deprecated:\n writeln(file,\n \"/* deprecated */ \" + \"void clear_\" + self.name + \"();\",\n indent)\n\n if self.is_repeated and not args.omit_deprecated:\n if self.is_builtin or self.is_enum:\n writeln(file,\n \"/* deprecated */ \" + \\\n \"void add_\" + self.name + \"(\" + self.base_cpp_type_ref() + \");\",\n indent)\n writeln(file,\n \"/* deprecated */ \" + \\\n self.base_cpp_type_ref() + \" \" + self.name + \"(int idx) const;\",\n indent)\n else:\n writeln(file,\n \"/* deprecated */ \" + \\\n self.base_cpp_type_ref() + \"* add_\" + self.name + \"();\",\n indent)\n writeln(file,\n \"/* deprecated */ \" + \\\n self.base_cpp_type_ref() + \"* mutable_\" + self.name + \"(int idx);\",\n indent)\n writeln(file,\n \"/* deprecated */ \" + \\\n \"const \" + self.base_cpp_type_ref() + \"& \" + self.name + \"(int idx) const;\",\n indent)\n writeln(file,\n \"/* deprecated */ \" + \"int32_t \" + self.name + \"_size() const;\",\n indent)\n writeln(file, \"\")\n\n def generate_accessor_definitions(self, file):\n writeln(file, \"// [\" + str(self.id) + \"] \" + self.name)\n if self.is_builtin and not self.is_container():\n writeln(file,\n self.cpp_type_ref() + \" \" \\\n + self.parent.impl_cpp_type + \"::\" + self.name + \"() const {\")\n writeln(file, \"return rep_->\" + self.name + \";\", 1)\n writeln(file, \"}\")\n writeln(file,\n \"void \" + self.parent.impl_cpp_type + \"::set_\" + self.name + \\\n \"(\" + self.cpp_type_ref() + \" val) {\")\n writeln(file, \"rep_->\" + self.name + \" = val;\", 1)\n writeln(file, \"rep_->_Presence.set(\" + str(self.id) + \");\", 1)\n writeln(file, \"}\")\n elif self.is_enum and not self.is_container():\n writeln(file,\n self.cpp_type_ref() + \" \" \\\n + self.parent.impl_cpp_type + \"::\" + self.name + \"() const {\")\n writeln(file, \"return rep_->\" + self.name + \";\", 1)\n writeln(file, \"}\")\n writeln(file,\n \"void \" + self.parent.impl_cpp_type + \"::set_\" + self.name + \\\n \"(\" + self.cpp_type_ref() + \" val) {\")\n writeln(file, \"rep_->\" + self.name + \" = val;\", 1)\n writeln(file, \"rep_->_Presence.set(\" + str(self.id) + \");\", 1)\n writeln(file, \"}\")\n else:\n writeln(file,\n \"const \" + self.cpp_type_ref() + \"& \" + \\\n self.parent.impl_cpp_type + \"::\" + self.name + \"() const {\")\n writeln(file, \"return rep_->\" + self.name + \";\", 1)\n writeln(file, \"}\")\n writeln(file,\n self.cpp_type_ref() + \"& \" + \\\n self.parent.impl_cpp_type + \"::\" + self.name + \"() {\")\n writeln(file, \"rep_->_Presence.set(\" + str(self.id) + \");\", 1)\n 
writeln(file, \"return rep_->\" + self.name + \";\", 1)\n writeln(file, \"}\")\n\n if not self.is_container():\n writeln(file,\n \"void \" + self.parent.impl_cpp_type + \"::clear_\" + self.name + \"() {\")\n if self.is_algebraic:\n writeln(file, \"rep_->\" + self.name + \" = 0;\", 1)\n elif self.is_builtin:\n writeln(file, \"rep_->\" + self.name + \".clear();\", 1)\n elif self.is_enum:\n writeln(file, \"rep_->\" + self.name + \" = \" + self.initializer() + \";\", 1)\n else:\n writeln(file, \"rep_->\" + self.name + \".Clear();\", 1)\n writeln(file, \"rep_->_Presence.reset(\" + str(self.id) + \");\", 1)\n writeln(file, \"}\")\n\n writeln(file,\n \"bool \" + self.parent.impl_cpp_type + \"::has_\" + self.name + \"() const {\")\n writeln(file, \"return rep_->_Presence.test(\" + str(self.id) + \");\", 1)\n writeln(file, \"}\")\n\n if self.is_repeated and not args.omit_deprecated:\n if self.is_builtin or self.is_enum:\n writeln(file,\n \"/* deprecated */ void \" + self.parent.impl_cpp_type + \"::add_\" + self.name + \"(\" + \\\n self.base_cpp_type_ref() + \" value) {\")\n writeln(file, self.name + \"().push_back(std::move(value));\", 1)\n writeln(file, \"}\")\n writeln(file,\n \"/* deprecated */ \" + \\\n self.base_cpp_type_ref() + \" \" + self.parent.impl_cpp_type + \"::\" + \\\n self.name + \"(int idx) const {\")\n writeln(file, \"return \" + self.name + \"().at(idx);\", 1)\n writeln(file, \"}\")\n else:\n writeln(file,\n \"/* deprecated */ \" + self.base_cpp_type_ref() + \"* \" + \\\n self.parent.impl_cpp_type + \"::add_\" + self.name + \"() {\")\n writeln(file, self.name + \"().push_back({});\", 1)\n writeln(file, \"return &\" + self.name + \"().back();\", 1)\n writeln(file, \"}\")\n writeln(file,\n \"/* deprecated */ \" + \\\n \"const \" + self.base_cpp_type_ref() + \"& \" + \\\n self.parent.impl_cpp_type + \"::\" + self.name + \"(int idx) const {\")\n writeln(file, \"return \" + self.name + \"().at(idx);\", 1)\n writeln(file, \"}\")\n writeln(file,\n \"/* deprecated */ \" + \\\n self.base_cpp_type_ref() + \"* \" + \\\n self.parent.impl_cpp_type + \"::mutable_\" + self.name + \"(int idx) {\")\n writeln(file, \"return &\" + self.name + \"().at(idx);\", 1)\n writeln(file, \"}\")\n writeln(file, \"\")\n\n def generate_implementation_definition(self, file):\n if self.is_algebraic and not self.is_container():\n writeln(file, cpp_impl_type(self.raw_type) + \" \" + self.name + \" = 0;\", 1)\n elif self.is_builtin and not self.is_container():\n writeln(file, cpp_impl_type(self.raw_type) + \" \" + self.name + \";\", 1)\n elif self.is_enum and not self.is_container():\n writeln(file, self.cpp_type_ref() + \" \" + self.name + \" = \" + \\\n self.initializer() + \";\", 1)\n else:\n writeln(file, self.cpp_type_ref() + \" \" + self.name + \";\", 1)\n\n def generate_less_check(self, file, indent):\n writeln(file, \"// [\" + str(self.id) + \"] \" + self.name, indent)\n\n '''\n # This may be used to implement equality that takes presence into account\n if not self.is_container() and self.is_enum:\n writeln(file,\n \"if (rep_->_Presence.test(\" + str(self.id) +\n \") != arg.rep_->_Presence.test(\" + str(self.id) + \"))\",\n indent)\n writeln(file, \"return arg.rep_->_Presence.test(\" + str(self.id) + \");\",\n indent + 1)\n '''\n\n writeln(file,\n \"if (rep_->\" + self.name + \" < arg.rep_->\" + self.name + \")\",\n indent)\n writeln(file, \"return true;\", indent + 1)\n writeln(file, \"\")\n\n def generate_debug_output(self, file, indent):\n writeln(file, \"// \" + self.as_string(\"ns\"), indent)\n\n if 
self.is_repeated:\n # This is a vector of something\n writeln(file, \"for (const auto& entry : rep_->\" + self.name + \") {\", indent)\n if self.is_builtin or self.is_enum:\n if self.is_algebraic:\n entry = 'entry'\n else:\n entry = 'Escape(entry)'\n writeln(file,\n 'ss << \"' + self.name + ': \" << ' + entry + ' << \"\\\\n\";',\n indent + 1)\n else:\n writeln(file, 'ss << prefix << \"' + self.name + ' {\\\\n\";', indent + 1)\n writeln(file, 'ss << entry.DebugString(prefix + \" \");', indent + 1)\n writeln(file, 'ss << prefix << \"}\\\\n\";', indent + 1)\n writeln(file, '}', indent)\n elif self.is_map:\n # This is a map of something\n writeln(file, \"for (const auto& entry : rep_->\" + self.name + \") {\", indent)\n writeln(file, 'ss << prefix << \"' + self.name + ' {\\\\n\";', indent + 1)\n writeln(file, 'ss << prefix << \" key: \" << entry.first << \"\\\\n\";', indent + 1)\n writeln(file, 'ss << prefix << \" value {\\\\n\";', indent + 1)\n if self.resolved_type:\n writeln(file, 'ss << entry.second.DebugString(prefix + \" \");', indent + 1)\n else:\n writeln(file, \"ss << entry.second;\", indent + 1)\n writeln(file, 'ss << \"}\\\\n\";', indent + 1)\n writeln(file, \"}\", indent)\n elif self.is_builtin or self.is_enum:\n # This is a singular built-in\n writeln(file, \"if (rep_->_Presence.test(\" + str(self.id) + \"))\",\n indent)\n if self.is_algebraic or self.is_enum:\n value = 'rep_->' + self.name\n else:\n value = 'Escape(rep_->' + self.name + ')'\n writeln(file,\n 'ss << prefix << \"' + self.name + ': \" << ' + value + ' << \"\\\\n\";',\n indent + 1)\n else:\n # This is a singular sub-message\n writeln(file, \"if (rep_->_Presence.test(\" + str(self.id) + \"))\",\n indent)\n writeln(file,\n \"ss << prefix << \\\"\" + self.name +\n \": \\\" << rep_->\" + self.name + '.DebugString(prefix + \" \");',\n indent + 1)\n writeln(file, \"\")\n","sub_path":"gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":39885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}{"seq_id":"438303186","text":"# calculate area\nprint((\"-\" * 30) + \"\\\\nArea Calculator\\\\n\" + (\"-\" * 30))\n\nprint(\"\\\\n1 - Square\\\\n2 - Rectangle\\\\n3 - Circle\\\\n\")\nchoice = raw_input(\"Your choice: \")\n\nif choice == \"1\":\n side = int(raw_input(\"A side: \"))\n print(\"Area of square is: {}\".format(side ** 2))\n\nelif choice == \"2\":\n aside = int(raw_input(\"A side: \"))\n bside = int(raw_input(\"B side: \"))\n print(\"Area of rectangle is: {}\".format(aside * bside))\n\nelif choice == \"3\":\n r = int(raw_input(\"Radius: \"))\n pi = 3.14159\n print(\"Area of circle is: {}\".format((r ** 2) * pi))\n\nelse:\n print(\"Good bye...\")","sub_path":"BasicPythonProjects/area.py","file_name":"area.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}{"seq_id":"141168388","text":"import json\n\nfrom django.shortcuts import render\nfrom django.views.generic import View\nfrom django.http import HttpResponse\n# Create your views here.\n\nfrom pure_pagination import Paginator, EmptyPage, PageNotAnInteger\nfrom .models import CourseOrg, CityDict\nfrom .forms import UserAskForm\n\n\n# Course organization list page with filtering\nclass OrgView(View):\n def get(self, request):\n all_orgs = CourseOrg.objects.all()\n hot_orgs = all_orgs.order_by('-click_nums')[:3]\n all_citys = CityDict.objects.all()\n\n # Get the selected city filter\n city_id = request.GET.get('city', '')\n if city_id:\n all_orgs = all_orgs.filter(city_id=int(city_id))\n\n # Filter by organization category\n 
category = request.GET.get('ct', '')\n if category:\n all_orgs = all_orgs.filter(category=category)\n\n # Sorting\n sort = request.GET.get('sort', '')\n if sort == 'students':\n all_orgs = all_orgs.order_by('-students')\n elif sort == 'courses':\n all_orgs = all_orgs.order_by('-course_nums')\n\n # Do the count only after filtering is complete\n org_nums = all_orgs.count()\n\n # Pagination\n try:\n page = request.GET.get('page', 1)\n except PageNotAnInteger:\n page = 1\n\n p = Paginator(all_orgs, 2, request=request)\n\n orgs = p.page(page)\n\n return render(request, 'org-list.html', {\n 'all_orgs': orgs,\n 'all_citys': all_citys,\n 'org_nums': org_nums,\n 'city_id': city_id,\n 'sort': sort,\n 'category': category,\n 'hot_orgs': hot_orgs,\n })\n\n\n# Form submission for a user adding a course consultation\nclass AddUserAskView(View):\n def post(self, request):\n user_ask_form = UserAskForm(request.POST)\n res = dict()\n if user_ask_form.is_valid():\n user_ask_form.save(commit=True)\n res['status'] = 'success'\n else:\n res['status'] = 'fail'\n res['msg'] = '添加出错'\n return HttpResponse(json.dumps(res), content_type='application/json')\n","sub_path":"apps/organization/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}{"seq_id":"525998748","text":"# -*- coding: utf-8 -*-\n\nfrom channel import message_queue\nfrom channel.messages import CreateResourceMessage\nimport uuid\n\nclass Resource(object):\n\t# items = DynamicResourceList()\n\n\tdef __init__(self, **values):\n\t\tself._values = values\n\t\tmessage_queue.put(CreateResourceMessage(self))\n\n\tdef reference(self):\n\t\tif not hasattr(self, '_reference'):\n\t\t\tself._reference = uuid.uuid4()\n\t\treturn self._reference\n\n\tdef as_dict(self):\n\t\tvalues = {}\n\t\tfor attribute_name in self.__class__.attributes:\n\t\t\tvalues[attribute_name] = self[attribute_name]\n\t\treturn {'class': self.__class__.__name__, 'reference': str(self.reference()), 'values': values}\n\n\tdef __getitem__(self, name):\n\t\tif name not in self.__class__.attributes:\n\t\t\traise AttributeError(name)\n\t\ttry:\n\t\t\treturn self._values[name]\n\t\texcept KeyError:\n\t\t\treturn None\n\n\tdef __setitem__(self, name, value):\n\t\tif name not in self.__class__.attributes:\n\t\t\traise AttributeError(name)\n\t\tif not isinstance(value, self.__class__.attributes[name]):\n\t\t\traise ValueError(\"Resource '%s' attribute '%s' must be of type '%s', not '%s'\" % (self.__class__.__name__, name, self.__class__.attributes[name], value.__class__))\n\t\tself._values[name] = value\n\nclass DynamicResourceList(object):\n\tdef __init__(self):\n\t\tself._items = []\n\n\tdef append(self, item):\n\t\tmessage = item.as_dict()\n\t\tmessage['type'] = 'append'\n\t\tself._items.append(item)\n\t\tmessage_queue.put(message)\n\n\tdef as_list(self):\n\t\treturn self._items\n\nclass User(Resource):\n\tpass\n","sub_path":"resource.py","file_name":"resource.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}{"seq_id":"291280235","text":"#Django environment setup\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"top2000dumpert.settings\")\nimport django\ndjango.setup()\nfrom django.db.models import Min\nfrom top2000.models import DumpertItem\nimport requests\nfrom bs4 import BeautifulSoup\n\n\nclass RetriveDumpertItems:\n def __init__(self):\n #Information for making the HTTP request\n self.endpoint = \"https://www.dumpert.nl/\"\n self.cookie = {'cpc': '10'}\n 
self.headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}\n\n #The data for a dumpert item\n self.title = ''\n self.imageurl = ''\n self.views = 0\n self.kudos = 0\n self.url = ''\n\n def get_result_from_dumpert(self, extension):\n return requests.get(self.endpoint + str(extension), headers=self.headers, cookies=self.cookie)\n\n def get_dumpertitems_from_page(self, pagenumber):\n result = self.get_result_from_dumpert(str(pagenumber))\n\n soup = BeautifulSoup(result.content, 'html.parser')\n dumpertitemscontainer = soup.find_all('section', {'class': 'dump-cnt'})\n dumpertitems = dumpertitemscontainer[0].find_all('a', {'class': 'dumpthumb'})\n\n count = 0\n items = []\n for dumpertitem in dumpertitems:\n \"\"\"print('#############################')\n print(str(count) + '\\\\n')\n print(dumpertitem['title'])\n print(dumpertitem.find('img')['src'])\n details = dumpertitem.find('div', {'class': 'details'})\n\n print('#############################\\\\n')\n count += 1\"\"\"\n dp = RetriveDumpertItems()\n dp.title = dumpertitem['title']\n dp.imageurl = dumpertitem.find('img')['src'][35:]\n\n\n #The kudos and views sit on one line inside a p element, so we have to pull them out of it\n #They are always in the same position, though\n details = dumpertitem.find('div', {'class': 'details'})\n viewsandkudos = details.find('p', {'class': 'stats'}).text.split()\n\n dp.views = int(viewsandkudos[1])\n dp.kudos = int(viewsandkudos[3])\n dp.url = dumpertitem['href'][33:]\n items.append(dp)\n return items\n\n def enter_dumpertitem_into_db(self,dumpertitem):\n dumpertitemdb = DumpertItem.objects.filter(title=dumpertitem.title, imageurl=dumpertitem.imageurl,\n url=dumpertitem.url)\n if len(dumpertitemdb) >= 1:\n print(\"update\")\n dumpertitemdb[0].views = dumpertitem.views\n dumpertitemdb[0].kudos = dumpertitem.kudos\n dumpertitemdb[0].save()\n else:\n print(\"new\")\n print(dumpertitem.title)\n print(dumpertitem.imageurl)\n print(dumpertitem.url)\n dp = DumpertItem.objects.create(title=dumpertitem.title, imageurl=dumpertitem.imageurl, url=dumpertitem.url,\n kudos=dumpertitem.kudos, views=dumpertitem.views)\n dp.save()\n\n return\n\n def remove_lowest_kudos_in_db(self):\n try:\n id = DumpertItem.objects.all()[DumpertItem.objects.all().count() - 1].id\n print(id)\n DumpertItem.objects.get(id=id).delete()\n except:\n print(\"iets fout gegaan tijdens het verwijderen van de laagste nog een keertje doen\")\n self.remove_lowest_kudos_in_db()\n\n def check_if_higher_than_lowest_kudo_db(self,dumpertitems):\n # If we have an item with more kudos than the lowest kudos in the database (provided it holds more than 2000 objects)\n # then those objects may go in\n lowest_kudos_in_db = DumpertItem.objects.all().aggregate(Min('kudos'))['kudos__min']\n if len(dumpertitems) <= 0:\n return\n highest_kudos_from_page = max(dumpertitems, key=lambda item: item.kudos)\n print(\n \"Highest kudo received:\" + str(highest_kudos_from_page.kudos) + \"Lowest from db\" + str(lowest_kudos_in_db))\n\n if lowest_kudos_in_db < highest_kudos_from_page.kudos:\n print(\"New database entry kudos: {} title: {}\".format(highest_kudos_from_page.kudos,\n highest_kudos_from_page.title))\n self.enter_dumpertitem_into_db(highest_kudos_from_page)\n dumpertitems.remove(highest_kudos_from_page)\n self.remove_lowest_kudos_in_db() # So that there will only be 2000 records\n self.check_if_higher_than_lowest_kudo_db(dumpertitems)\n\n return\n\n def 
retrive_dumpert_items_from_page(self, pagenumber, maxdumpertitemsindatabase):\n dp = RetriveDumpertItems()\n try:\n dumpertitems = dp.get_dumpertitems_from_page(pagenumber)\n except:\n return False\n\n # If there is nothing left, indicate that by returning a 3\n if int(len(dumpertitems)) == 0:\n return 3\n\n if DumpertItem.objects.all().count() >= maxdumpertitemsindatabase:\n self.check_if_higher_than_lowest_kudo_db(dumpertitems)\n else:\n print(\"The current count is\" + str(15 * pagenumber))\n if maxdumpertitemsindatabase - DumpertItem.objects.all().count() <= 15:\n for dumpertitem in dumpertitems[:maxdumpertitemsindatabase - DumpertItem.objects.all().count()]:\n self.enter_dumpertitem_into_db(dumpertitem)\n\n the_rest_of_dumpertitems = dumpertitems[\n 15 - (maxdumpertitemsindatabase - DumpertItem.objects.all().count()):]\n self.check_if_higher_than_lowest_kudo_db(the_rest_of_dumpertitems)\n else:\n for dumpertitem in dumpertitems:\n self.enter_dumpertitem_into_db(dumpertitem)\n\n return True\n\n def get_specific_dumpertitem(self, specificpage):\n results = self.get_result_from_dumpert('mediabase/'+str(specificpage))\n dumpertitem = RetriveDumpertItems()\n content = BeautifulSoup(results.content, 'html.parser')\n dumpertitem.kudos = int(content.find('div', {'class': 'dump-kudos'}).find('span', {'class': 'dump-amt'}).text)\n dumpertitem.views = int(content.find('div', {'class': 'dump-views'}).find('span', {'class': 'dump-amt'}).text)\n\n return dumpertitem\n","sub_path":"top2000dumpert/RetriveDumpertItems.py","file_name":"RetriveDumpertItems.py","file_ext":"py","file_size_in_byte":6458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}{"seq_id":"304359970","text":"import json\nimport logging\nimport os\nimport time\n\nimport botocore.exceptions\n\nfrom ..base import BaseProvider\nfrom ... 
import exceptions\nfrom ...util import retry_with_backoff\nfrom stacker.session_cache import get_session\n\nlogger = logging.getLogger(__name__)\n\nMAX_TAIL_RETRIES = 5\n\n\ndef get_output_dict(stack):\n \"\"\"Returns a dict of key/values for the outputs for a given CF stack.\n\n Args:\n stack (dict): The stack object to get\n outputs from.\n\n Returns:\n dict: A dictionary with key/values for each output on the stack.\n\n \"\"\"\n outputs = {}\n for output in stack['Outputs']:\n logger.debug(\" %s %s: %s\", stack['StackName'], output['OutputKey'],\n output['OutputValue'])\n outputs[output['OutputKey']] = output['OutputValue']\n return outputs\n\n\ndef retry_on_throttling(fn, attempts=3, args=None, kwargs=None):\n \"\"\"Wrap retry_with_backoff to handle AWS Cloudformation Throttling.\n\n Args:\n fn (function): The function to call.\n attempts (int): Maximum # of attempts to retry the function.\n args (list): List of positional arguments to pass to the function.\n kwargs (dict): Dict of keyword arguments to pass to the function.\n\n Returns:\n passthrough: This returns the result of the function call itself.\n\n Raises:\n passthrough: This raises any exceptions the function call raises,\n except for boto.exception.BotoServerError, provided it doesn't\n retry more than attempts.\n \"\"\"\n def _throttling_checker(exc):\n \"\"\"\n\n Args:\n exc (botocore.exceptions.ClientError): Expected exception type\n\n Returns:\n boolean: indicating whether this error is a throttling error\n \"\"\"\n if exc.response['ResponseMetadata']['HTTPStatusCode'] == 400 and \\\n exc.response['Error']['Code'] == \"Throttling\":\n logger.debug(\"AWS throttling calls.\")\n return True\n return False\n\n return retry_with_backoff(fn, args=args, kwargs=kwargs, attempts=attempts,\n exc_list=(botocore.exceptions.ClientError, ),\n retry_checker=_throttling_checker)\n\n\nclass Provider(BaseProvider):\n\n \"\"\"AWS CloudFormation Provider\"\"\"\n\n DELETED_STATUS = \"DELETE_COMPLETE\"\n IN_PROGRESS_STATUSES = (\n \"CREATE_IN_PROGRESS\",\n \"UPDATE_COMPLETE_CLEANUP_IN_PROGRESS\",\n \"UPDATE_IN_PROGRESS\",\n \"DELETE_IN_PROGRESS\",\n )\n COMPLETE_STATUSES = (\n \"CREATE_COMPLETE\",\n \"UPDATE_COMPLETE\",\n )\n\n def __init__(self, region, **kwargs):\n self.region = region\n self._outputs = {}\n self._cloudformation = None\n # Necessary to deal w/ multiprocessing issues w/ sharing ssl conns\n # see: https://github.com/remind101/stacker/issues/196\n self._pid = os.getpid()\n\n @property\n def cloudformation(self):\n # deals w/ multiprocessing issues w/ sharing ssl conns\n # see https://github.com/remind101/stacker/issues/196\n pid = os.getpid()\n if pid != self._pid or not self._cloudformation:\n session = get_session(self.region)\n self._cloudformation = session.client('cloudformation')\n\n return self._cloudformation\n\n def get_stack(self, stack_name, **kwargs):\n try:\n return retry_on_throttling(\n self.cloudformation.describe_stacks,\n kwargs=dict(StackName=stack_name))['Stacks'][0]\n except botocore.exceptions.ClientError as e:\n if \"does not exist\" not in e.message:\n raise\n raise exceptions.StackDoesNotExist(stack_name)\n\n def get_stack_status(self, stack, **kwargs):\n return stack['StackStatus']\n\n def is_stack_completed(self, stack, **kwargs):\n return self.get_stack_status(stack) in self.COMPLETE_STATUSES\n\n def is_stack_in_progress(self, stack, **kwargs):\n return self.get_stack_status(stack) in self.IN_PROGRESS_STATUSES\n\n def is_stack_destroyed(self, stack, **kwargs):\n return self.get_stack_status(stack) 
== self.DELETED_STATUS\n\n def tail_stack(self, stack, retries=0, **kwargs):\n def log_func(e):\n event_args = [e['ResourceStatus'], e['ResourceType'],\n e.get('ResourceStatusReason', None)]\n # filter out any values that are empty\n event_args = [arg for arg in event_args if arg]\n template = \" \".join([\"[%s]\"] + [\"%s\" for _ in event_args])\n logger.info(template, *([stack.fqn] + event_args))\n\n if not retries:\n logger.info(\"Tailing stack: %s\", stack.fqn)\n\n try:\n self.tail(stack.fqn,\n log_func=log_func,\n include_initial=False)\n except botocore.exceptions.ClientError as e:\n if \"does not exist\" in e.message and retries < MAX_TAIL_RETRIES:\n # stack might be in the process of launching, wait for a second\n # and try again\n time.sleep(1)\n self.tail_stack(stack, retries=retries + 1, **kwargs)\n else:\n raise\n\n @staticmethod\n def _tail_print(e):\n print(\"%s %s %s\" % (e['ResourceStatus'],\n e['ResourceType'],\n e['EventId']))\n\n def get_events(self, stackname):\n \"\"\"Get the events in batches and return in chronological order\"\"\"\n next_token = None\n event_list = []\n while 1:\n if next_token is not None:\n events = self.cloudformation.describe_stack_events(\n StackName=stackname, NextToken=next_token\n )\n else:\n events = self.cloudformation.describe_stack_events(\n StackName=stackname\n )\n event_list.append(events['StackEvents'])\n next_token = events.get('NextToken', None)\n if next_token is None:\n break\n time.sleep(1)\n return reversed(sum(event_list, []))\n\n def tail(self, stack_name, log_func=_tail_print, sleep_time=5,\n include_initial=True):\n \"\"\"Show and then tail the event log\"\"\"\n # First dump the full list of events in chronological order and keep\n # track of the events we've seen already\n seen = set()\n initial_events = self.get_events(stack_name)\n for e in initial_events:\n if include_initial:\n log_func(e)\n seen.add(e['EventId'])\n\n # Now keep looping through and dump the new events\n while 1:\n events = self.get_events(stack_name)\n for e in events:\n if e['EventId'] not in seen:\n log_func(e)\n seen.add(e['EventId'])\n time.sleep(sleep_time)\n\n def destroy_stack(self, stack, **kwargs):\n logger.debug(\"Destroying stack: %s\" % (self.get_stack_name(stack)))\n retry_on_throttling(self.cloudformation.delete_stack,\n kwargs=dict(StackName=self.get_stack_name(stack)))\n return True\n\n def create_stack(self, fqn, template_url, parameters, tags, **kwargs):\n logger.debug(\"Stack %s not found, creating.\", fqn)\n logger.debug(\"Using parameters: %s\", parameters)\n logger.debug(\"Using tags: %s\", tags)\n retry_on_throttling(\n self.cloudformation.create_stack,\n kwargs=dict(StackName=fqn,\n TemplateURL=template_url,\n Parameters=parameters,\n Tags=tags,\n Capabilities=[\"CAPABILITY_NAMED_IAM\"]),\n )\n return True\n\n def update_stack(self, fqn, template_url, old_parameters, parameters,\n tags, **kwargs):\n try:\n logger.debug(\"Attempting to update stack %s.\", fqn)\n retry_on_throttling(\n self.cloudformation.update_stack,\n kwargs=dict(StackName=fqn,\n TemplateURL=template_url,\n Parameters=parameters,\n Tags=tags,\n Capabilities=[\"CAPABILITY_NAMED_IAM\"]),\n )\n except botocore.exceptions.ClientError as e:\n if \"No updates are to be performed.\" in e.message:\n logger.debug(\n \"Stack %s did not change, not updating.\",\n fqn,\n )\n raise exceptions.StackDidNotChange\n raise\n return True\n\n def get_stack_name(self, stack, **kwargs):\n return stack['StackName']\n\n def get_outputs(self, stack_name, *args, **kwargs):\n if 
stack_name not in self._outputs:\n stack = self.get_stack(stack_name)\n self._outputs[stack_name] = get_output_dict(stack)\n return self._outputs[stack_name]\n\n def get_stack_info(self, stack_name):\n \"\"\" Get the template and parameters of the stack currently in AWS\n\n Returns [ template, parameters ]\n \"\"\"\n try:\n stacks = retry_on_throttling(\n self.cloudformation.describe_stacks,\n kwargs=dict(StackName=stack_name))\n except botocore.exceptions.ClientError as e:\n if \"does not exist\" not in e.message:\n raise\n raise exceptions.StackDoesNotExist(stack_name)\n\n try:\n template = retry_on_throttling(\n self.cloudformation.get_template,\n kwargs=dict(StackName=stack_name))['TemplateBody']\n except botocore.exceptions.ClientError as e:\n if \"does not exist\" not in e.message:\n raise\n raise exceptions.StackDoesNotExist(stack_name)\n\n stack = stacks['Stacks'][0]\n parameters = self.params_as_dict(stack.get('Parameters', []))\n\n return [json.dumps(template), parameters]\n\n @staticmethod\n def params_as_dict(parameters_list):\n parameters = dict()\n for p in parameters_list:\n parameters[p['ParameterKey']] = p['ParameterValue']\n return parameters\n","sub_path":"stacker/providers/aws/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":10242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"502820084","text":"import csv,sys\nfrom pymongo import MongoClient\n\n\n\nclass constituency:\n parliamentary_constituency = \"\"\n name = \"\"\n total_population = 0\n sc_population = 0\n st_population = 0\n district = \"\"\n constituency_id = -1\n\n def __init__(self, name, district, constituencyId, total_population,sc_population, st_population, parliamentary_constituency):\n self.name = name\n self.district = district\n self.constituencyId = constituencyId\n self.total_population = total_population\n self.sc_population = sc_population\n self.st_population = st_population\n self.parliamentary_constituency = parliamentary_constituency\n\n def __str__(self):\n toString = \"name: \" + self.name + \" district: \" + self.district + \" constituencyId: \" + str(self.constituencyId) + \" population: \" + str(self.total_population)\n return toString\n \n\ndb_interaction = sys.argv[1]\n\nif (db_interaction == \"true\"):\n connection = MongoClient(\"ds135916.mlab.com\", 35916)\n db = connection[\"ministryofmagic\"]\n db.authenticate(\"soumyadeep\", sys.argv[2])\n #print(db.collection_names())\n\n if (\"constituencies\" in db.collection_names()):\n pass\n else:\n db.create_collection(\"constituencies\")\n\n constituency_collection = db.constituencies\n\n\nwith open('karnataka-constituencies.csv', 'rt') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n try:\n parliamentary_constituency = row[0].split('-')[1]\n name = row[1].split('.')[1].strip()\n total_population = int(row[2])\n sc_population = int(row[3])\n st_population = int(row[4])\n district = row[5]\n single_constituency = constituency(name, district, -1, total_population, sc_population, st_population, parliamentary_constituency)\n if (db_interaction == \"true\"):\n constituency_collection.insert_one(single_constituency.__dict__)\n\n print(single_constituency)\n except(IndexError):\n print(\"Header\")\n\n","sub_path":"getPopulation.py","file_name":"getPopulation.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"279589312","text":"#! 
/usr/bin/python3\n# NAME\n# bwkr01.py - example 01 using bitwarden_keyring\n#\n# REVISION HISTORY\n# 08/27/2019 T.J. Yang init. \n# DESCRIPTION\n# 1. tested on centos 7.6 using python36 \n# 2. Doing CRUD DB operations on top of bitwarden's linux cli tool.\n#\n\n# USAGE\n# ./bwkr01.py options\n# \n# DEBUG\n# python3 -m pdb bwkr01.py\n#\n# OPTIONS\n#\n#\n# RETURN CODE\n# SUCCESS (=0) - script completed successfully\n# ERROR (=1) - error ... bad things happened\n#\n# REFERENCE:\n# 1. https://github.com/mickaelperrin/bitwarden-decrypt-cli\n#\n#\n# ---------------------------- CONSTANT DECLARATION ---------------------------\n# WHAT: Define exit status flags.\n# WHY: Easier to program with a mnemonic than with the numbers.\n# NOTE: THESE DO NOT CHANGE UNLESS NOTIFIED BY HP! Startup depends on these\n# exit statuses being defined this way.\n#\n\nSUCCESS = 0\nERROR = 1\nVERBOSE = False\nDEBUG = False\nVERSION = '0.5'\nlibdir = 'lib'\n\n#\n# ---------------------------- IMPORT DECLARATION ---------------------------\n#\nimport base64\nimport io\nimport json\nimport os\nimport sys\nimport getopt\nimport configparser # https://docs.python.org/3/library/configparser.html\nimport bitwarden_keyring as bwkr\n\n#\n# ---------------------------- VARIABLE DECLARATION ---------------------------\n#\n\n#\n# ---------------------------- FUNCTION DECLARATION ---------------------------\n#\n\ndef parse_commandline(argv):\n global DEBUG # http://stackoverflow.com/questions/21015066/local-variable-referenced-before-assignment-python-error\n global VERBOSE # http://stackoverflow.com/questions/21015066/local-variable-referenced-before-assignment-python-error\n if len(argv) < 1 :\n usage()\n sys.exit(2)\n try: # https://pymotw.com/2/getopt/\n opts, args = getopt.getopt(sys.argv[1:],':hdvV',)\n except getopt.GetoptError:\n print (\"Wrong options. 
\")\n usage()\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n usage()\n sys.exit()\n elif opt in (\"-V\", \"--Version\"):\n print (\"%s version %s\" % (sys.argv[0],VERSION))\n sys.exit()\n elif opt in (\"-d\",\"--debug\"):\n DEBUG = True\n elif opt in (\"-v\",\"--verbose\"):\n VERBOSE = True\n\n# usage information\ndef usage ():\n PROGNAME = sys.argv[0]\n print ( PROGNAME + \": \" + PROGNAME + \"\"\" \\\n [options] \n options:\n -c, --config= Path to alternate configuration file\n -d, --debug log message in /var/log/snowt.log\n -h, --help Print this usage message\n -v, --verbose Verbose output (additive)\n -V, --version Display program version and exit\"\"\" )\n\n#\n# **************************** MAIN SCRIPT ************************************\n#\n\n# for linux export BITWARDENCLI_APPDATA_DIR=~/.config/Bitwarden\\ CLI/\n# instantiate\n\nif __name__ == '__main__':\n parse_commandline(sys.argv[1:])\n config = configparser.ConfigParser()\n config.read('./bwkeyring.conf') # parse existing file\n if DEBUG: print (\"topsecret:%s\" % config['topsecret.server.com'])\n\n #print (\"BW_SESSION=%s\" % bwkr.get_session(os.environ))\n# print (\"path=%s\" % bwkr.get_db_location(os.environ,\"linux\"))\n# print (\"ASK_BW_SESSION=%s \" % bwkr.ask_for_session(False))\n# print (\"ASK_BW_SESSION=%s \" % bwkr.ask_for_session(True))\n# print (\"BW_SESSION=%s \" % bwkr.get_session(os.environ))\n# print (\"password=%s \" % bwkr.get_password(\"login\",\"nagios01\"))\n\n\n","sub_path":"examples/bwkr01.py","file_name":"bwkr01.py","file_ext":"py","file_size_in_byte":3446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"654360593","text":"from regex_env import RegexEnv\n\n\"\"\" Uses generators to iterate through every possibility listed in the matchname\nstatement \"\"\"\n\ndef matchPattern(params):\n\n regexEnv = RegexEnv()\n\n for regexGroups in matchPatternHelper(params):\n regexEnv.groupdict = regexGroups\n\n yield regexEnv\n\ndef matchPatternHelper(params):\n \"\"\" Recursively calls on smaller inputs, iterates through all the,\n each time returning a new regex environment \"\"\"\n if params == []:\n\n yield {}\n else:\n for env in matchPatternHelper(params[1:]):\n currParam = params[0]\n\n for match in currParam.matches:\n env.update({currParam.name: match})\n yield env\n\n\n","sub_path":"src/semantics/match_pat.py","file_name":"match_pat.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"310625906","text":"# Copyright (c) 2016 PaddlePaddle Authors. 
All Rights Reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom paddle.trainer_config_helpers import *\n\nis_predict = get_config_arg(\"is_predict\", bool, False)\nif not is_predict:\n define_py_data_sources2(\n train_list='data/train.list',\n test_list='data/test.list',\n module='dataprovider',\n obj='process',\n args={'mean_path': 'data/mean.meta'})\n\nsettings(\n batch_size=128,\n learning_rate=0.1 / 128.0,\n learning_rate_decay_a=0.1,\n learning_rate_decay_b=50000 * 100,\n learning_rate_schedule='discexp',\n learning_method=MomentumOptimizer(0.9),\n regularization=L2Regularization(0.0005 * 128), )\n\n\ndef vgg_bn_drop(input):\n def conv_block(ipt, num_filter, groups, dropouts, num_channels=None):\n return img_conv_group(\n input=ipt,\n num_channels=num_channels,\n pool_size=2,\n pool_stride=2,\n conv_num_filter=[num_filter] * groups,\n conv_filter_size=3,\n conv_act=ReluActivation(),\n conv_with_batchnorm=True,\n conv_batchnorm_drop_rate=dropouts,\n pool_type=MaxPooling())\n\n conv1 = conv_block(input, 64, 2, [0.3, 0], 3)\n conv2 = conv_block(conv1, 128, 2, [0.4, 0])\n conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0])\n conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0])\n conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0])\n\n drop = dropout_layer(input=conv5, dropout_rate=0.5)\n fc1 = fc_layer(input=drop, size=512, act=LinearActivation())\n bn = batch_norm_layer(\n input=fc1, act=ReluActivation(), layer_attr=ExtraAttr(drop_rate=0.5))\n fc2 = fc_layer(input=bn, size=512, act=LinearActivation())\n return fc2\n\n\ndatadim = 3 * 32 * 32\nclassdim = 10\ndata = data_layer(name='image', size=datadim)\nnet = vgg_bn_drop(data)\nout = fc_layer(input=net, size=classdim, act=SoftmaxActivation())\nif not is_predict:\n lbl = data_layer(name=\"label\", size=classdim)\n cost = classification_cost(input=out, label=lbl)\n outputs(cost)\nelse:\n outputs(out)\n","sub_path":"image_classification/models/vgg.py","file_name":"vgg.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}{"seq_id":"326438442","text":"# coding: utf-8\n\nimport csv\nimport requests\nfrom bs4 import BeautifulSoup\n\n### Hello Melissa: you need to add «.py» to your script so that I can run it\n\nfichier = \"missions_JHR.csv\" ### I am renaming your file for my tests\n\nurl = \"https://www.nasa.gov/missions\"\n\nentetes = {\n\t# \"User-Agent : Tiffany Toussaint\" ### Here, a comma was missing to separate the two elements of your dictionary; the key and the value also each needed their own pair of quotation marks\n\t\"User-Agent\" : \"Tiffany Toussaint\",\n\t\"From\" : \"cg491001@ens.uq-8am.ca\"\n}\n\ncontenu = requests.get(url, headers=entetes)\n# page = BeautifulSoup(contenu.text,encode(\"latin-1\").decode(\"utf-8\"),\"html.parser\") ### Here, it is not a comma that comes before «encode» but a period. 
Moreover, that bit was not necessary since the page's encoding was already perfect \npage = BeautifulSoup(contenu.text,\"html.parser\")\n\n# print()\n# missions = page.find_all(class_=\"clickable\") ### Here, you reproduced the example given in the video. You have to adapt your script to the content of the page you want to harvest.\n### The line below should normally have worked\n# missions = page.find(\"div\", id=\"content\").find_all(\"p\")\n\n### But this site is generated by JavaScript and impossible to harvest with the methods I showed you, unfortunately...\n### If you do print(page), you will see that the HTML code BeautifulSoup can fetch is different from what your browser shows...\nprint(page)\n\nfor mission in missions:\n\t# print(mission)\n\t# print(page.find(\"div\", class_\"static-landing-page\").find_next(\"url\").find_all) ### An «equals» was missing here; but this command is not very useful\n\t# href = (mission.find(\"a\").[\"href\"].text) ### Here, the . was not needed before [\"href\"]\n\thref = mission.find(\"a\")[\"href\"]\n\t# titre = (mission.find(\"href\").[\"> <\"] ### To extract the mission names, you had to look in the content of the «a» tag\n\ttitre = mission.find(\"a\").text\n\tinfos = [href,titre]\n\tprint(\"><\"*30)\n\n\tnasa = open(fichier,\"a\")\n\tetoiles = csv.writer(nasa)\n\tetoiles.writerow(infos)\n\n### Your script, corrected as I did it, should have worked if we had been dealing with a «normal» site... You will not be penalized for this...","sub_path":"jhr.py","file_name":"jhr.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}{"seq_id":"248386917","text":"import pygame\nfrom pygame.locals import *\nimport sys # sys is the library of system parameter functions\n\n\ndef main():\n clock = pygame.time.Clock()\t# frequency value: the number of frames, i.e. \"ticks\", to process per unit of time\t\t\t \n pygame.init() \t# initialize Pygame\n screen = pygame.display.set_mode((800, 600)) # 800*600 window\n px = 100 # initial x coordinate\n vx = 2\t # velocity in the x direction\n py = 100 # initial y coordinate\n vy = 2\t # velocity in the y direction\n ay = 0\t# gravitational acceleration\n \n while (1): # infinite loop\n screen.fill((255,255,255)) \t\t# paint the screen white\n pygame.draw.circle(screen,(10,10,10), (px,py), 50,) # ball settings: (10,10,10) is the color, 50 is the ball size\n px += vx # compute the next position\n vy += ay # apply gravity in the y direction (gravity acts vertically) \n py += vy # add the y velocity to the y coordinate\n \n if px >= 750 or px <= 50: # left/right walls\t\n vx *= -2 #??\n if py >= 550 or py <= 50:\t# top/bottom walls\n vy *= -2 #??\n print(py,vy,ay)\t\t # print the coordinates\n pygame.display.update() \t\t\t\t# refresh the display\n clock.tick(60)\t\t # update the screen at 60 fps\n\n # Event handling\n for event in pygame.event.get(): # get keyboard and mouse events\n if event.type == QUIT: # quit when the close button is pressed\n pygame.quit() # shut down Pygame (needed to terminate cleanly)\n sys.exit() # exit (otherwise the program ends with an error)\n\nif __name__ == \"__main__\":\n main()\n\n\n# Reference site: http://aocchi.xyz/2019/07/26/pythonpygame%E3%81%A7%E3%82%B2%E3%83%BC%E3%83%A0%E4%BD%9C%E3%82%8A-%E3%82%A2%E3%82%A4%E3%82%B3%E3%83%B3%E3%82%92%E5%8B%95%E3%81%8B%E3%81%99/","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"}{"seq_id":"644076498","text":"#!/usr/bin/env python\nbirthday = {'Alice': 'Mar 14', 'Bob': 'May 17', 'Tom': 'Jun 16'}\nwhile True:\n print('Enter a name (blank to quit):')\n name = input()\n\n if name == '':\n break\n\n if name in birthday:\n print(birthday[name] + ' is the birthday for ' + name)\n else:\n print('No information for current name')\n 
        print('Could you tell me when his/her birthday is?')\n        birth = input()\n        birthday[name] = birth\n        print('Birthday database updated!')\n","sub_path":"python/Automate the Boring Stuff/Basic/dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"10617255","text":"from torchvision.utils import save_image\n\n\ndef save_imgs(imgs, to_size, name):\n    # x = np.array(x)\n    # x = np.transpose(x, (1, 2, 0)) * 255\n    # x = x.astype(np.uint8)\n    # imsave(name, x)\n\n    # x = 0.5 * (x + 1)\n\n    # to_size = (C, H, W)\n    imgs = imgs.clamp(0, 1)\n    imgs = imgs.view(imgs.size(0), *to_size)\n    save_image(imgs, name)\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"492628029","text":"from django.conf.urls import url\nfrom . import views # This line is new!\n\napp_name = \"users\"\nurlpatterns = [\n\turl(r'^$', views.index, name=\"index\"),\n    url(r'^new$', views.new, name=\"new\"),\n    url(r'^edit/(?P<id>\\d+)$', views.edit, name=\"edit\"),\n    url(r'^edit_info/(?P<id>\\d+)$', views.edit_info, name=\"edit_info\"),\n    url(r'^user_edit/(?P<id>\\d+)$', views.user_edit, name=\"user_edit\"),\n    url(r'^show/(?P<id>\\d+)$', views.show, name=\"show\"),\n    url(r'^clear$', views.clear, name=\"clear\"),\n    url(r'^remove/(?P<id>\\d+)$', views.remove, name=\"remove\"),\n    url(r'^confirm_remove/$', views.confirm_remove, name=\"confirm_remove\"),\n\n    #url(r'^remove$', views.remove, name=\"remove\"),\n\n    ]\n''' userdash:index \n    userdash:signin\n    userdash:login\n    userdash:register\n    userdash:register_user'''\n''' dashboard:index\n    dashboard:admin '''\n''' users:index \n    users:new \n    users:edit \n    users:edit_info\n    users:user_edit\n    users:show\n    users:clear\n    users:remove\n    users:confirm_remove'''\n\n\n","sub_path":"userdashboard/apps/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"108496837","text":"# -*- coding: utf-8 -*-\n# Commented out IPython magic to ensure Python compatibility.\n# %matplotlib inline\n# Commented out IPython magic to ensure Python compatibility.\n# Colab and system related\nimport os\nimport sys\n###\n# Necessary to convert tensorflow-object (e.g. Neural Network) to Nifty-Operator\nsys.path.append('corrupted_data_classification/helper_functions/')\n\n###\nimport tensorflow as tf\n# Include path to access helper functions and Mask / Conv Operator\nsys.path.append('corrupted_data_classification/helper_functions/')\nfrom helper_functions import clear_axis, gaussian, get_cmap, info_text, get_noise, rotation, split_validation_set\nimport Mask # Masking Operator\nimport Conv # Convolution Operator\nsys.path.remove\n# Tensorflow\n\n# Plotting\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm \nimport matplotlib.pyplot as plt\n# %matplotlib inline\nplt.rcParams['figure.dpi'] = 200 # 200 e.g.
is really fine, but slower\n\n# Numerics\nimport random\nimport numpy as np\nfrom sklearn.neighbors import KernelDensity\nfrom scipy.stats import multivariate_normal\nimport sklearn as sk\nfrom sklearn import decomposition\n\n# Load MNIST Dataset\nmnist = tf.keras.datasets.mnist\n(XTrain, YTrain), (XTest, YTest) = mnist.load_data()\nXTrain, XTest = XTrain / 255.0, XTest / 255.0\n# Cut out last 100 Training images for comparison\nXTrain = XTrain[0:-100]\nYTrain = YTrain[0:-100]\n\n# Reshape Xtrain and XTest to 1x784 Vectors instead of 28x28 arrays\nXTrain = XTrain.reshape((len(XTrain), np.prod(XTrain.shape[1:])))\nXTest = XTest.reshape((len(XTest), np.prod(XTest.shape[1:])))\n\nXTrain, YTrain, XVal, YVal = split_validation_set(XTrain, YTrain, val_perc=0.2)\n\ndef autoencoder_deep(latent_space_size):\n Input = tf.keras.layers.Input(shape=784)\n h1 = tf.keras.layers.Dense(512, activation='selu', kernel_initializer='lecun_normal')(Input)\n h2 = tf.keras.layers.Dense(256, activation='selu', kernel_initializer='lecun_normal')(h1)\n h3 = tf.keras.layers.Dense(128, activation='selu', kernel_initializer='lecun_normal')(h2)\n encoded = tf.keras.layers.Dense(latent_space_size, activation='linear', \n activity_regularizer=tf.keras.regularizers.L2(0.001))(h3)\n # Decoder\n Decoder_Input = tf.keras.layers.Input(shape=latent_space_size) # Input for Decoder\n h5 = tf.keras.layers.Dense(128, activation='selu', kernel_initializer='lecun_normal')(Decoder_Input)\n h6 = tf.keras.layers.Dense(256, activation='selu', kernel_initializer='lecun_normal')(h5)\n h7 = tf.keras.layers.Dense(512, activation='selu', kernel_initializer='lecun_normal')(h6)\n decoded = tf.keras.layers.Dense(784, activation='sigmoid')(h7)\n\n # Decouple Encoder and Decoder from overall model\n Encoder = tf.keras.Model(Input, encoded)\n Decoder = tf.keras.Model(Decoder_Input, decoded)\n decoded = Decoder(encoded)\n model = tf.keras.Model(Input, [decoded, encoded])\n return Encoder, Decoder, model\n\n\nEncoder, Decoder, model = autoencoder_deep(10)\n\n# Loss Function for Reconstruction of images (i.e. 
overall Autoencoder)\ndef loss_fn_AE(y_true, y_pred):\n # y_pred = tf.nn.elu(y_pred) * tf.nn.softplus(y_pred)\n # return tf.losses.categorical_crossentropy(y_true, y_pred)\n # y_pred = tf.nn.softmax(y_pred)\n return tf.losses.binary_crossentropy(y_true,y_pred)\n #return tf.keras.losses.MeanSquaredError(y_true, y_pred)\n# Loss Function for Classification of Images in latent space\ndef loss_fn_Encoder(y_true, y_pred):\n y_pred = tf.nn.softmax(y_pred)\n return tf.losses.sparse_categorical_crossentropy(y_true, y_pred)\n\n# Training Options\nmodel.compile(optimizer='adam',\n #loss=[loss_fn_AE, loss_fn_Encoder],\n loss=[loss_fn_AE, loss_fn_Encoder], \n metrics=['accuracy'])\n\n# Training and Testing\nresults = model.fit(XTrain, [XTrain, YTrain], epochs=25)\nmodel.evaluate(XTest, [XTest, YTest], verbose=2)\n\n# Save trained Decoder and trained Encoder\nDecoder.save('./corrupted_data_classification/NNs/MNIST/pretrained_supervised_ae10/Decoder/', save_format='tf')\nEncoder.save('./corrupted_data_classification/NNs/MNIST/pretrained_supervised_ae10/Encoder/', save_format='tf')\n\nplt.plot(results.history['dense_3_accuracy'])\n\n","sub_path":"NNs/MNIST/pretrained_supervised_ae10/autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":4195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"474668038","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/afshari9978/Projects/namaki_backend/avishan/templatetags/avishan_tags.py\n# Compiled at: 2020-04-21 05:34:59\n# Size of source mod 2**32: 767 bytes\nfrom django import template\nfrom django.db import models\nfrom avishan.models import AvishanModel\nregister = template.Library()\n\n@register.filter\ndef translator(value: str) -> str:\n data = {'phone':'شماره همراه', \n 'email':'ایمیل'}\n try:\n return data[value.lower()]\n except KeyError:\n return value\n\n\n@register.filter\ndef leading_zeros(value, desired_digits):\n \"\"\"\n Given an integer, returns a string representation, padded with [desired_digits] zeros.\n \"\"\"\n num_zeros = int(desired_digits) - len(str(value))\n padded_value = []\n while num_zeros >= 1:\n padded_value.append('0')\n num_zeros = num_zeros - 1\n\n padded_value.append(str(value))\n return ''.join(padded_value)","sub_path":"pycfiles/django_avishan-0.2.40-py3-none-any/avishan_tags.cpython-36.py","file_name":"avishan_tags.cpython-36.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"399960198","text":"from django.test import TestCase\nfrom django.test import Client\nfrom .models import Role,Flow,FlowStage\nfrom .models import FlowStageTextInformation as TextInformation, FlowStageImageInformation as ImageInformation, FlowStageVideoInformation as VideoInformation\nimport json\n\n\n# Create your tests here.\nclass RolesModelTests(TestCase):\n def setUp(self):\n Role.objects.create(name=\"doctor\",description=\"this is doctor\")\n Role.objects.create(name=\"nurse\",description=\"this is nurse\")\n Role.objects.create(name=\"receptionist\",description=\"this is receptionist\")\n Flow.objects.create(name=\"doctorFlow\",role=Role.objects.get(id=1))\n Flow.objects.create(name=\"nurseFlow\",role=Role.objects.get(id=2))\n Flow.objects.create(name=\"studentFlow\",role=Role.objects.get(id=3))\n 
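        # Note: the fixtures below assume auto-increment primary keys starting\n        # at 1. A sketch that avoids hard-coded ids would keep references to the\n        # created objects instead (doctor_flow and doctor are hypothetical names):\n        #     doctor_flow = Flow.objects.create(name=\"doctorFlow\", role=doctor)\n        #     FlowStage.objects.create(name=\"diagnose\", flow=doctor_flow)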
FlowStage.objects.create(name=\"diagnose\",flow=Flow.objects.get(id=1))\n FlowStage.objects.create(name=\"prescribe\", flow=Flow.objects.get(id=1))\n FlowStage.objects.create(name=\"operator\",flow=Flow.objects.get(id=1))\n FlowStage.objects.create(name=\"injection\",flow=Flow.objects.get(id=2))\n FlowStage.objects.create(name=\"transfusion\", flow=Flow.objects.get(id=2))\n ImageInformation.objects.create(content=\"image1\",flowStage = FlowStage.objects.get(id=1),imageFile=\"image1_url\")\n ImageInformation.objects.create(content=\"image2\", flowStage=FlowStage.objects.get(id=2), imageFile=\"image2_url\")\n ImageInformation.objects.create(content=\"image3\",flowStage = FlowStage.objects.get(id=3),imageFile=\"image3_url\")\n ImageInformation.objects.create(content=\"image4\", flowStage=FlowStage.objects.get(id=4), imageFile=\"image4_url\")\n TextInformation.objects.create(content=\"Text1\",flowStage = FlowStage.objects.get(id=1))\n TextInformation.objects.create(content=\"Text2\",flowStage = FlowStage.objects.get(id=2))\n TextInformation.objects.create(content=\"Text3\",flowStage = FlowStage.objects.get(id=3))\n TextInformation.objects.create(content=\"Text5\",flowStage = FlowStage.objects.get(id=5))\n VideoInformation.objects.create(content=\"Video1\",flowStage = FlowStage.objects.get(id=1), videoFile=\"video1_url\")\n VideoInformation.objects.create(content=\"Video2\",flowStage = FlowStage.objects.get(id=2), videoFile=\"video2_url\")\n VideoInformation.objects.create(content=\"Video3\",flowStage = FlowStage.objects.get(id=3), videoFile=\"video3_url\")\n VideoInformation.objects.create(content=\"Video4\",flowStage = FlowStage.objects.get(id=4), videoFile=\"video4_url\")\n\n\n def test_no_roles(self):\n doctor=Role.objects.get(id=1)\n nurse=Role.objects.get(id=2)\n receptionist=Role.objects.get(id=3)\n doctor.delete()\n nurse.delete()\n receptionist.delete()\n response = self.client.get(\"/role_learning/roles/\")\n result={}\n roles=[]\n roles.append(\"NONE\")\n result[\"roles\"]=roles\n self.assertEqual(response.content.decode('utf-8'),json.dumps(result))\n\n #test return roles\n def test_get_roles(self):\n response = self.client.get(\"/role_learning/roles/\")\n result={}\n roles_list = []\n role={}\n role[\"id\"] = 1\n role[\"name\"] = \"doctor\"\n role[\"description\"] = \"this is doctor\"\n roles_list.append(role)\n role={}\n role[\"id\"] = 2\n role[\"name\"] = \"nurse\"\n role[\"description\"] = \"this is nurse\"\n roles_list.append(role)\n role={}\n role[\"id\"] = 3\n role[\"name\"] = \"receptionist\"\n role[\"description\"] = \"this is receptionist\"\n roles_list.append(role)\n result[\"roles\"]=roles_list\n # print(response.content.decode('utf-8'))\n # print(json.dumps(roles_list))\n self.assertEqual(response.content.decode('utf-8'),json.dumps(result))\n\n\n #return a flow have full information\n def test_get_flow_have_all_information(self):\n response = self.client.get(\"/role_learning/role/1\")\n result={}\n result[\"name\"]=\"doctor\"\n result[\"description\"]=\"this is doctor\"\n details=[]\n info={}\n info[\"id\"]=1\n info[\"name\"]=\"diagnose\"\n words=[]\n word_info={}\n word_info[\"word\"]=\"Text1\"\n words.append(word_info)\n info[\"words\"]=words\n images=[]\n image_info={}\n image_info[\"url\"]=\"http://127.0.0.1:8000/role_learning/media/image1_url\"\n images.append(image_info)\n info[\"imgUrls\"]=images\n videos=[]\n video_info={}\n video_info[\"url\"]=\"http://127.0.0.1:8000/role_learning/media/video1_url\"\n videos.append(video_info)\n info[\"videoUrls\"]=videos\n 
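        # Sketch: each expected stage could also be assembled as one dict literal,\n        # e.g. info = {\"id\": 1, \"name\": \"diagnose\", \"words\": [{\"word\": \"Text1\"}],\n        #             \"imgUrls\": [{\"url\": image_url}], \"videoUrls\": [{\"url\": video_url}]}\n        # where image_url/video_url stand in for the full media URLs built above.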
details.append(info)\n\n info={}\n info[\"id\"]=2\n info[\"name\"]=\"prescribe\"\n words=[]\n word_info={}\n word_info[\"word\"]=\"Text2\"\n words.append(word_info)\n info[\"words\"]=words\n images=[]\n image_info={}\n image_info[\"url\"]=\"http://127.0.0.1:8000/role_learning/media/image2_url\"\n images.append(image_info)\n info[\"imgUrls\"]=images\n videos=[]\n video_info={}\n video_info[\"url\"]=\"http://127.0.0.1:8000/role_learning/media/video2_url\"\n videos.append(video_info)\n info[\"videoUrls\"]=videos\n details.append(info)\n\n info={}\n info[\"id\"]=3\n info[\"name\"]=\"operator\"\n words=[]\n word_info={}\n word_info[\"word\"]=\"Text3\"\n words.append(word_info)\n info[\"words\"]=words\n images=[]\n image_info={}\n image_info[\"url\"]=\"http://127.0.0.1:8000/role_learning/media/image3_url\"\n images.append(image_info)\n info[\"imgUrls\"]=images\n videos=[]\n video_info={}\n video_info[\"url\"]=\"http://127.0.0.1:8000/role_learning/media/video3_url\"\n videos.append(video_info)\n info[\"videoUrls\"]=videos\n details.append(info)\n result[\"details\"]=details\n# print(response.content.decode('utf-8'))\n# print(json.dumps(result))\n self.assertEqual(response.content.decode('utf-8'),json.dumps(result))\n\n def test_get_flow_no_details(self):\n response = self.client.get(\"/role_learning/role/3\")\n result={}\n result[\"name\"]=\"receptionist\"\n result[\"description\"]=\"this is receptionist\"\n details=[]\n details.append(\"NONE\")\n result[\"details\"]=details\n self.assertEqual(response.content.decode('utf-8'),json.dumps(result))\n\n def test_get_flow_no_specific_information(self):\n response = self.client.get(\"/role_learning/role/2\")\n result={}\n result[\"name\"]=\"nurse\"\n result[\"description\"]=\"this is nurse\"\n details=[]\n info={}\n info[\"id\"]=4\n info[\"name\"]=\"injection\"\n info[\"words\"]=\"NONE\"\n images=[]\n imginfo={}\n imginfo[\"url\"]=\"http://127.0.0.1:8000/role_learning/media/image4_url\"\n images.append(imginfo)\n info[\"imgUrls\"]=images\n videos=[]\n videoinfo={}\n videoinfo[\"url\"]=\"http://127.0.0.1:8000/role_learning/media/video4_url\"\n videos.append(videoinfo)\n info[\"videoUrls\"]=videos\n details.append(info)\n info={}\n info[\"id\"]=5\n info[\"name\"]=\"transfusion\"\n words=[]\n wordinfo={}\n wordinfo[\"word\"]=\"Text5\"\n words.append(wordinfo)\n info[\"words\"]=words\n info[\"imgUrls\"]=\"NONE\"\n info[\"videoUrls\"]=\"NONE\"\n details.append(info)\n result[\"details\"]=details\n self.assertEqual(response.content.decode('utf-8'),json.dumps(result))\n","sub_path":"PetHospital/role_learning/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":7612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"379320504","text":"'''\ndemonstrates openhtf framework\n'''\nimport openhtf as htf\nimport test as T\nfrom openhtf.output.callbacks.json_factory import OutputToJSON as JSON\nfrom openhtf.output.callbacks import console_summary\n\nfrom openhtf.util import checkpoints\n\nfrom openhtf.util import conf\nfrom openhtf.output.servers import station_server\nfrom openhtf.output.web_gui import web_launcher\nfrom openhtf.plugs import user_input\n\ndef main():\n test_devices()\n\ndef get_all_devices():\n '''\n Placeholder for some sort of discovery function for finding hardware\n attached to something somewhere (all quite vague at the moment)\n '''\n return [\n {\n 'uuid': '8675309B',\n 'type': 'om',\n 'subtype': 'D-Egg',\n 'name': 'Pedro',\n 'hacks': {'status': 'N/A', 'ohms': 42}\n }, {\n 
'uuid': 'A675309A',\n 'type': 'om',\n 'subtype': 'mDOM',\n 'name': 'Linda',\n 'hacks': {'status': 'ERROR', 'ohms': 6.2, 'FAIL_FOO': True}\n }, {\n 'uuid': '00000042',\n 'type': 'pmt',\n 'subtype': 'pDOM',\n 'name': 'Harold',\n 'hacks': {'status': 'OK', 'ohms': 7}\n }\n ]\n\ndef test_devices():\n devices = get_all_devices()\n\n # for example, pass in cmd_session\n #session = T.get_session()\n session = None\n\n #def foo(x):\n # try:\n\n for device in devices:\n # or could create new session per device here...\n\n test = htf.Test(\n T.VapeTest.run_mod,\n T.VapeTest.puff,\n T.VapeTest.coil.with_args(resist=0.2, wattage=50),\n checkpoints.checkpoint(),\n T.DOMTest.iceboot.with_args(dom=device, FAKEresults={\n 'status': device['hacks']['status']}),\n T.DOMTest.run_foo_command.with_args(cmd_args=['dom'], cmd_kwargs={}),\n #test_name = T.VapeTest.__name__,\n test_version='1.0.2',\n test_description='I am a vape test',\n subtype=device['subtype'],\n type=device['type'],\n websession=session,\n device=device,\n device_config={\n 'somesetting': 'somevalue',\n }\n )\n test.configure(name='{}:{}:{}'.format(T.VapeTest.__name__, device['uuid'], device['name']))\n test.add_output_callbacks(JSON('./results/{metadata[type]}.{metadata[subtype]}.{dut_id}.{metadata[test_name]}-v{metadata[test_version]}.json', indent=4, default=str))\n #test.add_output_callbacks(JSON('{dut_id}.{metadata[test_name]}-v{metadata[test_version]}.json', indent=4, inline_attachments=False))\n if False:\n test.add_output_callbacks(console_summary.ConsoleSummary())\n\n test.execute(test_start=lambda: device['uuid'])\n\n'''\ndef serve():\n @htf.measures(htf.Measurement('hello_world_measurement'))\n def hello_world(test):\n test.logger.info('Hello World!')\n test.measurements.hello_world_measurement = 'Hello Again!'\n\n\n conf.load(station_server_port='4444')\n with station_server.StationServer() as server:\n web_launcher.launch('http://localhost:4444')\n for i in range(5):\n test = htf.Test(hello_world)\n test.add_output_callbacks(server.publish_final_state)\n test.execute(test_start=user_input.prompt_for_test_start())\n'''\n\n\n\nif __name__ == '__main__':\n main()\n\n\n","sub_path":"examples/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":3394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"348448916","text":"from linkbot.utils.cmd_utils import *\n\n\n@command(\n ['{c} set <@role>', '{c} remove'],\n 'Set/remove the role to be used as the entry level role.',\n [\n ('{c} set @Noobs', 'Sets the entry level role for this server to Noobs.'),\n ('{c} remove', 'Removes automatic entry level role setting for this server.')\n ]\n)\n@restrict(SERVER_ONLY)\nasync def entryrole(cmd: Command):\n if not cmd.args:\n with db.connect() as (conn, cur):\n cur.execute(\"SELECT entry_role FROM servers WHERE server_id = %s;\", [cmd.guild.id])\n role_id = cur.fetchone()[0]\n if not role_id:\n raise CommandError(cmd, 'There is not an entry level role set for this server.')\n role = discord.utils.get(cmd.guild.roles, id=role_id)\n await cmd.channel.send(f'Entry level role: `{role}`')\n\n elif cmd.args[0] == 'set':\n mentions = cmd.message.role_mentions\n if not mentions:\n raise CommandSyntaxError(cmd, \"You must at-mention a role in the server to be the entry level role.\")\n role = cmd.message.role_mentions[0]\n with db.connect() as (conn, cur):\n cur.execute(\"UPDATE servers SET entry_role = %s\", [role.id])\n conn.commit()\n await send_success(cmd.message)\n\n elif cmd.args[0] == 
'remove':\n with db.connect() as (conn, cur):\n cur.execute(\"SELECT entry_role FROM servers WHERE server_id = %s;\", [cmd.guild.id])\n role_id = cur.fetchone()[0]\n if not role_id:\n raise CommandError(cmd, \"There isn't an entry role to remove.\")\n cur.execute(\"UPDATE servers SET entry_role = NULL\")\n conn.commit()\n await send_success(cmd.message)\n else:\n raise CommandSyntaxError(cmd, \"Invalid subcommand.\")\n\n\n@on_event('ready')\nasync def entryrole_check_all():\n with db.connect() as (conn, cur):\n cur.execute(\"SELECT server_id, entry_role FROM servers WHERE entry_role IS NOT NULL;\")\n results: Tuple[int, int] = cur.fetchall()\n for (guild_id, role_id) in results:\n guild = client.get_guild(guild_id)\n role = discord.utils.get(guild.roles, id=role_id)\n for member in guild.members:\n if len(member.roles) == 1:\n await member.add_roles(role)\n\n\n@on_event('member_join')\nasync def entryrole_check_one(member):\n with db.connect() as (conn, cur):\n cur.execute(\"SELECT entry_role FROM servers WHERE server_id = %s AND entry_role IS NOT NULL;\", [member.guild.id])\n result = cur.fetchone()\n if result:\n role = discord.utils.get(member.guild.roles, id=result[0])\n await member.add_roles(role)\n","sub_path":"linkbot/commands/EntryRole.py","file_name":"EntryRole.py","file_ext":"py","file_size_in_byte":2672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"214983008","text":"\nclass Operon:\n def __init__(self, operon_id, genome_id, sequence_id, start=None, end=None, strand=None):\n self.operon_id = operon_id\n self.genome_id = genome_id\n self.sequence_id = sequence_id\n self.start = start\n self.end = end\n self.strand = strand\n self.gene_count = 0\n self.gene_ids = []\n\n def __str__(self):\n return self.operon_id + ':' + self.gene_ids[0] + '-' + self.gene_ids[-1]\n","sub_path":"py/DapSeqAgent/ReferenceData/Operon.py","file_name":"Operon.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"495167540","text":"#imports opencv. Requires Numpy\nimport cv2\n\n#Open a image file. \n#Doesn't raise exception if file not found.Returns None.\nimg = cv2.imread('data/IO.png',0)\n\nprint(type(img))\n# display image\n#Display only for a miliseconds\ncv2.imshow('image',img)\n\n#Number of ms you want to wait. 
Give 0 to wait until the window is closed.\n#The pressed key will be saved in 'k'\n#Some 64 bit machine requires '& 0xFF' after wait key\nk = cv2.waitKey(0)\n\n\nif k == 27: #27 is ESC\n #Destroys all windows\n cv2.destroyAllWindows()\n\nelif k == ord('s'):\n #Writes a image\n cv2.imwrite('lena_copy.png',img)\n cv2.destroyAllWindows()\n\n","sub_path":"Codes/IO.py","file_name":"IO.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"306917138","text":"class BlackJackAI():\r\n def __init__(self):\r\n self.dealer_card = -1\r\n self.name = 'AI_MATH'\r\n self.prob = [[0 for i in range(0,23)] for i in range(0, 23)]\r\n self.card_prob = [ 1 for i in range(11) ]\r\n self.card_prob[10] = 4\r\n\r\n for x in range(1, 11):\r\n self.prob[1][x] = self.card_prob[x]\r\n \r\n for n in range(2, 22):\r\n for y in range(1, 22):\r\n for z in range(1, 11):\r\n v = y + z\r\n if v > 21:\r\n v = 22\r\n self.prob[n][v] += self.prob[n-1][y] * self.card_prob[z]\r\n \r\n self.count = [0] * 31\r\n for i in range(1, 22):\r\n for j in range(1, i+1):\r\n self.count[i] += self.prob[j][i]\r\n \r\n self.prob_percent = [[0 for i in range(0,23)] for i in range(0, 23)] \r\n for n in range(1, 22):\r\n tmp = 0\r\n for x in range(1, 23):\r\n tmp += self.prob[n][x]\r\n for x in range(1, 23):\r\n self.prob_percent[n][x] = self.prob[n][x] / float(tmp)\r\n\r\n def update_dealer_card(self, a):\r\n if a == self.dealer_card:\r\n return\r\n self.dealer_card = a\r\n final_prob = [0] * 31\r\n for i in range(17, 31):\r\n final_prob[i] = self.count[i - a]\r\n if final_prob[i] == 0:\r\n continue\r\n for j in range(i-1, 16, -1):\r\n final_prob[i] -= self.count[j - a]\r\n \r\n t = 0\r\n for i in final_prob[17:]:\r\n t += i\r\n \r\n self.dealer_prob = [0] * 31\r\n for i in range(17, 22):\r\n self.dealer_prob[i] = float(final_prob[i]) / t\r\n \r\n self.bust = 0\r\n for i in range(22, 31):\r\n self.bust += final_prob[i]\r\n self.bust = self.bust / float(t)\r\n \r\n\r\n def stand_win(self, n):\r\n win_rate = self.bust\r\n for i in range(17, n):\r\n win_rate += self.dealer_prob[i]\r\n return win_rate\r\n \r\n def hit_win_step(self, n, step):\r\n win_rate = 0\r\n for i in range(n+1, 22):\r\n win_rate += self.prob_percent[step][i - n] * self.stand_win(i)\r\n return win_rate\r\n\r\n def hit_win(self, n):\r\n win_rate = 0.0\r\n for i in range(1, 22 - n):\r\n win_rate = max(win_rate,self.hit_win_step(n, i))\r\n return win_rate\r\n\r\n def action(self, state):\r\n self.update_dealer_card(state[1])\r\n n = state[0]\r\n hit_win_rate = self.hit_win(n)\r\n if state[2]:\r\n hit_win_rate = max(hit_win_rate, self.hit_win(n - 10))\r\n if self.stand_win(n) < hit_win_rate:\r\n return 1\r\n else:\r\n return 0","sub_path":"ai_math.py","file_name":"ai_math.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"403152120","text":"#!/usr/bin/python\n\n# -*- coding: utf-8 -*-\n\nimport sys\nimport logging\nimport ConfigParser\nimport multiprocessing\nimport threading\nimport Queue\nimport codecs\nimport socket\nfrom os import lstat, listdir\nfrom os.path import join, basename, dirname\nfrom stat import S_ISDIR, S_ISLNK\nfrom optparse import OptionParser\nfrom time import time\n\nDONE_SENTINEL = '$$##%%@@##$$'\n\ndef _write_records(csv_file, csv_file_lock, file_info_tbl):\n with csv_file_lock:\n for record in file_info_tbl:\n csv_file.write(unicode('|'.join(record) + '\\n', 'utf-8', 
'replace'))\n del file_info_tbl[:]\n\n\ndef _get_pids_for_file(path_id_tbl, path_id_tbl_lock, fullpath, direct_pid):\n pids = [direct_pid]\n with path_id_tbl_lock:\n while 1:\n fullpath = dirname(fullpath)\n pid = path_id_tbl.get(fullpath, None)\n if pid:\n pids.append(pid)\n else:\n break\n if direct_pid:\n # root id\n pids.append(0)\n return '{' + ','.join([str(p) for p in pids]) + '}'\n\n\ndef _do_walktree(dir_name, dir_id, pid, csv_file, csv_file_lock, file_id, file_id_lock,\n available_vec, available_vec_lock, path_id_tbl, path_id_tbl_lock,\n scan_info, subdir_queue, file_info_tbl):\n '''Walk each file/dir for dir_name and collect the file information'''\n\n if len(file_info_tbl) > 200000:\n _write_records(csv_file, csv_file_lock, file_info_tbl)\n\n try:\n with path_id_tbl_lock:\n path_id_tbl[dir_name] = dir_id\n\n has_subdir = 0\n gen_pids = False\n for f in listdir(dir_name):\n pathname = join(dir_name, f)\n try:\n st = lstat(pathname)\n except Exception:\n logging.warning(' '.join([str(r) for r in sys.exc_info()[1:2]]))\n continue\n\n if S_ISLNK(st.st_mode):\n pass\n elif S_ISDIR(st.st_mode):\n if f in scan_info[-1]:\n logging.info('skipping excluded dir: ' + pathname)\n continue\n\n has_subdir = 1\n with file_id_lock:\n file_id[0] += 1 # acquire an id for this subdir\n temp_file_id = file_id[0]\n\n # Before doing the recursive, first check if someother thread has nothing\n # to do. If so, feed the workitem for the threads\n # Note, if this is the last dir, this will be a wrong call, but anyway\n with available_vec_lock:\n has_free_thread = (available_vec[0] != available_vec[1])\n\n if has_free_thread:\n subdir_queue.put((pathname, temp_file_id, dir_id))\n else:\n _do_walktree(pathname, temp_file_id, dir_id, csv_file, csv_file_lock, file_id, file_id_lock,\n available_vec, available_vec_lock, path_id_tbl, path_id_tbl_lock, scan_info,\n subdir_queue, file_info_tbl)\n else:\n with file_id_lock:\n file_id[0] += 1 # acquire an id for this file\n temp_file_id = file_id[0]\n\n # generate parent dir ids for the first file under this dir\n if gen_pids:\n pids = ''\n else:\n pids = _get_pids_for_file(path_id_tbl, path_id_tbl_lock, dir_name, dir_id)\n gen_pids = True\n\n file_info_tbl.append((str(time()), str(scan_info[0]), f, str(temp_file_id), str(st.st_size),\n str(int(st.st_atime)), str(int(st.st_mtime)), str(st.st_uid),\n str(st.st_gid), '0', '0', str(dir_id), pids))\n\n # handle this dir\n st = lstat(dir_name)\n if dir_id:\n dname = basename(dir_name)\n else:\n dname = dir_name\n\n file_info_tbl.append((str(time()), str(scan_info[0]), dname, str(dir_id), '0', str(int(st.st_atime)),\n str(int(st.st_mtime)), str(st.st_uid), str(st.st_gid), '1',\n str(has_subdir), str(pid), ''))\n except Exception:\n logging.warning(' '.join([str(r) for r in sys.exc_info()[1:2]]))\n\n\ndef _walktree(**kwargs):\n csv_file_lock = kwargs['csv_file_lock']\n file_id_lock = kwargs['file_id_lock']\n available_vec_lock = kwargs['available_vec_lock']\n path_id_tbl_lock = kwargs['path_id_tbl_lock']\n csv_file = kwargs['csv_file']\n file_id = kwargs['file_id']\n available_vec = kwargs['available_vec']\n path_id_tbl = kwargs['path_id_tbl']\n scan_info = kwargs['scan_info']\n subdir_queue = kwargs['subdir_queue']\n thread_id = kwargs['thread_id']\n file_info_tbl = []\n all_done = False\n\n while 1:\n with available_vec_lock:\n # Declare I am free\n available_vec[0] &= ~(1 << thread_id)\n # Check if everybody is free, if yes, then we are all done\n if not available_vec[0]:\n all_done = True\n\n if all_done:\n # Notify 
the other thread\n subdir_queue.put((DONE_SENTINEL, 0, 0))\n return\n\n # Try to get worktime from the queue. Hope some other thread detects\n # I am free and feed some workitem to me\n (dir_name, dir_id, pid) = subdir_queue.get()\n if dir_name == DONE_SENTINEL:\n # Notify the other thread\n subdir_queue.put((DONE_SENTINEL, 0, 0))\n return\n\n # Got one workitem, declare I will be busy\n with available_vec_lock:\n available_vec[0] |= (1 << thread_id)\n\n # Busy with the workitem\n _do_walktree(dir_name, dir_id, pid, csv_file, csv_file_lock, file_id, file_id_lock,\n available_vec, available_vec_lock, path_id_tbl, path_id_tbl_lock,\n scan_info, subdir_queue, file_info_tbl)\n\n # Write remaining records\n _write_records(csv_file, csv_file_lock, file_info_tbl)\n\n\ndef _start_collect_for_scanroot(scan_info):\n csv_file_lock = threading.Lock()\n file_id_lock = threading.Lock()\n available_vec_lock = threading.Lock()\n path_id_tbl_lock = threading.Lock()\n path_id_tbl = {}\n subdir_queue = Queue.Queue()\n num_thread = multiprocessing.cpu_count()\n if num_thread <= 1:\n num_thread = 2\n\n all_threads_busy = 0\n for i in range(num_thread):\n all_threads_busy |= (1 << i)\n\n file_id = [0]\n available_vec = [all_threads_busy, all_threads_busy]\n csv_file_name = scan_info[1].replace('\\\\', '_').replace('/', '_').replace(':', '$') + '_file_info.csv'\n with codecs.open(csv_file_name, 'w', encoding='utf-8', errors='replace') as csv_file:\n csv_file.write('partition_id|name|id|size|atime|mtime|uid|gid|type|has_subdir|pid|pids\\n')\n subdir_queue.put((scan_info[1], 0, -1))\n work_threads = []\n for i in range(num_thread):\n th = threading.Thread(target=_walktree, \\\n kwargs={'csv_file': csv_file, \\\n 'csv_file_lock': csv_file_lock, \\\n 'file_id': file_id, \\\n 'file_id_lock': file_id_lock, \\\n 'available_vec': available_vec, \\\n 'available_vec_lock': available_vec_lock, \\\n 'path_id_tbl': path_id_tbl, \\\n 'path_id_tbl_lock': path_id_tbl_lock, \\\n 'scan_info': scan_info, \\\n 'subdir_queue': subdir_queue, \\\n 'thread_id': i})\n th.start()\n work_threads.append(th)\n\n for th in work_threads:\n th.join()\n\n\ndef _collect_for_scanroot(work_queue):\n logging.basicConfig(filename='file_info_collector.log',\n format='%(levelname)s:%(asctime)s:%(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n level=logging.INFO)\n\n while 1:\n scan_info = work_queue.get()\n if scan_info != DONE_SENTINEL:\n print('get job ', scan_info)\n logging.info('get job(' + ' '.join([str(r) for r in scan_info[0:-1]]) +')')\n _start_collect_for_scanroot(scan_info)\n else:\n work_queue.put(DONE_SENTINEL)\n return\n\ndef _get_partition_info_linux(scan_info_tbl, partition_start_id):\n import subprocess\n import re\n\n inaccessible_scan_roots = []\n for scan_info in scan_info_tbl:\n try:\n st = lstat(scan_info[1])\n except:\n logging.warning('scan root ' + scan_info[1] + ' is not accessible, will ignore it!')\n inaccessible_scan_roots.append(scan_info[1])\n\n mount_list = subprocess.Popen(['mount'], stdout=subprocess.PIPE).communicate()[0].split('\\n')\n mount_tbl = {}\n for m in mount_list:\n # nfs\n p = re.search('(.+):(.+)\\s+on\\s+(.+)\\s+type\\s+(\\w+)', m)\n if p:\n mount_tbl[p.group(3)] = [p.group(1), p.group(2), p.group(4)]\n continue\n\n # cifs\n p = re.search('//(.+?)(/.+)\\s+on\\s+(.+)\\s+type\\s+(\\w+)', m)\n if p:\n mount_tbl[p.group(3)] = [p.group(1), p.group(2), p.group(4)]\n\n export_info, partition_info_rec = None, []\n with open(scan_info[0]+'_partition_info.csv', 'w') as f:\n 
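        # The pipe-delimited header below is written by hand; a csv.writer with\n        # delimiter='|' would handle quoting automatically. A sketch (would need\n        # an extra import csv; this script writes its rows manually instead):\n        #     writer = csv.writer(f, delimiter='|')\n        #     writer.writerow(['partition_id', 'site', 'exportserver',\n        #                      'exportroot', 'exporttype', 'scanserver', 'scanroot'])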
f.write('partition_id|site|exportserver|exportroot|exporttype|scanserver|scanroot\\n')\n for scan_info in scan_info_tbl:\n if scan_info[1] in mount_tbl:\n export_info = mount_tbl[scan_info[1]]\n elif scan_info[1] not in inaccessible_scan_roots:\n # local drive\n export_info = [scan_info[0], scan_info[1], 'localfs']\n\n if export_info:\n partition_info_rec.append(partition_start_id)\n partition_info_rec.append(scan_info[-1])\n partition_info_rec.extend(export_info)\n partition_info_rec.extend(scan_info[0:-1])\n scan_info[0] = partition_start_id\n partition_start_id += 1\n f.write('|'.join([str(r) for r in partition_info_rec]) + '\\n')\n del partition_info_rec[:]\n export_info = None\n\n\ndef _get_partition_info_win(scan_info_tbl, partition_start_id):\n import re\n\n mount_tbl = {}\n for scan_info in scan_info_tbl:\n try:\n st = lstat(scan_info[1])\n p = re.search(r'\\\\\\\\(.+?)(\\\\.+)', scan_info[1])\n if p:\n mount_tbl[scan_info[1]] = [p.group(1), p.group(2), 'cifs']\n else:\n mount_tbl[scan_info[1]] = [scan_info[1], scan_info[1], 'localfs']\n except:\n logging.warning('scan root ' + scan_info[1] + ' is not accessible, will ignore it!')\n\n export_info, partition_info_rec = None, []\n with open(scan_info[0]+'_partition_info.csv', 'w') as f:\n f.write('partition_id|site|exportserver|exportroot|exporttype|scanserver|scanroot\\n')\n for scan_info in scan_info_tbl:\n if scan_info[1] in mount_tbl:\n export_info = mount_tbl[scan_info[1]]\n partition_info_rec.append(partition_start_id)\n partition_info_rec.append(scan_info[-1])\n partition_info_rec.extend(export_info)\n partition_info_rec.extend(scan_info[0:-1])\n scan_info[0] = partition_start_id\n partition_start_id += 1\n f.write('|'.join([str(r) for r in partition_info_rec]) + '\\n')\n del partition_info_rec[:]\n export_info = None\n\n\ndef _get_partition_info(scan_info_tbl, partition_start_id):\n import platform\n\n os_type = platform.system()\n if os_type == 'Linux' or os_type == 'Darwin':\n _get_partition_info_linux(scan_info_tbl, partition_start_id)\n elif os_type == 'Windows':\n _get_partition_info_win(scan_info_tbl, partition_start_id)\n else:\n assert(0)\n\ndef collect_file_info(cfg_file):\n cfg_parser = ConfigParser.ConfigParser()\n cfg_parser.read(cfg_file)\n\n hostname = socket.gethostname()\n scan_info_tbl = []\n sites = cfg_parser.sections()\n for site in sites:\n if site != 'COLLECTOR_OPTIONS':\n for id, scan_root in cfg_parser.items(site):\n scan_info_tbl.append([hostname, scan_root, site])\n\n partition_start_id = cfg_parser.getint('COLLECTOR_OPTIONS', 'partition_start_id')\n _get_partition_info(scan_info_tbl, partition_start_id)\n\n try:\n exclude_dirs = cfg_parser.get('COLLECTOR_OPTIONS', 'exclude_dirs').split(',')\n except Exception:\n exclude_dirs = []\n\n num_process = multiprocessing.cpu_count()\n if len(scan_info_tbl) < num_process:\n num_process = len(scan_info_tbl)\n\n logging.basicConfig(filename='file_info_collector.log',\n format='%(levelname)s:%(asctime)s:%(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n level=logging.INFO)\n logging.info('start collecting file info')\n\n work_queue = multiprocessing.Queue()\n for scan_info in scan_info_tbl:\n scan_info[-1] = exclude_dirs\n work_queue.put(scan_info)\n work_queue.put(DONE_SENTINEL)\n\n work_pool = multiprocessing.Pool(num_process, _collect_for_scanroot, (work_queue,))\n work_pool.close()\n work_pool.join()\n\n logging.info('end collecting file info')\n\ndef usage():\n print('usage: file_info_collector_m.py -f scan_cfg')\n exit(1)\n\n\nif __name__ == '__main__':\n 
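    # Typical invocation, matching usage() above ('scan.cfg' is a placeholder\n    # for a config file with per-site sections plus [COLLECTOR_OPTIONS]):\n    #     python file_info_collector_m.py -f scan.cfg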
parser = OptionParser()\n parser.add_option('-f', dest='cfg_file', help='cfg file containing scan fs mount list')\n (options, args) = parser.parse_args()\n if not options or not options.cfg_file:\n usage()\n\n collect_file_info(options.cfg_file)\n","sub_path":"Python/LPLoader/FLR/file_info_collector_m.py","file_name":"file_info_collector_m.py","file_ext":"py","file_size_in_byte":13937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"301132734","text":"f=open(\"primes1000000.txt\")\nn=input(\"Input an integer: \")\nflag=False\nif int(n)<=1000000:\n for line in f:\n if int(line)==int(n):\n print(\"It is a prime number.\")\n flag=True\n break\n if not flag:\n print(\"It is not a prime number.\")\nelse:\n for line in f:\n if int(n)%int(line)==0:\n flag=True\n break\n if flag:\n print(\"It is not a prime number.\")\n else:\n print(\"It is a prime number.\") \n","sub_path":"3_4.py","file_name":"3_4.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"304451928","text":"import re\nfrom datetime import datetime\n\nimport pytest\n\nfrom wetterdienst.dwd.radar import DWDRadarData\nfrom wetterdienst.util.datetime import round_minutes\nfrom wetterdienst.dwd.radar.metadata import DWDRadarParameter, DWDRadarDate\nfrom wetterdienst.dwd.radar.sites import DWDRadarSite\n\n\n@pytest.mark.remote\ndef test_radar_request_composite_latest_rx_reflectivity():\n \"\"\"\n Example for testing radar COMPOSITES latest.\n \"\"\"\n\n request = DWDRadarData(\n parameter=DWDRadarParameter.RX_REFLECTIVITY,\n start_date=DWDRadarDate.LATEST,\n )\n\n buffer = next(request.collect_data())[1]\n payload = buffer.getvalue()\n\n month_year = datetime.utcnow().strftime(\"%m%y\")\n header = (\n f\"RX......10000{month_year}BY 8101..VS 3SW 2.28.1PR E\\\\+00INT 5GP 900x 900MS \" # noqa:E501,B950\n f\"..<(asb,)?boo,ros,hnr,umd,pro,ess,fld,drs,neu,(nhb,)?oft,eis,tur,(isn,)?fbg(,mem)?>\" # noqa:E501,B950\n )\n\n assert re.match(bytes(header, encoding=\"ascii\"), payload[:160])\n\n\n@pytest.mark.remote\ndef test_radar_request_composite_latest_rw_reflectivity():\n \"\"\"\n Example for testing radar COMPOSITES (RADOLAN) latest.\n \"\"\"\n\n request = DWDRadarData(\n parameter=DWDRadarParameter.RW_REFLECTIVITY,\n start_date=DWDRadarDate.LATEST,\n )\n\n buffer = next(request.collect_data())[1]\n payload = buffer.getvalue()\n\n month_year = datetime.utcnow().strftime(\"%m%y\")\n header = (\n f\"RW......10000{month_year}\"\n f\"BY16201..VS 3SW 2.28.1PR E-01INT 60GP 900x 900MF 00000001MS \"\n f\"..\"\n )\n\n assert re.match(bytes(header, encoding=\"ascii\"), payload[:160])\n\n\n@pytest.mark.remote\ndef test_radar_request_site_latest_dx_reflectivity():\n \"\"\"\n Example for testing radar SITES latest.\n \"\"\"\n\n request = DWDRadarData(\n parameter=DWDRadarParameter.DX_REFLECTIVITY,\n start_date=DWDRadarDate.LATEST,\n site=DWDRadarSite.BOO,\n )\n\n buffer = next(request.collect_data())[1]\n payload = buffer.getvalue()\n\n timestamp_aligned = round_minutes(datetime.utcnow(), 5)\n day = timestamp_aligned.strftime(\"%d\")\n month_year = timestamp_aligned.strftime(\"%m%y\")\n header = f\"DX{day}....10132{month_year}BY.....VS 2CO0CD4CS0EP0.80.80.80.80.80.80.80.8MS\" # noqa:E501,B950\n assert re.match(bytes(header, encoding=\"ascii\"), 
payload[:160])\n","sub_path":"tests/dwd/radar/test_api_latest.py","file_name":"test_api_latest.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"8285315","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Author : Mike\n# @Contact : 597290963@qq.com\n# @Time : 2021/2/13 9:05\n# @File : Rob.py\nfrom typing import List\n\n\"\"\"\nYou are a professional thief planning to rob houses along a street. Each house hides a certain amount of cash; the only constraint on your plan is that adjacent houses share an interconnected burglar alarm, which goes off automatically if two adjacent houses are broken into on the same night.\n\nGiven a non-negative integer array representing the amount of money stored in each house, compute the highest amount you can steal in a single night without triggering the alarm.\n\nSource: LeetCode\nLink: https://leetcode-cn.com/problems/house-robber\nCopyright belongs to LeetCode. Commercial reprints require official authorization; non-commercial reprints must credit the source.\n\"\"\"\n\n\nclass Solution:\n\n    def rob(self, nums: List[int]) -> int:\n        \"\"\"\n        State 0 means the house is skipped, state 1 means it is robbed.\n        Transitions: dp[i][1] = dp[i - 1][0] + nums[i]\n                     dp[i][0] = max(dp[i - 1][0], dp[i - 1][1])\n        :param nums:\n        :return:\n        \"\"\"\n        if not nums:\n            return 0\n\n        dp = [[0 for i in range(2)] for _ in range(len(nums))]\n        dp[0][0] = 0\n        dp[0][1] = nums[0]\n\n        for i in range(1, len(nums)):\n            dp[i][0] = max(dp[i - 1][0], dp[i - 1][1])\n            dp[i][1] = dp[i - 1][0] + nums[i]\n\n        return max(dp[-1])\n\n    def rob1(self, nums: List[int]) -> int:\n        \"\"\"\n        Space-optimized, O(n).\n        Transition: dp[i] = max(dp[i - 2] + nums[i], dp[i - 1]), i.e. either rob the current house or skip it.\n        :param nums:\n        :return:\n        \"\"\"\n        if not nums:\n            return 0\n\n        if len(nums) == 1:\n            return nums[0]\n\n        dp = [0] * len(nums)\n        dp[0] = nums[0]\n        dp[1] = max(nums[0], nums[1])\n        for i in range(2, len(nums)):\n            dp[i] = max(dp[i - 2] + nums[i], dp[i - 1])\n\n        return dp[-1]\n\n    def rob2(self, nums: List[int]) -> int:\n        \"\"\"\n        Space-optimized, O(1).\n        \"\"\"\n        if not nums:\n            return 0\n\n        if len(nums) == 1:\n            return nums[0]\n\n        first = nums[0]\n        second = max(nums[0], nums[1])\n        for i in range(2, len(nums)):\n            first, second = second, max(first + nums[i], second)\n\n        return max(first, second)\n\n\nif __name__ == '__main__':\n    print(Solution().rob2([1, 2, 3, 1]))\n","sub_path":"datastructure/dp_exercise/Rob.py","file_name":"Rob.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"635056596","text":"\"\"\"\r\nInfo for parsing the music of SD Snatcher\r\n\"\"\"\r\nfrom konami_scc.common import Commands, COMMANDS_COMMON\r\nfrom konami_scc.games import ssnake\r\nfrom konami_scc.games.ssnake import COMMANDS, apply_command, state_to_controllers\r\n\r\nNAME = 'sdsnatcher'\r\n\r\nVST_MODE = 3\r\n\r\nCOMMANDS = COMMANDS_COMMON + [\r\n    (Commands.SINGLE_PARAM, [0xdb, 0xe2, 0xe3, 0xe4, 0xe7, 0xea,\r\n                             0xed, 0xee, 0xf2, 0xf3, 0xf8, 0xfe]),\r\n    (Commands.DOUBLE_PARAM, [0xd6, 0xda, 0xd7, 0xe6]),\r\n    (Commands.TRIPLE_PARAM, [0xd9, 0xde, 0xe5, 0xeb]),\r\n    (Commands.QUAD_PARAM, [0xf1, 0xf4]),\r\n    (Commands.COMMAND_F0, [0xf0]),\r\n]\r\n\r\napply_command = ssnake.apply_command\r\n\r\nstate_to_controllers = ssnake.state_to_controllers\r\n\r\n\r\n# multiple memory dumps, each with different memory mapper pages mapped into RAM\r\nMEMORY_DUMPS = ['sdsnatcher-a.bin', 'sdsnatcher-b.bin', 'sdsnatcher-c.bin', 'sdsnatcher-d.bin', 'sdsnatcher-e.bin', 'sdsnatcher-f.bin']\r\n\r\n\r\nTRACKS_START_ADDRESS = 0x674f\r\n\r\n\r\nTRACKS = [\r\n    (79, \"STRANGE OVERTURE\"),\r\n    (80, \"ANOTHER BAD ACCIDENT\"),\r\n    (81, \"DIFFICULT MOVE\"),\r\n    (128, \"MODERN CRUSADE\"),\r\n    (82, \"KASUGA\"),\r\n    (85, \"THE FIRST LEAD\"),\r\n    (75, \"STRESS\"),\r\n    (33, \"DETECTION\"),\r\n    (89, \"EXCLUSION\"),\r\n    (76, \"RESISTANCE\"),\r\n    (90, \"BIO SYNDROME\"),\r\n    (77, \"A POTENT POWER\"),\r\n
(78, \"MISFORTUNE\"),\r\n (93, \"THEME OF KATHERINE #1\"),\r\n (94, \"THEME OF KATHERINE #2\"),\r\n (121, \"FACTORY PLACE\"),\r\n (95, \"RECONCILIATION\"),\r\n (126, \"DESTRUCTION\"),\r\n (140, \"AMUSEMENT PARK\"),\r\n (122, \"CREDIT\"),\r\n (123, \"PERCUSSION HEAD\"),\r\n (124, \"HIGH TIDE\"),\r\n (125, \"LOW TIDE\"),\r\n (96, \"THE GATES OF HELL\"),\r\n (87, \"FOR M.M.\"),\r\n (117, \"EPICUREAN\"),\r\n (91, \"HEARTBEAT\"),\r\n (97, \"WARNING\"),\r\n (88, \"LET'S GO DOWNTOWN\"),\r\n (102, \"PRELUDE NO.1\"),\r\n (103, \"PRELUDE NO.2\"),\r\n (83, \"JUNKER HQ\"),\r\n (127, \"AN AWFUL FELLOW\"),\r\n (101, \"THE PEACEFUL AVENUE\"),\r\n (104, \"FUNNY\"),\r\n (99, \"BODY SNATCHERS\"),\r\n (120, \"AMAZED\"),\r\n (116, \"DECADENCE BEAT\"),\r\n (92, \"HIGH PRESSURE\"),\r\n (86, \"MYSTERIOUS CLUE\"),\r\n (108, \"HARRY'S RETROSPECTION\"),\r\n (106, \"THE MERRY-GO-ROUND\"),\r\n (107, \"THE DAY DREAM\"),\r\n (118, \"JIM & HILDA\"),\r\n (110, \"VANITY\"),\r\n (111, \"NORTH DOWNTOWN\"),\r\n (112, \"RECKLESS\"),\r\n (132, \"WILD TIMES\"),\r\n (136, \"CASE CLOSED\"),\r\n (130, \"IN DANGER\"),\r\n (105, \"HEART BREAK\"),\r\n (133, \"DEAR JAIME\"),\r\n (131, \"MASTER SNATCHER\"),\r\n (134, \"PROPAGANDA\"),\r\n (135, \"TRIUMPHAL ARCH\"),\r\n (114, \"SUNRISE,2\"),\r\n (115, \"CODA\"),\r\n (84, \"UNUSED\"),\r\n (113, \"UNUSED)\"),\r\n]\r\n","sub_path":"konami_scc/games/sdsnatcher.py","file_name":"sdsnatcher.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"611213587","text":"import numpy as np\nfrom scipy.stats import ttest_ind, ttest_ind_from_stats\nimport matplotlib.pyplot as plt\nfrom AdditionalFunc import*\n\n\n# Data processing\ndef OTSU_enhance(img_gray, th_begin=0, th_end=256, th_step=1):\n assert img_gray.ndim <= 2, \"must input a gary_img instead of {}d.\".format(img_gray.ndim)\n \n if max(img_gray)!=0:\n img_gray = 255*img_gray/max(img_gray)\n \n max_g = 0\n suitable_th = 0\n for threshold in range(th_begin, th_end, th_step):\n bin_img = img_gray > threshold\n bin_img_inv = img_gray <= threshold\n fore_pix = np.sum(bin_img)\n back_pix = np.sum(bin_img_inv)\n if 0 == fore_pix:\n break\n if 0 == back_pix:\n continue\n \n w0 = float(fore_pix) / img_gray.size\n u0 = float(np.sum(img_gray * bin_img)) / fore_pix\n w1 = float(back_pix) / img_gray.size\n u1 = float(np.sum(img_gray * bin_img_inv)) / back_pix\n # intra-class variance\n g = w0 * w1 * (u0 - u1) * (u0 - u1)\n if g > max_g:\n max_g = g\n suitable_th = threshold\n a = img_gray[np.nonzero(img_gray>suitable_th)]\n b = img_gray[np.nonzero(img_gray<=suitable_th)]\n t, p = ttest_ind(a, b, equal_var=False)\n if p<0.05:\n diff = True\n else:\n diff = False\n diff = True\n return (suitable_th, diff, img_gray)\n\ndef PolarClassifier(profile, ratio=1/5, Mode=\"Polarity\"):\n \n celllen = len(profile)\n threshold, diff, mod_profile = OTSU_enhance(profile)\n profile_2v = mod_profile>threshold # can be modified to real values.\n LPole = profile_2v[:int(celllen*ratio)]\n MCell = profile_2v[int(celllen*ratio):int(celllen*(1-ratio))]\n RPole = profile_2v[int(celllen*(1-ratio)):]\n \n L_verify = np.any(LPole)\n R_verify = np.any(RPole)\n M_verify = np.any(MCell)\n tmp = mod_profile.copy()\n mod_profile[np.nonzero(mod_profile<=threshold)]=0\n mod_profile[np.nonzero(mod_profile>threshold)]=1\n \n# fig = plt.figure(figsize=(8,3))\n# ax = fig.add_subplot(111)\n# ax.plot(np.arange(len(tmp)), tmp, color='g', linewidth=2)\n #ax.plot(np.arange(len(tmp)), 
np.ones(len(tmp))*threshold, color='grey', linewidth=2, linestyle='--')\n# ax.plot(np.arange(len(tmp)), np.ones(len(tmp))*np.mean(tmp), color='grey', linewidth=2, linestyle='--')\n# ax.plot(np.arange(len(tmp)), np.ones(len(tmp))*np.max(tmp), color='grey', linewidth=2, linestyle='--')\n# plt.grid('off')\n# plt.tight_layout()\n# ax.set_ylim([0, 255])\n# ax.spines['right'].set_visible(False)\n# ax.spines['top'].set_visible(False)\n# ax.yaxis.set_ticks_position('left')\n# ax.xaxis.set_ticks_position('bottom')\n# ax.axhline(linewidth=2, y=0, color='k')\n# ax.axvline(linewidth=2, x=0, color='k')\n# ax.patch.set_facecolor('white')\n# plt.show()\n \n #PeakRange = (np.nonzero(mod_profile>0)[0][0], np.nonzero(mod_profile>0)[0][-1])\n \n if diff == False:\n PeakRange = np.arange(len(tmp))\n ind = PeakRange[tmp[PeakRange].argmax()]\n PolePos = 1 if ind/len(tmp)>0.5 else 0\n return (\"diffuse\", PeakRange, PolePos)\n elif L_verify==True and R_verify==True:\n if M_verify == True:\n PeakRange = np.arange(len(tmp))\n ind = PeakRange[tmp[PeakRange].argmax()]\n PolePos = 1 if ind/len(tmp)>0.5 else 0\n return (\"diffuse\", PeakRange, PolePos)\n else:\n PeakRange = np.nonzero(mod_profile>0)[0]\n ind = PeakRange[tmp[PeakRange].argmax()]\n PolePos = 1 if ind/len(tmp)>0.5 else 0\n return (\"multipolar\", PeakRange, PolePos)\n elif L_verify==False and R_verify==False:\n if M_verify == True:\n PeakRange = np.nonzero(mod_profile>0)[0]\n ind = PeakRange[tmp[PeakRange].argmax()]\n PolePos = 1 if ind/len(tmp)>0.5 else 0\n return (\"midcell\", PeakRange, PolePos)\n else:\n PeakRange = np.arange(len(tmp))\n ind = PeakRange[tmp[PeakRange].argmax()]\n PolePos = 1 if ind/len(tmp)>0.5 else 0\n return (\"diffuse\", PeakRange, PolePos)\n else:\n if M_verify == True:\n PeakRange = np.nonzero(mod_profile>0)[0]\n ind = PeakRange[tmp[PeakRange].argmax()]\n PolePos = 1 if ind/len(tmp)>0.5 else 0\n return (\"occlusion\", PeakRange, PolePos)\n else:\n PeakRange = np.nonzero(mod_profile>0)[0]\n ind = PeakRange[tmp[PeakRange].argmax()]\n PolePos = 1 if ind/len(tmp)>0.5 else 0\n return (\"unipolar\", PeakRange, PolePos)\n\n\ndef PolarityScore(data, ratio=1/3, PopZ=False):\n # \"PopZ=0\" means there is a PopZ focus at left side of a cell.\n # \"PopZ=1\" means there is a PopZ focus at right side of a cell.\n # \"PopZ=3\" means there is no PopZ focus in a cell.\n new_data = []\n length = []\n for i, d in enumerate(data):\n length.append(len(d))\n q1 = int(ratio*(len(d)))\n q3 = int((1-ratio)*(len(d)))\n PS = 0\n \n left = d[0:q1].max()\n right = d[q3:].max()\n \n if np.mean(d) == 0:\n PS = 0\n elif PopZ!=False:\n if PopZ[i]==0:\n PS = left/np.mean(d)\n else:\n PS = right/np.mean(d)\n else:\n if right > left:\n PS = right/np.mean(d)\n else:\n PS = left/np.mean(d)\n \n new_data.append(PS)\n \n return (np.array(length), np.array(new_data), np.mean(new_data), np.std(new_data))\n\n \ndef flip_profile(data, ratio=1/3, PopZ=False):\n new_data = []\n for i, d in enumerate(data):\n q1 = int(ratio*(len(d)))\n q3 = int((1-ratio)*(len(d)))\n try:\n left = d[0:q1].max()\n right = d[q3:].max()\n except:\n print('error here!!!', d)\n \n if type(PopZ)!=type(False):\n if PopZ[i]==0:\n new_data.append(d)\n else:\n reversed_arr = d[::-1]\n new_data.append(reversed_arr)\n else:\n if right > left:\n reversed_arr = d[::-1]\n new_data.append(reversed_arr)\n else:\n new_data.append(d)\n \n return np.array(new_data)\n\n\ndef arr_length_norm(arrs, method):\n min_len = 2000\n for arr in arrs:\n tmp_len = len(arr)\n if tmp_len < min_len:\n min_len = tmp_len\n \n if 
method == 'mean':\n transform_arrs = []\n for arr in arrs:\n tmp_len = len(arr)\n if tmp_len > min_len:\n tmp_arr = []\n size = tmp_len-min_len+1\n for i in range(min_len):\n tmp_arr.append(np.mean(arr[i:i+size]))\n transform_arrs.append(np.array(tmp_arr))\n else:\n transform_arrs.append(arr.flatten())\n\n return np.array(transform_arrs)\n\n\ndef profile_statistics(data, Norm=True, NormV=None):\n EditProfile = []\n \n if Norm == True:\n for line in data:\n if np.max(line) == 0:\n line = np.array(line)/1\n else:\n line = np.array(line)/np.max(line)\n EditProfile.append(line)\n else:\n if NormV!=None:\n for line in data:\n line = np.array(line)/NormV\n EditProfile.append(line)\n else:\n EditProfile = data\n \n avg = np.mean(EditProfile, axis=0)\n std = np.std(EditProfile, axis=0)\n \n return (avg, std, EditProfile)\n\n\ndef TotalIntensity(data, mode=\"mean\"):\n if mode == \"mean\":\n func = np.mean\n else:\n func = np.sum\n TI = []\n for line in data:\n TI.append(func(line))\n tot_avg = np.mean(TI)\n tot_std = np.std(TI)\n \n return (tot_avg, tot_std, TI)\n\n \ndef RegionCompare(profiles, ratio=1/3, PopZ=False): # need to be fixed (consider PopZ side)\n Pole, PoleMax = [], []\n Oppo, OppoMax = [], []\n Mid, MidMax = [], []\n length = []\n for i, profile in enumerate(profiles):\n # Record the length of the cell.\n length.append(len(profile))\n # Compute the index of the three regions.\n head = int(len(profile)*ratio)\n tail = int(len(profile)*(1-ratio))\n # Sum up the total intensity of the three regions.\n fist = np.sum(profile[0:head])\n second = np.sum(profile[head:tail])\n third = np.sum(profile[tail:])\n # Normalize the intensity by the total intensity.\n total = np.sum(fist)+np.sum(second)+np.sum(third)\n ft_norm = fist/total\n sd_norm = second/total\n td_norm = third/total\n # The maximum values in the three regions.\n max1 = np.max(profile[0:head])/np.mean(total)\n max2 = np.max(profile[head:tail])/np.mean(total)\n max3 = np.max(profile[tail:])/np.mean(total)\n # Flip the intensity which locates at the PopZ to the left side of the cell.\n if type(PopZ)!=type(False):\n if PopZ[i]==0:\n Pole.append(ft_norm)\n Mid.append(sd_norm)\n Oppo.append(td_norm)\n PoleMax.append(max1)\n MidMax.append(max2)\n OppoMax.append(max3)\n else:\n Pole.append(td_norm)\n Mid.append(sd_norm)\n Oppo.append(ft_norm)\n PoleMax.append(max3)\n MidMax.append(max2)\n OppoMax.append(max1)\n else:\n if ft_norm > td_norm:\n PoleMax.append(max1)\n MidMax.append(max2)\n OppoMax.append(max3)\n Pole.append(ft_norm)\n Mid.append(sd_norm)\n Oppo.append(td_norm)\n else:\n Pole.append(td_norm)\n Mid.append(sd_norm)\n Oppo.append(ft_norm)\n PoleMax.append(max3)\n MidMax.append(max2)\n OppoMax.append(max1)\n # Statistics\n Pole_avg, Mid_avg, Oppo_avg = np.mean(Pole), np.mean(Mid), np.mean(Oppo)\n Pole_std, Mid_std, Oppo_std = np.std(Pole), np.std(Mid), np.std(Oppo)\n PoleM_avg, MidM_avg, OppoM_avg = np.mean(PoleMax), np.mean(MidMax), np.mean(OppoMax)\n PoleM_std, MidM_std, OppoM_std = np.std(PoleMax), np.std(MidMax), np.std(OppoMax)\n # Normalize the composition ratio.\n scaler = 1/(Pole_avg+Mid_avg+Oppo_avg)\n avg = (scaler*Pole_avg, scaler*Mid_avg, scaler*Oppo_avg)\n std = (scaler*Pole_std, scaler*Mid_std, scaler*Oppo_std)\n avgM = (PoleM_avg, MidM_avg, OppoM_avg)\n stdM = (PoleM_std, MidM_std, OppoM_std)\n return (avg, std, avgM, stdM)\n \n\ndef MidIntensity(profile, ratio):\n midregion_head = int(len(profile)*ratio)\n midregion_tail = int(len(profile)*(1-ratio))\n midtotal = np.sum(profile[midregion_head:midregion_tail])\n 
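    # Worked example (hypothetical numbers): for a 10-pixel profile of all ones\n    # with ratio=1/3, the mid region spans indices 3..5, so the function returns\n    # 3/10 = 0.3, i.e. the fraction of total intensity located at midcell.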
total = np.sum(profile)\n return midtotal/total\n\n\ndef LenNorm(profile, Len=150):\n profile = profile.flatten()\n \n def linearEq(x1, y1, x2, y2, value):\n a = (y2-y1)/(x2-x1)\n b = y1-a*x1\n return a*value + b\n \n def area(x1, y1, x2, y2):\n return (y1+y2)*abs(x2-x1)/2\n \n \n y_collect = []\n Grid = Len-1\n arr = np.zeros(Grid)\n profile_ind = np.arange(len(profile))\n if len(profile) != Len:\n stride = (len(profile)-1)/(Len-1)\n for i in range(Grid):\n if i == Grid-1:\n x_last = i*stride\n y_last = y_this\n ceil = np.nonzero(x_last<=profile_ind)[0][0]\n x_this = profile_ind[-1]\n y_this = profile[x_this]\n midpoints = x_this - ceil\n y_collect.append(y_this)\n \n if midpoints == 0:\n arr[i] = area(x_last, y_last, x_this, y_this)\n else:\n cover = 0\n for j in range(midpoints+1):\n if j==0:\n cover+=area(x_last, y_last, ceil+j, profile[ceil+j])\n elif j==midpoints:\n cover+=area(ceil+j-1, profile[ceil+j-1], x_this, y_this)\n else:\n cover+=area(ceil+j-1, profile[ceil+j-1], ceil+j, profile[ceil+j])\n arr[i] = cover\n else:\n x_this = (i+1)*stride\n x_last = i*stride\n ceil = np.nonzero(x_this<=profile_ind)[0][0]\n floor = np.nonzero(x_last<=profile_ind)[0][0]\n midpoints = ceil-floor\n x2, x1 = ceil, ceil-1\n y2, y1 = profile[x2], profile[x1]\n y_this = linearEq(x1, y1, x2, y2, x_this)\n\n\n if i==0:\n x0, y0 = 0, profile[0]\n y_collect.append(y0)\n if midpoints == 0:\n arr[0] = area(x0, y0, x_this, y_this)\n else:\n cover = 0\n for j in range(midpoints+1):\n if j==0:\n cover+=area(x0, y0, floor+j, profile[floor+j])\n elif j==midpoints:\n cover+=area(floor+j-1, profile[floor+j-1], x_this, y_this)\n else:\n cover+=area(floor+j-1, profile[floor+j-1], floor+j, profile[floor+j])\n arr[0] = cover\n else:\n if midpoints == 0:\n arr[i] = area(x_last, y_last, x_this, y_this)\n else:\n cover = 0\n for j in range(midpoints+1):\n if j==0:\n cover+=area(x_last, y_last, floor+j, profile[floor+j])\n elif j==midpoints:\n cover+=area(floor+j-1, profile[floor+j-1], x_this, y_this)\n else:\n cover+=area(floor+j-1, profile[floor+j-1], floor+j, profile[floor+j])\n arr[i] = cover\n\n y_last = y_this\n y_collect.append(y_this)\n\n else:\n y_collect.append(profile[0])\n for i in range(Len-1):\n x2, x1 = profile_ind[i+1], profile_ind[i]\n y2, y1 = profile[x2], profile[x1]\n y_collect.append(y2)\n cover = area(x1, y1, x2, y2)\n arr[i] = cover\n\n return arr, y_collect\n\ndef Batch_LenNorm(profiles, option='points', Length=150):\n res_collect = []\n for profile in profiles:\n arr, y_collect = LenNorm(profile, Len=150)\n if option==\"area\":\n res_collect.append(arr)\n else:\n res_collect.append(y_collect)\n \n return res_collect\n\n\ndef MassCenterScore(data, PopZ=False):\n new_data = []\n length = []\n for i, line in enumerate(data):\n length.append(len(line))\n if MassCenter1D(line)>len(line)-1:\n print('bugbug')\n a = 1-(MassCenter1D(line)/(len(line)-1))\n b = (MassCenter1D(line)/(len(line)-1))\n if type(PopZ)!=type(False):\n if PopZ[i]==0:\n new_data.append(b)\n else:\n new_data.append(a)\n else:\n if a>b:\n new_data.append(b)\n else:\n new_data.append(a)\n \n return (np.array(length), np.array(new_data), np.mean(new_data), np.std(new_data))\n\n\ndef PolarLocalRatio(data, ratio=1/3, PopZ=False, PeakRange=[]):\n new_data = []\n length = []\n for i, d in enumerate(data):\n length.append(len(d))\n q1 = int(ratio*(len(d)))\n q3 = int((1-ratio)*(len(d)))\n tot = np.sum(d)\n PUL = 0\n \n left = np.sum(d[0:q1])\n right = np.sum(d[q3:])\n if tot == 0:\n PUL = 0\n else:\n if type(PopZ)!=type(False):\n if 
PeakRange:\n PUL = np.sum(d[PeakRange[i]])/tot\n else:\n if PopZ[i]==0:\n PUL = left/tot\n else:\n PUL = right/tot\n else:\n if right > left:\n PUL = right/tot\n else:\n PUL = left/tot\n \n new_data.append(PUL)\n \n return (np.array(length), np.array(new_data), np.mean(new_data), np.std(new_data))\n\ndef PatternNormalize(profiles1, profiles2):\n # profiles1 is the reference of the average spatial pattern used to normalize.\n # profiles1 and 2 have to have the same length (dimension).\n profile_collect = []\n for profile in profiles2:\n profile_collect.append(profile/profiles1)\n return profile_collect\n\n\ndef Polar2ndChannel(data2, PeakRange, mode='colocalization'):\n \"\"\"\n !!!The profiles in the data should have normalized length and correct direction.\n \"\"\"\n new_data = []\n for i, d in enumerate(zip(data2, PeakRange)):\n ind_arr = np.array(d[1])\n sig_arr = np.array(d[0])\n polar_tot = np.sum(sig_arr[ind_arr])\n diff_pixel = (np.sum(sig_arr)-polar_tot)/(len(sig_arr)-len(ind_arr))\n B = len(sig_arr)*diff_pixel\n A = polar_tot-(diff_pixel*len(ind_arr))\n if A+B == 0:\n Bowman_colocal = 0\n else:\n Bowman_colocal = A/(A+B)\n if mode == 'colocalization':\n new_data.append(Bowman_colocal)\n else:\n new_data.append(polar_tot)\n \n return (np.array(new_data), np.mean(new_data), np.std(new_data))\n\n\ndef LR_diff(data, PopZ=False, ratio=1/3, pseudo=0):\n new_data = []\n length = []\n for i, line in enumerate(data):\n q1 = int(ratio*(len(line)))\n q3 = int((1-ratio)*(len(line)))\n tmpL = len(line[:q1])\n L = np.sum(line[:q1])+pseudo\n R = np.sum(line[-tmpL:])+pseudo\n if len(line[:q1])!=len(line[-tmpL:]):\n print('len error')\n exit()\n if L==0 or R==0:\n #new_data.append(1)\n print('skip')\n else:\n if PopZ!=False:\n #if type(PopZ)!=type(False):\n if PopZ[i]==0:\n new_data.append(L/R)\n else:\n new_data.append(R/L)\n else:\n if (L/R)>(R/L):\n new_data.append(L/R)\n else:\n new_data.append(R/L) \n return (np.array(length), np.array(new_data), np.mean(new_data), np.std(new_data))\n\n\ndef LR_max(data, PopZ=False, ratio=1/3, pseudo=10**(-5)):\n new_data = []\n length = []\n for i, line in enumerate(data):\n q1 = int(ratio*(len(line)))\n q3 = int((1-ratio)*(len(line)))\n L = np.max(line[:q1])+pseudo\n R = np.max(line[q3:])+pseudo\n\n a = L/R\n b = R/L\n if PopZ!=False:\n #if type(PopZ)!=type(False):\n if PopZ[i]==0:\n new_data.append(a)\n else:\n new_data.append(b)\n else:\n if a>b:\n new_data.append(a)\n else:\n new_data.append(b)\n \n return (np.array(length), np.array(new_data), np.mean(new_data), np.std(new_data))\n\ndef Pearson_Correlation(x_arrs, y_arrs, ratio=1/3, pseudo_v=10**(-5)):\n # pearson coeff = cov(x, y) / ( sig(x) * sig(y) )\n res = []\n for x_arr, y_arr in zip(x_arrs, y_arrs):\n r = np.corrcoef(x_arr.flatten(), y_arr.flatten())\n if np.isnan(r[0][1]):\n pass\n# print('zero std bug', r, np.std(x_arr), np.std(y_arr))\n else:\n res.append(r[0][1])\n \n return res\n\n\ndef PeakFeature(profile, PeakRange):\n height = np.max(profile)\n width = len(PeakRange)\n if height<0:\n print('width bug')\n return height/width\n\n\ndef kymo_preprocess(profileCh1, profileCh2):\n \"\"\"\n ::profileCh1::\n The input is an one-layer array.\n It contains [[mother cell array], [daughter cell array]].\n Besides, the second input ::profileCh2:: represents the second channel signals.\n \"\"\"\n # Always put the parental cell (with higher PopZ peak) at left side.\n# if np.any(profileCh1[1]): # If and only if the second cell does not contain nothing.\n# if max(profileCh1[0])= 0.5:\n profileCh1[0] = 
profileCh1[0][::-1]\n profileCh2[0] = profileCh2[0][::-1]\n\n # Merge the parental and daughter cell profiles.\n if np.any(profileCh1[1]): # If and only if the second cell does not contain nothing.\n kymo_tmp1 = np.concatenate((profileCh1[0].flatten(), profileCh1[1].flatten()))\n kymo_tmp2 = np.concatenate((profileCh2[0].flatten(), profileCh2[1].flatten()))\n if np.max(profileCh2[1].flatten()) == 0:\n daughter = 0\n else:\n #sum\n daughter = np.sum(profileCh2[1].flatten())#np.max(profileCh2[1].flatten()))\n if np.max(profileCh2[0].flatten()) == 0:\n mother = 0\n else:\n #sum\n mother = np.sum(profileCh2[0].flatten())#np.max(profileCh2[0].flatten()))\n \n else: # There is only one cell.\n kymo_tmp1 = profileCh1[0].flatten()\n kymo_tmp2 = profileCh2[0].flatten()\n if np.max(profileCh2[0].flatten()) == 0:\n mother = 0\n else:\n #sum\n mother = np.sum(profileCh2[0].flatten())\n daughter = 0\n kymo1 = (kymo_tmp1)#/kymo_tmp1.max())\n kymo2 = (kymo_tmp2)#/kymo_tmp2.max())\n\n if np.isnan(mother) or np.isnan(daughter):\n print('nan error')\n return (kymo1, kymo2, mother, daughter)","sub_path":"AnalysisFunc.py","file_name":"AnalysisFunc.py","file_ext":"py","file_size_in_byte":22186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"38386630","text":"import os\n\nfrom django.conf import settings\n\nfrom django.http import HttpResponse, JsonResponse\nfrom django.views.generic import View, TemplateView\n\n\n#Functional based view\nclass PostListView1(View):\n def get(self, request):\n name = 'Jack Valentine'\n html = '''\n
<h1>AskDjango</h1>\n            <p>{name}</p>\n            <p>Let's study hard!</p>
\n        '''.format(name=name)\n        return HttpResponse(html)\n\npost_list1 = PostListView1.as_view()\n\n\n#Template based view\nclass PostListView2(TemplateView):\n    template_name = 'dojo/post_list.html'\n\n    def get_context_data(self):\n        context = super().get_context_data()\n        context['name'] = 'Jack Valentine'\n        return context\n\npost_list2 = PostListView2.as_view()\n\n\n#JSON Return\nclass PostListView3(View):\n    def get(self, request):\n        return JsonResponse(self.get_data(), json_dumps_params={'ensure_ascii': False})\n\n    def get_data(self):\n        return {\n            'message': 'Hello Python & Django',\n            'items': ['Python', 'Django', 'Celery', 'Azure', 'AWS']\n        }\n\npost_list3 = PostListView3.as_view()\n\n\n#Binary File Download\nclass ExcelDownloadView(View):\n    def get(self, request):\n        filepath = os.path.join(settings.UPLOADED_DIR, 'lorem-ipsum(excel-95).xls')\n        filename = os.path.basename(filepath)\n        with open(filepath, 'rb') as f:\n            response = HttpResponse(f, content_type='application/vnd.ms-excel')\n            response['Content-Disposition'] = 'attachment; filename=\"{}\"'.format(filename)\n            return response\n\nexcel_download = ExcelDownloadView.as_view()","sub_path":"askdjango/dojo/views_cbv.py","file_name":"views_cbv.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"292227251","text":"import time\n\nclass Motor:\n    '''\n    Motor wrapper class. Create a motor object by passing in\n    the desired adafruit motorkit and the desired motor number.\n    The motor will be initialized to initialThrottle. Using a\n    wrapper allows for custom throttle behaviour.\n    '''\n    def __init__(self, motorKit, index, initialThrottle):\n        self.motorKit = motorKit\n        if index == 1:\n            self.motorFunction = motorKit.motor1\n        elif index == 2:\n            self.motorFunction = motorKit.motor2\n        elif index == 3:\n            self.motorFunction = motorKit.motor3\n        elif index == 4:\n            self.motorFunction = motorKit.motor4\n        else:\n            pass\n\n        self.prevThrottle = initialThrottle\n\n    def throttle(self, value, delay):\n        '''\n        Throttle the adafruit motor associated with this object\n        to the given value.\n        Deprecated: Gradually increase/decrease the throttle to\n        the given value with the given delay between each step.\n        '''\n        difference = value - self.prevThrottle\n        if difference == 0:\n            return\n        step = round(difference/(abs(difference)*1), 1)\n        temp = self.prevThrottle\n        try:\n            self.motorFunction.throttle = value\n        except:\n            self.prevThrottle = value\n            return\n        self.prevThrottle = value\n","sub_path":"MotorControl/motor.py","file_name":"motor.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"605788182","text":"import random\nimport sys\n\nspells = {\n    'MagicMissile': 53,\n    'Drain': 73,\n    'Shield': 113,\n    'Poison': 173,\n    'Recharge': 229\n}\nboss_hit = 58\nboss_damage = 9\nown_hit = 50\nown_mana = 500\nown_armor = 0\n\n\ndef play_game():\n    b_hit = boss_hit\n    o_hit = own_hit\n    o_armor = own_armor\n    o_mana = own_mana\n    shield_timer = 0\n    poison_timer = 0\n    recharge_timer = 0\n    spend_mana = 0\n    i = 0\n\n    while o_hit > 0 and b_hit > 0:\n        # if i % 2 == 0:\n        #     print('-- Player turn --')\n        # else:\n        #     print('-- Boss turn --')\n        # print('- Player has ' + str(o_hit) + ' hit points, ' + str(o_armor) + ' armor, ' + str(o_mana) + ' mana')\n        # print('- Boss has ' + str(b_hit) + ' hit points')\n\n        if i % 2 == 0:  # My turn\n            spell = None\n\n            for j in xrange(100):\n                spell = random.choice(spells.keys())\n                if 
not ((spell == 'Shield' and shield_timer > 0) or \\\n                    (spell == 'Poison' and poison_timer > 0) or \\\n                    (spell == 'Recharge' and recharge_timer > 0)):\n                    if o_mana >= spells[spell]:\n                        o_mana -= spells[spell]\n                        spend_mana += spells[spell]\n                        break\n                    else:\n                        spell = None\n                else:\n                    # effect still active, so this spell cannot be recast: discard and retry\n                    spell = None\n\n            if spell == 'MagicMissile':\n                # print('Player casts Magic Missile.')\n                b_hit -= 4\n            elif spell == 'Drain':\n                # print('Player casts Drain.')\n                b_hit -= 2\n                o_hit += 2\n            elif spell == 'Shield':\n                # print('Player casts Shield.')\n                shield_timer = 6\n                o_armor = 7\n            elif spell == 'Poison':\n                # print('Player casts Poison.')\n                poison_timer = 6\n            elif spell == 'Recharge':\n                # print('Player casts Recharge.')\n                recharge_timer = 5\n            else:\n                return sys.maxint\n        else:  # Boss turn\n            # print('Boss attacks for ' + str(boss_damage - o_armor) + ' damage')\n            o_hit -= (boss_damage - o_armor) if (boss_damage - o_armor) > 1 else 1\n\n        # print('')\n\n        if o_hit > 0 and b_hit > 0:\n            if shield_timer == 0:\n                o_armor = 0\n            if shield_timer > 0:\n                # print('Shield timer')\n                shield_timer -= 1\n            if poison_timer > 0:\n                # print('Poison timer')\n                b_hit -= 3\n                poison_timer -= 1\n            if recharge_timer > 0:\n                # print('Recharge timer')\n                o_mana += 101\n                recharge_timer -= 1\n\n        i += 1\n\n    return spend_mana if o_hit > 0 else sys.maxint\n\nmin_mana = sys.maxint\nfor i in xrange(100000):\n    costs = play_game()\n    if costs < min_mana:\n        min_mana = costs\n\nprint(min_mana)\n","sub_path":"day22p1.py","file_name":"day22p1.py","file_ext":"py","file_size_in_byte":2940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"441814092","text":"from 
itertools import product\nfrom skimage import io\nimport pandas as pd\nimport numpy as np\nimport fnmatch\nimport os\n\n\n# high level function for building dataset\ndef read_dataset(source_dir: str, classes: list, file_pattern: str, standard_shape: list):\n source_class_gen = make_images_sources(source_dir, classes, file_pattern)\n dataset = build_dataset(source_class_gen, standard_shape)\n return dataset\n\n\n# read the source directory content and return all paths of classes images_for_labs\ndef make_images_sources(source_dir, classes, file_pattern):\n sources = list()\n for class_name in classes:\n full_paths_files_source_dir = (os.path.abspath(\"{}/{}\".format(source_dir, file_name))\n for file_name in os.listdir(source_dir))\n class_sources = fnmatch.filter(full_paths_files_source_dir, file_pattern.format(class_name))\n sources.extend(class_sources)\n yield from product(class_sources, [class_name])\n\n\n# read each image, cut it for standard size, reshape for feature vector form (1 * features)\n# and insert into the DataFrame also with the class label\ndef build_dataset(source_class_gen, std_shape):\n columns = make_columns(std_shape) + [\"class\"]\n rows = list()\n\n for image_path, class_name in source_class_gen:\n feature_vector_length = np.prod(std_shape)\n image_matrix = io.imread(image_path)\n cut_img = cut_image(image_matrix, std_shape)\n feature_vector = cut_img.reshape(feature_vector_length)\n rows.append((*feature_vector, class_name))\n return pd.DataFrame(data=rows, columns=columns), columns[:-1]\n\n\ndef make_columns(std_shape):\n return [\"{}:{}:{}\".format(*triple) for triple in product(range(1, std_shape[0] + 1),\n range(1, std_shape[1] + 1),\n range(1, std_shape[2] + 1))]\n\n\n# cut standard image size from the center\ndef cut_image(image_matrix, std_shape):\n real_shape = image_matrix.shape\n\n height_border = calc_border(std_shape, real_shape, 0)\n width_border = calc_border(std_shape, real_shape, 1)\n\n height_size = calc_size(std_shape, real_shape, 0)\n width_size = calc_size(std_shape, real_shape, 1)\n\n result_matrix = np.zeros(std_shape)\n result_matrix[:height_size, :width_size] = image_matrix[height_border:height_border+height_size,\n width_border:width_size+width_border]\n return result_matrix\n\n\n# calc the border value\ndef calc_border(std_vals, real_vals, axis_index):\n return only_positive_int_numbers((real_vals[axis_index] - std_vals[axis_index]) / 2)\n\n\n# return positive number or 0\ndef only_positive_int_numbers(val):\n return int(val) if val > 0 else 0\n\n\n# if the real image size param(width or height through the axis_index) is bigger than\n# standard return standard value if not return real value of the image\ndef calc_size(std_vals, real_vals, axis_index):\n real_param = real_vals[axis_index]\n std_param = std_vals[axis_index]\n return std_param if real_param - std_param > 0 else real_param","sub_path":"labs/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"502871883","text":"import sys\nimport logging\n\nfrom tornado.tcpserver import TCPServer\n\nfrom packet import InPacket, OutPacket, PacketStructureError\n\n\nclass TCPConsoleServer(TCPServer):\n cls_handler = None\n\n def __init__(self, *args, **kwargs):\n self.handler = self.cls_handler(kwargs.pop('user_data'), self)\n super().__init__(*args, **kwargs)\n\n def handle_stream(self, stream, address):\n StreamReader(stream, address, self)\n\n def dispatch(self, 
stream_r, method, data, request_id):\n self.handler.route(stream_r, method, data, request_id)\n\n\nclass StreamReader(object):\n\n terminator = b'\\n'\n\n def __init__(self, stream, address, server):\n self.stream = stream\n self.address = address\n self.server = server\n self.stream.set_close_callback(self._on_client_connection_close)\n self._read_data()\n\n def _on_client_connection_close(self):\n sys.exit(0)\n\n def _read_data(self):\n self.stream.read_until(self.terminator, self._on_data)\n\n def _on_data(self, data):\n data = data.decode('utf-8')\n logging.debug(\"checkio-cli server:: received: {}\".format(data))\n if data is None:\n logging.error(\"Client sent an empty data: {}\".format(self.address), exc_info=True)\n else:\n try:\n packet = InPacket.decode(data)\n except PacketStructureError as e:\n logging.error(e, exc_info=True)\n else:\n self.server.dispatch(self, **packet.get_all_data())\n self._read_data()\n\n def write(self, method, data=None, request_id=None, callback=None):\n if self.stream.closed():\n raise Exception('Connection is closed')\n\n message = OutPacket(method, data, request_id).encode()\n try:\n self.stream.write(message.encode('utf-8') + self.terminator, callback=callback)\n logging.debug(\"checkio-cli server:: write {}\".format(message))\n except Exception as e:\n logging.error(e, exc_info=True)\n\n def write_select_result(self, result, request_id):\n self.write(OutPacket.METHOD_SELECT_RESULT, result, request_id=request_id)\n","sub_path":"interfaces/checkio_cli/src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"259123804","text":"'''\nName:\t\tProblem 23\nA perfect number is a number for which the sum of its proper divisors is exactly equal to the number. For example, the sum of the proper divisors of 28 would be 1 + 2 + 4 + 7 + 14 = 28, which means that 28 is a perfect number.\n\nA number n is called deficient if the sum of its proper divisors is less than n and it is called abundant if this sum exceeds n.\n\nAs 12 is the smallest abundant number, 1 + 2 + 3 + 4 + 6 = 16, the smallest number that can be written as the sum of two abundant numbers is 24. By mathematical analysis, it can be shown that all integers greater than 28123 can be written as the sum of two abundant numbers. However, this upper limit cannot be reduced any further by analysis even though it is known that the greatest number that cannot be expressed as the sum of two abundant numbers is less than this limit.\n\nFind the sum of all the positive integers which cannot be written as the sum of two abundant numbers.\n\n1. Check to see if the number is abundant\n2. If it is abundant, add it to the list\n2. 
See if the number (abundant or not) is the sum of 2 abundant numbers\n\n'''\nimport math\ndef sumFactors(number):\n\tsum=1\n\troot=int(math.sqrt(number))\n\tfor i in range(2,root+1):\n\t\tif (number%i)==0:\n\t\t\tif(i==number/i):\n\t\t\t\tsum=sum+i\n\t\t\telse:\n\t\t\t\tsum=sum+i+number/i\n\tif(sum>number):\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef checkSum(numberList, number):\n\tfor num in numberList:\n\t\tif(num>number/2):\n\t\t\treturn False\n\t\telse:\n\t\t\tif ((number-num) in numberList):\n\t\t\t\treturn True\n\treturn False\n\nabundantNumbers=[]\nsumOfTwo=False\nsum=0\nfor i in range(1,28124):\n\tabundant=sumFactors(i)\n\tif(abundant):\n\t\tabundantNumbers.append(i)\n\tif(i<24):\n\t\tsum=sum+i\n\telse:\n\t\tsumOfTwo=checkSum(abundantNumbers,i)\n\t\tif(not sumOfTwo):\n\t\t\tsum=sum+i\nprint(str(sum))\n","sub_path":"problem23.py","file_name":"problem23.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"132939629","text":"import sys\nfrom time import sleep\nfrom playwright.async_api import Dialog\nfrom os.path import dirname, abspath\nsys.path.insert(0, dirname(dirname(abspath(__file__))))\nfrom element.baidu_element import BaiduElem\nfrom playwright.sync_api import Page\n\n\ndef test_baidu_search(page: Page, base_url):\n \"\"\"\n 名称:百度搜索\"playwright\"\n 步骤:\n 1、打开浏览器\n 2、输入\"playwright\"关键字\n 3、点击搜索按钮\n 检查点:\n * 检查页面标题是否相等。\n \"\"\"\n page.goto(base_url)\n page.type(BaiduElem.search_input, text=\"playwright\")\n page.click(BaiduElem.search_button)\n sleep(2)\n assert page.title() == \"playwright_百度搜索\"\n\n\ndef test_baidu_search_setting(page, base_url):\n \"\"\"\n 名称:百度搜索设置\n 步骤:\n 1、打开百度浏览器\n 2、点击设置链接\n 3、在下拉框中\"选择搜索\"\n 4、点击\"保存设置\"\n 5、对弹出警告框保存\n 检查点:\n * 检查是否弹出提示框\n \"\"\"\n page.goto(base_url)\n page.click(BaiduElem.settings)\n page.click(BaiduElem.search_setting)\n sleep(2)\n page.click(BaiduElem.save_setting)\n\n def on_dialog(dialog: Dialog):\n assert dialog.type == \"alert\"\n assert dialog.message == \"已经记录下您的使用偏好\"\n dialog.accept()\n\n page.on(\"dialog\", on_dialog)\n\n\ndef test_zzzz(page: Page, base_url):\n assert 2+ 2 == 4","sub_path":"test_dir/test_baidu.py","file_name":"test_baidu.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"640227007","text":"import pandas as pd\nfrom potara.summarizer import Summarizer\nfrom potara.document import Document\n\nfile_list = ['Boeing Crisis.csv', 'Brexit.csv', 'Hong Kong Protests.csv', 'Iran Sanctions.csv', 'US China Trade War.csv']\n\ndef summarize(file):\n s = Summarizer()\n to_display = {}\n # for i in range(len(file_list)):\n issue = file.split('.')[0]\n df = pd.read_csv(file).sort_values(by=['probability'], ascending=False)\n articles = df['content'].tolist()\n\n output = []\n \n print('Preparing Documents for Summarization')\n print('------------------------------------------------------')\n for k in range(0,10): \n output.append(Document(articles[k]))\n print('------------------------------------------------------')\n\n print('Summarization Process:', issue)\n print('------------------------------------------------------')\n s.clearDocuments\n\n # Adding docs, preprocessing them and computing some infos for the summarizer\n s.setDocuments(output)\n\n # Summarizing, where the actual work is done\n s.summarize(wordlimit=200)\n\n # You can then print the summary\n summary = s.summary\n 
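    # s.summary holds the list of sentences potara selected; they are printed
    # and then joined into a single string for the CSV export below.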
print('------------------------------------------------------')\n\n print(summary)\n \n to_display[0] = {'group': issue, 'summary': ' '.join(summary)}\n\n output_df = pd.DataFrame.from_dict(to_display)\n output_df = output_df.transpose()\n output_file = issue +'_summary.csv'\n print(output_file)\n output_df.to_csv(output_file, encoding='utf-8-sig')\n\nsummarize(file_list[4])","sub_path":"model/summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"203010115","text":"import torch\nimport torch.nn.functional as F\nfrom torch import nn, einsum\n\nfrom einops import rearrange, repeat\nfrom einops.layers.torch import Rearrange\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef batched_index_select(values, indices, dim = 1):\n value_dims = values.shape[(dim + 1):]\n values_shape, indices_shape = map(lambda t: list(t.shape), (values, indices))\n indices = indices[(..., *((None,) * len(value_dims)))]\n indices = indices.expand(*((-1,) * len(indices_shape)), *value_dims)\n value_expand_len = len(indices_shape) - (dim + 1)\n values = values[(*((slice(None),) * dim), *((None,) * value_expand_len), ...)]\n\n value_expand_shape = [-1] * len(values.shape)\n expand_slice = slice(dim, (dim + value_expand_len))\n value_expand_shape[expand_slice] = indices.shape[expand_slice]\n values = values.expand(*value_expand_shape)\n\n dim += value_expand_len\n return values.gather(dim, indices)\n\ndef fourier_encode_dist(x, num_encodings = 4, include_self = True):\n x = x.unsqueeze(-1)\n device, dtype, orig_x = x.device, x.dtype, x\n scales = 2 ** torch.arange(num_encodings, device = device, dtype = dtype)\n x = x / scales\n x = torch.cat([x.sin(), x.cos()], dim=-1)\n x = torch.cat((x, orig_x), dim = -1) if include_self else x\n return x\n\n# classes\n\n# this follows the same strategy for normalization as done in SE3 Transformers\n# https://github.com/lucidrains/se3-transformer-pytorch/blob/main/se3_transformer_pytorch/se3_transformer_pytorch.py#L95\n\nclass CoorsNorm(nn.Module):\n def __init__(self, eps = 1e-8):\n super().__init__()\n self.eps = eps\n self.fn = nn.LayerNorm(1)\n\n def forward(self, coors):\n norm = coors.norm(dim = -1, keepdim = True)\n normed_coors = coors / norm.clamp(min = self.eps)\n phase = self.fn(norm)\n return (phase * normed_coors)\n\nclass PreNorm(nn.Module):\n def __init__(self, dim, fn):\n super().__init__()\n self.fn = fn\n self.norm = nn.LayerNorm(dim)\n\n def forward(self, feats, coors, **kwargs):\n feats = self.norm(feats)\n feats, coors = self.fn(feats, coors, **kwargs)\n return feats, coors\n\nclass Residual(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n\n def forward(self, feats, coors, **kwargs):\n feats_out, coors_delta = self.fn(feats, coors, **kwargs)\n return feats + feats_out, coors + coors_delta\n\nclass GEGLU(nn.Module):\n def forward(self, x):\n x, gates = x.chunk(2, dim = -1)\n return x * F.gelu(gates)\n\nclass FeedForward(nn.Module):\n def __init__(\n self,\n *,\n dim,\n mult = 4,\n dropout = 0.\n ):\n super().__init__()\n self.net = nn.Sequential(\n nn.Linear(dim, dim * 4 * 2),\n GEGLU(),\n nn.Dropout(dropout),\n nn.Linear(dim * 4, dim)\n )\n\n def forward(self, feats, coors):\n return self.net(feats), 0\n\nclass EquivariantAttention(nn.Module):\n def __init__(\n self,\n *,\n dim,\n dim_head = 64,\n heads = 4,\n edge_dim = 0,\n m_dim = 16,\n fourier_features = 4,\n norm_rel_coors = False,\n 
num_nearest_neighbors = 0,\n only_sparse_neighbors = False,\n coor_attention = False,\n valid_neighbor_radius = float('inf'),\n init_eps = 1e-3,\n soft_edges = False,\n rel_coors_sign_gating = False\n ):\n super().__init__()\n self.fourier_features = fourier_features\n self.num_nearest_neighbors = num_nearest_neighbors\n self.only_sparse_neighbors = only_sparse_neighbors\n self.valid_neighbor_radius = valid_neighbor_radius\n\n attn_inner_dim = heads * dim_head\n self.heads = heads\n self.to_qkv = nn.Linear(dim, attn_inner_dim * 3, bias = False)\n self.to_out = nn.Linear(attn_inner_dim, dim)\n\n pos_dim = (fourier_features * 2) + 1\n edge_input_dim = (dim_head * 2) + edge_dim\n\n self.to_pos_emb = nn.Sequential(\n nn.Linear(pos_dim, dim_head * 2),\n nn.ReLU(),\n nn.Linear(dim_head * 2, dim_head)\n )\n\n self.edge_mlp = nn.Sequential(\n nn.Linear(edge_input_dim, edge_input_dim * 2),\n nn.ReLU(),\n nn.Linear(edge_input_dim * 2, m_dim),\n nn.ReLU()\n )\n\n self.edge_gate = nn.Sequential(\n nn.Linear(m_dim, 1),\n nn.Sigmoid()\n ) if soft_edges else None\n\n self.to_attn_mlp = nn.Sequential(\n nn.Linear(m_dim, m_dim * 4),\n nn.ReLU(),\n nn.Linear(m_dim * 4, 1),\n Rearrange('... () -> ...')\n )\n\n self.coors_mlp = nn.Sequential(\n nn.Linear(m_dim, m_dim * 4),\n nn.ReLU(),\n nn.Linear(m_dim * 4, 1),\n Rearrange('... () -> ...')\n )\n\n self.coors_gate = nn.Sequential(\n nn.Linear(heads, heads),\n nn.Tanh()\n ) if rel_coors_sign_gating else None\n\n self.rel_coors_norm = CoorsNorm() if norm_rel_coors else nn.Identity()\n\n self.coor_attention = coor_attention\n\n self.to_coors_out = nn.Sequential(\n nn.Linear(heads, 1, bias = False),\n Rearrange('... () -> ...')\n )\n\n self.init_eps = init_eps\n self.apply(self.init_)\n\n def init_(self, module):\n if type(module) in {nn.Linear}:\n nn.init.normal_(module.weight, std = self.init_eps)\n\n def forward(\n self,\n feats,\n coors,\n edges = None,\n mask = None,\n adj_mat = None\n ):\n b, n, d, h, fourier_features, num_nn, only_sparse_neighbors, valid_neighbor_radius, device = *feats.shape, self.heads, self.fourier_features, self.num_nearest_neighbors, self.only_sparse_neighbors, self.valid_neighbor_radius, feats.device\n\n assert not (only_sparse_neighbors and not exists(adj_mat)), 'adjacency matrix must be passed in if only_sparse_neighbors is turned on'\n\n if exists(mask):\n num_nodes = mask.sum(dim = -1)\n\n rel_coors = rearrange(coors, 'b i d -> b i () d') - rearrange(coors, 'b j d -> b () j d')\n rel_dist = rel_coors.norm(p = 2, dim = -1)\n\n # calculate neighborhood indices\n\n nbhd_indices = None\n nbhd_masks = None\n nbhd_ranking = rel_dist\n\n if exists(adj_mat):\n if len(adj_mat.shape) == 2:\n adj_mat = repeat(adj_mat, 'i j -> b i j', b = b)\n\n self_mask = torch.eye(n, device = device).bool()\n self_mask = rearrange(self_mask, 'i j -> () i j')\n adj_mat.masked_fill_(self_mask, False)\n\n max_adj_neighbors = adj_mat.long().sum(dim = -1).max().item() + 1\n\n num_nn = max_adj_neighbors if only_sparse_neighbors else (num_nn + max_adj_neighbors)\n valid_neighbor_radius = 0 if only_sparse_neighbors else valid_neighbor_radius\n\n nbhd_ranking = nbhd_ranking.masked_fill(self_mask, -1.)\n nbhd_ranking = nbhd_ranking.masked_fill(adj_mat, 0.)\n\n if num_nn > 0:\n # make sure padding does not end up becoming neighbors\n if exists(mask):\n ranking_mask = mask[:, :, None] * mask[:, None, :]\n nbhd_ranking = nbhd_ranking.masked_fill(~ranking_mask, 1e5)\n\n nbhd_values, nbhd_indices = nbhd_ranking.topk(num_nn, dim = -1, largest = False)\n nbhd_masks = 
nbhd_values <= valid_neighbor_radius\n\n # calculate relative distance and optionally fourier encode\n\n rel_dist = rearrange(rel_dist, 'b i j -> b i j ()')\n\n if fourier_features > 0:\n rel_dist = fourier_encode_dist(rel_dist, num_encodings = fourier_features)\n rel_dist = rearrange(rel_dist, 'b i j () d -> b i j d')\n\n rel_dist = repeat(rel_dist, 'b i j d -> b h i j d', h = h)\n\n # derive queries keys and values\n\n q, k, v = self.to_qkv(feats).chunk(3, dim = -1)\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))\n\n # calculate nearest neighbors\n\n i = j = n\n\n if exists(nbhd_indices):\n i, j = nbhd_indices.shape[-2:]\n nbhd_indices_with_heads = repeat(nbhd_indices, 'b n d -> b h n d', h = h)\n k = batched_index_select(k, nbhd_indices_with_heads, dim = 2)\n v = batched_index_select(v, nbhd_indices_with_heads, dim = 2)\n rel_dist = batched_index_select(rel_dist, nbhd_indices_with_heads, dim = 3)\n rel_coors = batched_index_select(rel_coors, nbhd_indices, dim = 2)\n else:\n k = repeat(k, 'b h j d -> b h n j d', n = n)\n v = repeat(v, 'b h j d -> b h n j d', n = n)\n\n rel_dist_pos_emb = self.to_pos_emb(rel_dist)\n\n # inject position into values\n\n v = v + rel_dist_pos_emb\n\n # prepare mask\n\n if exists(mask):\n q_mask = rearrange(mask, 'b i -> b () i ()')\n k_mask = repeat(mask, 'b j -> b i j', i = n)\n\n if exists(nbhd_indices):\n k_mask = batched_index_select(k_mask, nbhd_indices, dim = 2)\n\n k_mask = rearrange(k_mask, 'b i j -> b () i j')\n\n mask = q_mask * k_mask\n\n if exists(nbhd_masks):\n mask &= rearrange(nbhd_masks, 'b i j -> b () i j')\n\n # expand queries and keys for concatting\n\n q = repeat(q, 'b h i d -> b h i n d', n = j)\n\n edge_input = torch.cat(((q * k), rel_dist_pos_emb), dim = -1)\n\n if exists(edges):\n if exists(nbhd_indices):\n edges = batched_index_select(edges, nbhd_indices, dim = 2)\n\n edges = repeat(edges, 'b i j d -> b h i j d', h = h)\n edge_input = torch.cat((edge_input, edges), dim = -1)\n\n m_ij = self.edge_mlp(edge_input)\n\n if exists(self.edge_gate):\n m_ij = m_ij * self.edge_gate(m_ij)\n\n coor_weights = self.coors_mlp(m_ij)\n coors_gate_input = rearrange(coor_weights, 'b h i j -> b i j h')\n\n if exists(mask):\n mask_value = -torch.finfo(coor_weights.dtype).max if self.coor_attention else 0.\n coor_weights.masked_fill_(~mask, mask_value)\n\n if self.coor_attention:\n coor_weights = coor_weights.softmax(dim = -1)\n\n rel_coors = self.rel_coors_norm(rel_coors)\n rel_coors = repeat(rel_coors, 'b i j c -> b i j c h', h = h)\n\n if exists(self.coors_gate):\n rel_coors_signs = self.coors_gate(coors_gate_input)\n rel_coors_signs = rearrange(rel_coors_signs, 'b i j h -> b i j () h')\n rel_coors = rel_coors * rel_coors_signs\n\n coors_out = einsum('b h i j, b i j c h -> b i c h', coor_weights, rel_coors)\n coors_out = self.to_coors_out(coors_out)\n\n # derive attention\n\n sim = self.to_attn_mlp(m_ij)\n\n if exists(mask):\n max_neg_value = -torch.finfo(sim.dtype).max\n sim.masked_fill_(~mask, max_neg_value)\n\n attn = sim.softmax(dim = -1)\n\n # weighted sum of values and combine heads\n\n out = einsum('b h i j, b h i j d -> b h i d', attn, v)\n out = rearrange(out, 'b h n d -> b n (h d)')\n out = self.to_out(out)\n\n return out, coors_out\n\n# transformer\n\nclass EnTransformer(nn.Module):\n def __init__(\n self,\n *,\n dim,\n depth,\n num_tokens = None,\n dim_head = 64,\n heads = 8,\n num_edge_tokens = None,\n edge_dim = 0,\n m_dim = 16,\n fourier_features = 4,\n num_nearest_neighbors = 0,\n only_sparse_neighbors = 
False,\n num_adj_degrees = None,\n adj_dim = 0,\n coor_attention = False,\n valid_neighbor_radius = float('inf'),\n norm_rel_coors = False,\n rel_coors_sign_gating = False,\n init_eps = 1e-3,\n soft_edges = False\n ):\n super().__init__()\n assert not (exists(num_adj_degrees) and num_adj_degrees < 1), 'make sure adjacent degrees is greater than 1'\n\n self.token_emb = nn.Embedding(num_tokens, dim) if exists(num_tokens) else None\n self.edge_emb = nn.Embedding(num_edge_tokens, edge_dim) if exists(num_edge_tokens) else None\n\n self.num_adj_degrees = num_adj_degrees\n self.adj_emb = nn.Embedding(num_adj_degrees + 1, adj_dim) if exists(num_adj_degrees) and adj_dim > 0 else None\n adj_dim = adj_dim if exists(num_adj_degrees) else 0\n\n self.layers = nn.ModuleList([])\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n Residual(PreNorm(dim, EquivariantAttention(dim = dim, dim_head = dim_head, heads = heads, m_dim = m_dim, edge_dim = (edge_dim + adj_dim), fourier_features = fourier_features, norm_rel_coors = norm_rel_coors, num_nearest_neighbors = num_nearest_neighbors, only_sparse_neighbors = only_sparse_neighbors, valid_neighbor_radius = valid_neighbor_radius, coor_attention = coor_attention, init_eps = init_eps, soft_edges = soft_edges, rel_coors_sign_gating = rel_coors_sign_gating))),\n Residual(PreNorm(dim, FeedForward(dim = dim)))\n ]))\n\n self.num_nearest_neighbors = num_nearest_neighbors\n\n def forward(\n self,\n feats,\n coors,\n edges = None,\n mask = None,\n adj_mat = None,\n **kwargs\n ):\n b = feats.shape[0]\n\n if exists(self.token_emb):\n feats = self.token_emb(feats)\n\n if exists(self.edge_emb):\n assert exists(edges), 'edges must be passed in as (batch x seq x seq) indicating edge type'\n edges = self.edge_emb(edges)\n\n if exists(self.num_adj_degrees):\n assert exists(adj_mat), 'adjacency matrix must be passed in (keyword argument adj_mat)'\n\n if len(adj_mat.shape) == 2:\n adj_mat = repeat(adj_mat.clone(), 'i j -> b i j', b = b)\n\n adj_indices = adj_mat.clone().long()\n\n for ind in range(self.num_adj_degrees - 1):\n degree = ind + 2\n\n next_degree_adj_mat = (adj_mat.float() @ adj_mat.float()) > 0\n next_degree_mask = (next_degree_adj_mat.float() - adj_mat.float()).bool()\n adj_indices.masked_fill_(next_degree_mask, degree)\n adj_mat = next_degree_adj_mat.clone()\n\n if exists(self.adj_emb):\n adj_emb = self.adj_emb(adj_indices)\n edges = torch.cat((edges, adj_emb), dim = -1) if exists(edges) else adj_emb\n\n # main network\n\n for attn, ff in self.layers:\n feats, coors = attn(feats, coors, edges = edges, mask = mask, adj_mat = adj_mat)\n feats, coors = ff(feats, coors)\n\n return feats, coors\n","sub_path":"en_transformer/old.py","file_name":"old.py","file_ext":"py","file_size_in_byte":14788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"215630889","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\nimport numpy as np\n\ndef missing(data, mu_0, var_0):\n '''\n one time EM estimation\n :param data: x\n :param mu_0: initial mean\n :param var_0: initial covariance\n :return: estimated mean and covariance\n '''\n mu = np.zeros(3) # new\n mu[0:2] = 1 / 10 * data.sum(axis=0)[0:2]\n\n # set to zero every iteration\n expectation = [] # mean\n mean = 0\n\n covariance = np.zeros([3, 3]) # covariance\n var = np.zeros([3, 3])\n Dvar = var_0[2, 2] - np.dot(var_0[2, 0:2], np.linalg.inv(var_0[0:2, 0:2])).dot(var_0[0:2, 2])\n\n for i in range(10):\n if (i + 1) % 2 == 0: # even num\n # mean\n expectation.append(\n 
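                # E-step: conditional mean of the missing x3 given the observed
                # (x1, x2), i.e. mu_3 + Sigma_32 . inv(Sigma_22) . (x_12 - mu_12)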
mu_0[2] + np.dot(var_0[2, 0:2], np.linalg.inv(var_0[0:2, 0:2])).dot((data[i, :] - mu_0)[0:2]))\n # covariance\n var[0:2, 0:2] += (data[i, :] - mu_0)[0:2].reshape(-1, 1) * (data[i, :] - mu_0)[0:2]\n var[0, 2] += (data[i, 0] - mu_0[0]) * (expectation[int((i + 1) / 2 - 1)] - mu_0[2])\n var[2, 0] = var[0, 2]\n var[1, 2] += (data[i, 1] - mu_0[1]) * (expectation[int((i + 1) / 2 - 1)] - mu_0[2])\n var[2, 1] = var[1, 2]\n var[2, 2] += Dvar + expectation[int((i + 1) / 2 - 1)] ** 2 + mu_0[2] ** 2 - 2 * mu_0[2] * expectation[\n int((i + 1) / 2 - 1)]\n else: # odd num\n #mean\n mean += data[i, 2]\n # covariance\n covariance += (data[i, :] - mu_0).reshape(-1, 1) * (data[i, :] - mu_0)\n mu[2] = 1 / 10 * (mean + sum(expectation))\n var = 1 / 10 * (var + covariance)\n\n return mu, var\n\n\ndef full(data):\n mu = 1/10 * data.sum(axis=0)\n var = 0\n for i in range(10):\n var += (data[i, :] - mu).reshape(-1, 1) * (data[i, :] - mu)\n var = var / 10\n print(\"在信息完整的情况下:\")\n print(\"估计的均值为\"+str(mu))\n print(\"协方差矩阵为\\n\"+str(var))\n return mu, var\n\n\ndef main():\n x = np.array([[0.42, -0.087, 0.58],[-0.2, -3.3, -3.4],[1.3, -0.32, 1.7],\n [0.39, 0.71, 0.23],[-1.6, -5.3, -0.15],[-0.029, 0.89, -4.7],\n [-0.23, 1.9, 2.2],[0.27, -0.3, -0.87],[-1.9, 0.76, -2.1],[0.87, -1.0, -2.6]])\n\n # initialize mu, var\n mu_0 = np.zeros(3)\n egvalue = np.ones(3)\n var_0 = np.diag(egvalue)\n\n # x3 missing\n times = 40 # 保证估计的均值和方差收敛\n for iter in range(times):\n mu,var = missing(x, mu_0,var_0)\n # update\n mu_0 = mu\n var_0 = var\n\n print(\"在x3信息缺失的情况下:\")\n print(\"估计的均值为\" + str(mu))\n print(\"协方差矩阵为\\n\" + str(var))\n\n # no missing data\n mu_full, var_full = full(x)\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"3EM+GMM/code/em_missing.py","file_name":"em_missing.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"197864172","text":"import numpy as np\nimport math\nimport matplotlib.pyplot as plt\n\nt=5\ndt=1/365\njump_risk=730\njump=.4\nf = [0 for i in range(int(t/dt))]\nM = [0 for i in range(int(t/dt))]\nliq_debt = [0 for i in range(int(t/dt))]\nM[0]=100\nmu=0.05\nsigma=.2\ncollateral_cutoff=1.5\n\ndef update_collat(cdps,M2,M1):\n\tcdps = [{\"collat\":cdp[\"collat\"]*(M2/M1),\"debt\":cdp[\"debt\"],\"open\":cdp[\"collat\"]*(M2/M1)>1.5} for cdp in cdps if cdp[\"open\"]==True]\n\treturn(cdps)\n\ncdps = [{\"collat\":1.55,\"debt\":10000000,\"open\":True},{\"collat\":2,\"debt\":10000000,\"open\":True},{\"collat\":2.5,\"debt\":10000000,\"open\":True}]\n\nrevert=0\n#cdps = update_collat(cdps)\nfor i in range(1,int(t/dt)):\n\tif revert>0:\n\t\t\tf[i] = jump/sigma\n\t\t\tM[i] = M[0]*math.exp((mu-(math.pow(sigma,2))/2)*(i*dt)+sigma*f[i]) #market cap calc based on GBM\n\t\t\trevert-=.25\n\telse:\n\t\tif np.random.randint(0,jump_risk-1)==1:\n\t\t\tf[i] = -1*jump/sigma\n\t\t\tM[i] = M[0]*math.exp((mu-(math.pow(sigma,2))/2)*(i*dt)+sigma*f[i]) #market cap calc based on GBM\n\t\t\trevert=1\t\t\n\t\telse:\n\t\t\tf[i] = f[i-1]+math.sqrt(dt)*np.random.normal(0,1,1)\n\t\t\tM[i] = M[0]*math.exp((mu-(math.pow(sigma,2))/2)*(i*dt)+sigma*f[i]) #market cap calc based on GBM\n\tcdps = update_collat(cdps,M[i],M[i-1])\n\tliq_debt[i] = sum([c[\"debt\"] for c in cdps if c[\"open\"]==False])\n\ndelta = [(M[n]-M[n-1])/M[n-1] for n in range(1,len(M))]\n\n\nfig, ax1 = plt.subplots()\nax1.set_xlabel('Days')\nax1.set_ylabel('Dai Liquidated')\nax1.plot(liq_debt, color='tab:red')\nax2 = ax1.twinx()\nax2.set_ylabel('ETH Price')\nax2.plot(M, 
color='tab:blue')\nplt.show()","sub_path":"collateral2.py","file_name":"collateral2.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"182581173","text":"#Given the names and grades for each student in a class of students, store them in a nested list and print the name(s) of any student(s) having the second lowest grade.\n\n#Note: If there are multiple students with the second lowest grade, order their names alphabetically and print each name on a new line.\n\nfrom operator import itemgetter\nstudents = []\n\nif __name__ == '__main__':\n for _ in range(int(input())):\n students.append([input(), float(input())])\n \nmn = min(students, key = itemgetter(1))[1]\nfiltered = [i for i in students if i[1] != mn]\nmn_filtered = min(filtered, key = itemgetter(1))[1]\nremove = [i for i in filtered if i[1] == mn_filtered]\nremove.sort()\n\nfor i in remove:\n print(i[0])\n","sub_path":"Python/Easy/Nested List.py","file_name":"Nested List.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"201435112","text":"from Adafruit_BME280 import *\n\nsensor = BME280(mode=BME280_OSAMPLE_8)\n\ndegrees = sensor.read_temperature()\npascals = sensor.read_pressure()\nhectopascals = pascals / 100\nhumidity = sensor.read_humidity()\n\n#print 'Timestamp = {0:0.3f}'.format(sensor.t_fine)\n#print 'Temp = {0:0.3f} deg C'.format(degrees)\n#print 'Pressure = {0:0.2f} hPa'.format(hectopascals)\n#print 'Pressure = {0:0.2f} Pa'.format(pascals)\n#print 'Humidity = {0:0.2f} %'.format(humidity)\n\ntemp = degrees * 9 / 5 + 32;\npressure = pascals * 0.0002953;\n\nprint ('{0:0.1f}|{1:0.2f}|{2:0.2f}').format(temp, humidity, pressure)\n","sub_path":"sensors_python/OLD_Sensors/bme280.py","file_name":"bme280.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"629907977","text":"#-*- coding:utf-8 -*-\nimport datetime\nfrom DataPreprocess import read_file, clean\nimport os\n\ndef Evaluate_Word_Space():\n print()\n\n\ndef Evaluate_Document_Space(Remove_stopwords_file, cluster_file):\n\n print(str(datetime.datetime.now()) + ' 正在进行文档空间的的效果评估...')\n\n #对应标签和簇\n title_documents, abstract_documents, labels, num_set, POS_dict = read_file(Remove_stopwords_file)\n documents = []\n POS_set = ['v', 'vn']\n for i in range(len(labels)):\n documents.append(title_documents[i] + abstract_documents[i])\n\n #统计每个标签中的动词\n label_words = {}\n label_set = list(set(labels))\n for label in label_set:\n words = []\n for i in range(len(num_set)):\n if labels[i] == label:\n for word in documents[i]:\n if POS_dict[word] in POS_set:\n words.append(word)\n else:\n pass\n else:\n pass\n words = set(words)\n label_words[label] = words\n\n #统计每簇中的动词\n cluster_words = {}\n with open(cluster_file, 'r', encoding = 'utf-8') as infile:\n content = infile.readlines()\n clusters_label = []\n for line in content:\n if line == '' or line == '\\n' or line == ' ':\n break\n else:\n line_clean = clean(line)\n clusters_label.append(line_clean[2])\n\n cluster_set = list(set(clusters_label))\n for cluster in cluster_set:\n words = []\n for i in range(len(num_set)):\n if clusters_label[i] == cluster:\n for word in documents[i]:\n if POS_dict[word] in POS_set:\n words.append(word)\n else:\n pass\n else:\n pass\n words = set(words)\n cluster_words[cluster] = words\n\n correspond_relation = []\n for i in 
range(len(label_set)):\n value_set = []\n for label_o in label_set:\n for label_p in cluster_set:\n intersection = list((label_words[label_o]) & (cluster_words[label_p]))\n union = list((label_words[label_o]) | (cluster_words[label_p]))\n value = len(intersection) / len(union)\n value_set.append(value)\n\n originallabel = label_set[int(value_set.index(max(value_set)) / (6 - i))]\n predictedlabel = cluster_set[int(value_set.index(max(value_set)) % (6 - i))]\n correspond_relation.append(originallabel + '_' + predictedlabel)\n\n label_set.remove(originallabel)\n cluster_set.remove(predictedlabel)\n\n evaluate_file = os.path.dirname(cluster_file) + '/evaluation_' + os.path.basename(cluster_file)\n fn = open(evaluate_file, 'w', encoding = 'utf-8')\n fn.write('Label\\tPrecision\\tRecall\\tF_value\\n')\n for item in correspond_relation:\n item_split = item.split('_')\n label = item_split[0]\n cluster = item_split[1]\n TP = 0\n TN = 0\n FP = 0\n FN = 0\n for i in range(len(labels)):\n if labels[i] == label and clusters_label[i] == cluster:\n TP += 1\n elif labels[i] == label and clusters_label[i] != cluster:\n TN += 1\n elif labels[i] != label and clusters_label[i] == cluster:\n FP += 1\n else:\n FN += 1\n precision = TP / (TP + FP)\n recall = TP / (TP + TN)\n fvalue = 2 * precision * recall / (precision + recall)\n fn.write(label + '\\t' +str(precision) + '\\t' + str(recall) + '\\t' + str(fvalue) + '\\n')\n fn.close()\n\n\n\n","sub_path":"Evaluate.py","file_name":"Evaluate.py","file_ext":"py","file_size_in_byte":3679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"370314480","text":"from scipy.optimize import curve_fit\nimport numpy as np\nfrom openpyxl import Workbook\nimport sys\nimport matplotlib.pyplot as plt\n\nclass peakSeparate:\n def __init__(self, savePath, filepath):\n self.rawDataX = [] # 記錄整個(原始)光譜的峰座標\n self.rawDataY = []\n \n self.splitPos = [] # 記錄各個重疊峰的start & end在rawData的index\n\n self.topX = [] # 記錄所有峰頂座標\n self.topY_fit = [] # 記錄分峰頂峰的Y座標\n self.topY_actual = [] # 記錄分峰頂峰對應到fit Curve的Y座標\n\n self.fitCurveY = [] # 記錄重疊峰做curve_fit後的Y座標\n self.drawY = [] # 串接所有重疊峰的fit curve\n\n self.allSeparatedPeak = [] # 記錄每個重疊峰的分峰\n\n self.readData(filepath)\n self.splitData()\n\n for i in range(len(self.splitPos)):\n self.sub_peakX = []\n self.sub_peakY = []\n\n self.guess_total = []\n self.width = 10\n\n startX = self.rawDataX[self.splitPos[i][0]]\n for s in range(10):\n self.sub_peakX.append(startX - 10 + s)\n self.sub_peakY.append(0.0)\n\n self.sub_peakX.extend(self.rawDataX[self.splitPos[i][0]:self.splitPos[i][1] + 1].tolist())\n self.sub_peakY.extend(self.rawDataY[self.splitPos[i][0]:self.splitPos[i][1] + 1].tolist())\n\n endX = self.rawDataX[self.splitPos[i][1]]\n for e in range(10):\n self.sub_peakX.append(endX + 1 + e)\n self.sub_peakY.append(0.0)\n\n self.sub_peakX = np.array(self.sub_peakX)\n self.sub_peakY = np.array(self.sub_peakY)\n\n if (self.sub_peakY.max() > 1000):\n self.check()\n\n if self.sub_peakY.max() >= 200 or i == 0:\n self.doneFit = False\n\n while not self.doneFit:\n maxWidth = (self.sub_peakX[-1] - self.sub_peakX[0]) * 2 / 3\n\n self.setPeak()\n self.setSepratePeak()\n if not self.doneFit:\n self.width += 2\n if self.width > maxWidth:\n break\n \n if self.doneFit == False:\n self.fitCurveY.append(self.sub_peakY[10: -10].tolist())\n else:\n self.fitCurveY.append(self.sub_peakY[10: -10].tolist())\n \n self.addAllCurveFit()\n self.saveTopToCsv(savePath, (filepath.split(\"/\")[-1]).split(\".\")[0])\n\n ##讀txt檔\n def 
readData(self, path):\n f = open(path, 'r')\n\n data_x = []\n data_y = []\n\n lineCount = 0\n for line in f.readlines():\n tmp = line.split('\\t')\n tmp[0] = int(tmp[0])\n tmp[1] = float(tmp[1])\n data_x.append(tmp[0])\n data_y.append(tmp[1])\n lineCount += 1\n if lineCount > 1413:\n break\n f.close()\n\n data_x.append(0)\n data_y.append(0)\n\n self.rawDataX = np.array(data_x)\n self.rawDataY = np.array(data_y)\n self.rawDataY = np.where(self.rawDataY <= 50.0, 0.0, self.rawDataY)\n\n def splitData(self):\n start = False\n startY_index = 0\n for i in range(len(self.rawDataY)):\n if self.rawDataY[i] != 0.0 and start != True:\n startY_index = i\n start = True\n elif self.rawDataY[i] == 0.0 and start == True:\n self.splitPos.append([startY_index, i - 1])\n start = False\n\n if start:\n self.splitPos.append([startY_index, i])\n start = False\n\n def func(self, x, *params):\n # 根據參數的長度確定要擬合的函數數量\n num_func = int(len(params) / 3)\n\n # 將每個參數插入一個高斯函數並添加到 y_list\n y_list = []\n for i in range(num_func):\n y = np.zeros_like(x)\n param_range = list(range(3 * i, 3 * (i + 1), 1))\n amp = params[int(param_range[0])]\n ctr = params[int(param_range[1])]\n wid = params[int(param_range[2])]\n y = y + amp * np.exp(-((x - ctr) / wid) ** 2)\n y_list.append(y)\n\n # 覆蓋 y_list 中的所有高斯函數\n y_sum = np.zeros_like(x)\n for i in y_list:\n y_sum = y_sum + i\n\n # 最後添加背景\n y_sum = y_sum + params[-1]\n return y_sum\n\n def fit_plot(self, x, *params):\n num_func = int(len(params) / 3)\n y_list = []\n for i in range(num_func):\n y = np.zeros_like(x)\n param_range = list(range(3 * i, 3 * (i + 1), 1))\n amp = params[int(param_range[0])]\n ctr = params[int(param_range[1])]\n wid = params[int(param_range[2])]\n y = y + amp * np.exp(-((x - ctr) / wid) ** 2) + params[-1]\n y_list.append(y)\n return y_list\n\n def gaussian(self, x, *param):\n return param[0] * np.exp(-np.power(x - param[2], 2.) / (2 * np.power(param[4], 2.))) + param[1] * np.exp(\n -np.power(x - param[3], 2.) 
/ (2 * np.power(param[5], 2.)))\n\n def setPeak(self):\n guess = []\n\n startX = self.sub_peakX[0]\n endX = self.sub_peakX[len(self.sub_peakX) - 1]\n maxY = np.array(self.sub_peakY).max()\n highestX = self.sub_peakX[self.sub_peakY.tolist().index(maxY)]\n\n # ---最高峰的左邊----------------------------#\n minus = self.width\n w = int(minus / 2)\n nowX = highestX\n while nowX - minus >= startX:\n valueX = nowX - minus\n while valueX not in self.sub_peakX:\n valueX -= 1\n x_index = self.sub_peakX.tolist().index(valueX)\n nowhigh = self.sub_peakY[x_index]\n if nowhigh != 0:\n guess.append([0, valueX, w])\n nowX = nowX - minus\n\n # ---最高峰----------------------------------#\n guess.append([0, highestX, w])\n\n # ---最高峰的一右邊--------------------------#\n add = self.width\n w = int(add / 2)\n nowX = highestX\n\n while nowX + add <= endX:\n valueX = nowX + add\n while valueX not in self.sub_peakX:\n valueX -= 1\n x_index = self.sub_peakX.tolist().index(valueX)\n nowhigh = self.sub_peakY[x_index]\n if nowhigh != 0:\n guess.append([0, valueX, w])\n nowX = nowX + add\n\n # ---設定背景强度---------------------------#\n background = 10\n\n self.guess_total = []\n for i in guess:\n self.guess_total.extend(i)\n self.guess_total.append(background)\n\n ##計算重疊峰 fit後的峰 & 處理分峰\n def setSepratePeak(self):\n try:\n popt, pcov = curve_fit(self.func, self.sub_peakX, self.sub_peakY, p0=self.guess_total)\n self.doneFit = True\n except RuntimeError:\n self.doneFit = False\n\n if self.doneFit:\n fit = np.array(self.func(self.sub_peakX, *popt))\n fit = np.where(fit < 0, 0, fit)\n\n y_list = self.fit_plot(self.sub_peakX, *popt)\n baseline = np.zeros_like(self.sub_peakX)\n\n point = [] # 分峰的最高點\n\n # y_list: 記錄分峰的peak資料\n for n, i in enumerate(y_list):\n i = np.array(i)\n i = np.where(i < 0.0, 0.0, i)\n maxV = np.max(i)\n index = self.sub_peakX[i.tolist().index(maxV)]\n if maxV != 0:\n point.append([index, maxV, round(self.sub_peakY[i.tolist().index(maxV)], 2)])\n\n point.sort()\n\n self.allSeparatedPeak.append([self.sub_peakX, baseline, y_list, point])\n\n self.topX.extend([i[0] for i in point])\n self.topY_fit.extend([i[1] for i in point])\n self.topY_actual.extend([i[2] for i in point])\n self.topX.append(\"\")\n self.topY_fit.append(\"\")\n self.topY_actual.append(\"\")\n\n fitY = self.func(self.sub_peakX, *popt)\n self.fitCurveY.append(fitY[10: -10].tolist())\n\n #將每段重疊峰的fit Curve組合在一起\n def addAllCurveFit(self):\n self.drawY = self.rawDataY.copy()\n for i in range(len(self.splitPos)): # 被切成幾個重疊峰\n count = 0\n for index in range(self.splitPos[i][0], (self.splitPos[i][1] + 1)): # 每個重疊峰區段\n self.drawY[index] = self.fitCurveY[i][count]\n count += 1\n\n #判斷波段是否振盪過小\n def check(self):\n top_idx = self.sub_peakY.tolist().index(self.sub_peakY.max())\n\n top_left_bottom_idx = top_idx - 1\n while top_left_bottom_idx > 0:\n if (self.sub_peakY[top_left_bottom_idx] < 1000) and (self.sub_peakY[top_left_bottom_idx - 1] >= self.sub_peakY[top_left_bottom_idx] and self.sub_peakY[top_left_bottom_idx] <= self.sub_peakY[top_left_bottom_idx + 1]):\n break\n top_left_bottom_idx -= 1\n\n #left\n topY_left_idx = self.sub_peakY.tolist().index(self.sub_peakY[:top_left_bottom_idx+1].max())\n\n if self.sub_peakY[topY_left_idx] > 800:\n bottom_left_idx = topY_left_idx\n while bottom_left_idx < top_left_bottom_idx:\n if (self.sub_peakY[bottom_left_idx] < 1000) and (self.sub_peakY[bottom_left_idx - 1] >= self.sub_peakY[bottom_left_idx] and self.sub_peakY[bottom_left_idx] <= self.sub_peakY[bottom_left_idx + 1]):\n break\n bottom_left_idx += 1\n\n\n if 
top_left_bottom_idx - bottom_left_idx >= 50:\n maxV = self.sub_peakY[bottom_left_idx : top_left_bottom_idx+1].max()\n minV = self.sub_peakY[bottom_left_idx: top_left_bottom_idx + 1].min()\n if (maxV-minV) < 800:\n self.sub_peakY[bottom_left_idx:top_left_bottom_idx+1] = [0] * (top_left_bottom_idx - bottom_left_idx + 1)\n\n #right\n top_right_bottom_idx = top_idx\n while top_right_bottom_idx < len(self.sub_peakY) - 1:\n if (self.sub_peakY[top_right_bottom_idx] < 1000) and (self.sub_peakY[top_right_bottom_idx - 1] >= self.sub_peakY[top_right_bottom_idx] and self.sub_peakY[top_right_bottom_idx] <= self.sub_peakY[top_right_bottom_idx + 1]):\n break\n top_right_bottom_idx += 1\n\n topY_right_idx = self.sub_peakY.tolist().index(self.sub_peakY[top_right_bottom_idx:].max())\n if self.sub_peakY[topY_right_idx] > 800:\n bottom_right_idx = topY_right_idx\n while bottom_right_idx >= top_right_bottom_idx:\n if (self.sub_peakY[bottom_right_idx] < 1000) and (self.sub_peakY[bottom_right_idx - 1] >= self.sub_peakY[bottom_right_idx] and self.sub_peakY[bottom_right_idx] <= self.sub_peakY[bottom_right_idx + 1]):\n break\n bottom_right_idx -= 1\n\n if bottom_right_idx - top_right_bottom_idx >= 50:\n max = self.sub_peakY[top_right_bottom_idx: bottom_right_idx + 1].max()\n min = self.sub_peakY[top_right_bottom_idx: bottom_right_idx + 1].min()\n if max - min > 800:\n self.sub_peakY[top_right_bottom_idx:bottom_right_idx + 1] = [0] * (bottom_right_idx - top_right_bottom_idx + 1)\n\n #儲存fit後的Curve\n def saveFitCurve(self, path, fileName): \n for i in range(len(self.rawDataX)):\n if self.rawDataX[i] >= 300:\n x_300 = i\n break\n\n wb = Workbook()\n ws = wb.active\n\n ws.append([\"X\", \"Y\"])\n for i in range(x_300, len(self.rawDataX)):\n ws.append([self.rawDataX[i], self.drawY[i]])\n\n wb.save(path + \"//\" + fileName + '-fitCurve.xlsx')\n\n #儲存每個分峰的最高點對應到的x座標、y座標、最高點對應到fit Curve的值\n def saveTopToCsv(self, path, fileName):\n wb = Workbook()\n ws = wb.active\n\n spaceIndex = 0 \n ws.append([\"X\", \"sub_curve Y\", \"actual Y\"])\n for i in range(spaceIndex, len(self.topX)):\n ws.append([self.topX[i], self.topY_fit[i], self.topY_actual[i]])\n\n wb.save(path + \"//\" + fileName + '.xlsx')\n\n","sub_path":"peakSeparate.py","file_name":"peakSeparate.py","file_ext":"py","file_size_in_byte":12436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"292550511","text":"#!/usr/bin/python3\ndef revNum(s):\n\tcnt = 0\n\tlength=len(s)\n\tfor i in range(length-1):\n\t\tmp = s[i+1:]\n\t\tfor j in range(i+1, length):\n\t\t\tif s[i]>s[j]:\n\t\t\t\tcnt+=1\n\treturn cnt\nn=int(input())\ns=[]\nfor i in range(n):\n\ts.append(int(input()))\nprint(revNum(s))\n","sub_path":"1019.py","file_name":"1019.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"316753427","text":"# -*- coding: utf-8 -*-\n'''\nЗадание 9.4a\n\nЗадача такая же, как и задании 9.4.\nПроверить работу функции надо на примере файла config_r1.txt\n\nОбратите внимание на конфигурационный файл.\nВ нем есть разделы с большей вложенностью, например, разделы:\n* interface Ethernet0/3.100\n* router bgp 100\n\nНадо чтобы функция config_to_dict обрабатывала следующий уровень вложенности.\nПри этом, не привязываясь к конкретным разделам.\nОна должна быть универсальной, и сработать, если это будут другие разделы.\n\nЕсли уровня вложенности два:\n* то команды верхнего уровня будут ключами словаря,\n* а команды подуровней - списками\n\nЕсли 
уровня вложенности три:\n* самый вложенный уровень должен быть списком,\n* а остальные - словарями.\n\nНа примере interface Ethernet0/3.100:\n\n{'interface Ethernet0/3.100':{\n 'encapsulation dot1Q 100':[],\n 'xconnect 10.2.2.2 12100 encapsulation mpls':\n ['backup peer 10.4.4.4 14100',\n 'backup delay 1 1']}}\n\n\nОграничение: Все задания надо выполнять используя только пройденные темы.\n\n'''\ndef ignore_command(command, ignore):\n '''\n Функция проверяет содержится ли в команде слово из списка ignore.\n command - строка. Команда, которую надо проверить\n ignore - список. Список слов\n\n Возвращает\n * True, если в команде содержится слово из списка ignore\n * False - если нет\n '''\n return any(word in command for word in ignore)\n\ndef config_map(fname,ign_command = False):\n '''\n Функция возвращает словарь, получая имя файла конфигурации\n fname - имя файла\n ignore_command - игнорирование определенных параметров конфигурации из списка\n '''\n f = open(fname)\n maps = {}\n config_all=(f.read()).split('\\n')\n # очистка от ненужных строк\n config = []\n for line in config_all :\n if line.startswith('!') or line =='' : continue\n if ign_command and ignore_command(line,ignore): continue\n config.append(line)\n #Добавление пустого элемента в конец\n config.append('end')\n # Определение уровня максимального уровня вложенности для текущего элемента словаря i\n # Для этого создается список с указанием строк и уровня вложенности\n # и т.д.\n level = []\n #перебор строк\n for i in range(0,len(config),1) :\n #перебор символов в строке до первого символа - не пробела\n l = 0\n while ( config[i][l] == ' ' ) : l = l + 1\n level.append(l)\n\n #тип создаваемого значения - ключ словаря - 0 или добавление в список - 1\n level_type = []\n for i in range(0,len(level)-1,1) :\n key = 0\n j = i\n while ( (level[i] > 0) and (j <= len(level)) and (level[j] >= level[i]) ) :\n if level[j] > level[i] : \n key = 0\n break\n else :\n key = 1\n j = j + 1\n level_type.append(key) \n print(level)\n\n #Преобразование списка в словарь\n for i in range(0,len(level)-1,1) :\n if level[i] == 0 :\n maps[config[i]] = []\n maps_a = config[i]\n #print(maps_a)\n elif level[i] == 1 and level_type[i] == 0 :\n maps[maps_a] = {}\n maps[maps_a][config[i]] = []\n maps_b = config[i]\n #print(maps_a,maps_b)\n elif level[i] == 1 and level_type[i] == 1 :\n maps[maps_a].append(config[i])\n maps_b = config[i]\n #print(maps_a,maps_b,config[i])\n elif level[i] == 2 and level_type[i] == 0 :\n maps[maps_a][maps_b] = {}\n maps[maps_a][maps_b][config[i]] = []\n print(maps_a,maps_b,config[i],0)\n elif level[i] == 2 and level_type[i] == 1 :\n maps[maps_a][maps_b] = []\n maps[maps_a][maps_b].append(config[i])\n print(maps_a,maps_b,config[i],1)\n\n f.close()\n print(config)\n print('interface Ethernet0/3.100',maps['interface Ethernet0/3.100'])\n return maps\n\n# Main\nignore = ['duplex', 'alias', 'Current configuration']\n\n#res = config_map(fname = 'config_r1.txt', ign_command = True)\nres = config_map(fname = 'config_r1.txt', ign_command = False)\n","sub_path":"9/task_9_4a.py","file_name":"task_9_4a.py","file_ext":"py","file_size_in_byte":4974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"301328550","text":"# !/usr/bin/env python3\n# This file is covered by the LICENSE file in the root of this project.\nimport imp\n\nimport __init__ as booger\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Add(nn.Module):\n def __init__(self):\n super(Add, 
self).__init__()\n\n def forward(self, x, y):\n return x + y\n\n\nclass resBlock_with_add(nn.Module):\n def __init__(self, conv, act, bn):\n super(resBlock_with_add, self).__init__()\n\n self.conv = conv\n self.act = act\n self.bn = bn\n\n def forward(self, x, y):\n res = self.conv(x)\n res = self.act(res)\n res = self.bn(res)\n return res + y\n\n\nclass Trans(nn.Module):\n def __init__(self, trans, trans_act, trans_bn):\n super(Trans, self).__init__()\n self.trans = trans\n self.trans_act = trans_act\n self.trans_bn = trans_bn\n\n def forward(self, x):\n upA = self.trans(x)\n upA = self.trans_act(upA)\n upA = self.trans_bn(upA)\n return upA\n\n\nclass AttentionBlock(nn.Module):\n def __init__(self, f_g, f_l, f_int):\n super(AttentionBlock, self).__init__()\n self.Wg = nn.Sequential(nn.Conv2d(f_g, f_int, kernel_size=1, padding=0, stride=1),\n nn.BatchNorm2d(f_int))\n\n self.Wx = nn.Sequential(nn.Conv2d(f_l, f_int, kernel_size=1, padding=0, stride=1),\n nn.BatchNorm2d(f_int))\n\n self.psi = nn.Sequential(nn.Conv2d(f_int, 1, kernel_size=1, padding=0, stride=1),\n nn.BatchNorm2d(1),\n nn.Sigmoid())\n self.relu = nn.ReLU()\n\n def forward(self, g, x):\n g1 = self.Wg(g)\n x1 = self.Wx(x)\n psi = self.relu(g1 + x1)\n psi = self.psi(psi)\n return x * psi\n\n\nclass ResContextBlock(nn.Module):\n def __init__(self, in_filters, out_filters, kernel_size=(3, 3), stride=1):\n super(ResContextBlock, self).__init__()\n self.conv1 = nn.Conv2d(in_filters, out_filters, kernel_size=(1, 1), stride=stride)\n self.act1 = nn.LeakyReLU()\n\n self.conv2 = nn.Conv2d(in_filters, out_filters, kernel_size, padding=1)\n self.act2 = nn.LeakyReLU()\n self.bn1 = nn.BatchNorm2d(out_filters)\n\n self.conv3 = nn.Conv2d(out_filters, out_filters, kernel_size, padding=1)\n self.act3 = nn.LeakyReLU()\n self.bn2 = nn.BatchNorm2d(out_filters)\n\n def forward(self, x):\n shortcut = self.conv1(x)\n shortcut = self.act1(shortcut)\n\n resA = self.conv2(x)\n resA = self.act2(resA)\n resA = self.bn1(resA)\n\n resA = self.conv3(resA)\n resA = self.act3(resA)\n resA = self.bn2(resA)\n return resA + shortcut\n\n\nclass ResBlock(nn.Module):\n def __init__(self, in_filters, out_filters, dropout_rate, kernel_size=(3, 3), stride=1,\n pooling=True, drop_out=True):\n super(ResBlock, self).__init__()\n self.pooling = pooling\n self.drop_out = drop_out\n self.conv1 = nn.Conv2d(in_filters, out_filters, kernel_size=(1, 1), stride=stride)\n self.act1 = nn.LeakyReLU()\n\n self.conv2 = nn.Conv2d(in_filters, out_filters, kernel_size=kernel_size, padding=1)\n self.act2 = nn.LeakyReLU()\n self.bn1 = nn.BatchNorm2d(out_filters)\n\n self.conv3 = nn.Conv2d(out_filters, out_filters, kernel_size=kernel_size, padding=1)\n self.act3 = nn.LeakyReLU()\n self.bn2 = nn.BatchNorm2d(out_filters)\n\n if pooling:\n self.dropout = nn.Dropout2d(p=dropout_rate)\n self.pool = nn.AvgPool2d(kernel_size=kernel_size, stride=2, padding=1)\n else:\n self.dropout = nn.Dropout2d(p=dropout_rate)\n\n def forward(self, x):\n shortcut = self.conv1(x)\n shortcut = self.act1(shortcut)\n\n resA = self.conv2(x)\n resA = self.act2(resA)\n resA = self.bn1(resA)\n\n resA = self.conv3(resA)\n resA = self.act3(resA)\n resA = self.bn2(resA)\n resA = shortcut + resA\n\n if self.pooling:\n if self.drop_out:\n resB = self.dropout(resA)\n else:\n resB = resA\n resB = self.pool(resB)\n\n return resB, resA\n else:\n if self.drop_out:\n resB = self.dropout(resA)\n else:\n resB = resA\n return resB\n\n\nclass UpBlock(nn.Module):\n def __init__(self, in_filters, out_filters, dropout_rate, kernel_size=(3, 
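The AttentionBlock in segmentator.py above implements an additive attention gate: the gating signal and the skip tensor are projected to a common width, summed, and squashed into a [0, 1] mask. A hedged smoke test of the shape contract, assuming PyTorch is installed (the layer sizes here are arbitrary):

# Minimal attention-gate check: the mask has one channel and gating preserves
# the skip tensor's shape, which is what UpBlock-style decoders rely on.
import torch
import torch.nn as nn

f_g, f_l, f_int = 64, 64, 32
Wg = nn.Sequential(nn.Conv2d(f_g, f_int, 1), nn.BatchNorm2d(f_int))
Wx = nn.Sequential(nn.Conv2d(f_l, f_int, 1), nn.BatchNorm2d(f_int))
psi = nn.Sequential(nn.Conv2d(f_int, 1, 1), nn.BatchNorm2d(1), nn.Sigmoid())

g = torch.randn(2, f_g, 16, 32)          # gating signal from the decoder
x = torch.randn(2, f_l, 16, 32)          # skip connection from the encoder
attn = psi(torch.relu(Wg(g) + Wx(x)))    # (2, 1, 16, 32) mask in [0, 1]
assert (x * attn).shape == x.shape       # gating keeps the skip's shape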
3),drop_out=True):\n super(UpBlock, self).__init__()\n self.drop_out = drop_out\n self.trans = nn.ConvTranspose2d(in_filters, out_filters, kernel_size, stride=(2, 2), padding=1)\n self.trans_act = nn.LeakyReLU()\n self.trans_bn = nn.BatchNorm2d(out_filters)\n\n self.dropout1 = nn.Dropout2d(p=dropout_rate)\n self.dropout2 = nn.Dropout2d(p=dropout_rate)\n\n self.conv1 = nn.Conv2d(out_filters, out_filters, kernel_size, padding=1)\n self.act1 = nn.LeakyReLU()\n self.bn1 = nn.BatchNorm2d(out_filters)\n\n self.conv2 = nn.Conv2d(out_filters, out_filters, kernel_size, padding=1)\n self.act2 = nn.LeakyReLU()\n self.bn2 = nn.BatchNorm2d(out_filters)\n\n self.conv3 = nn.Conv2d(out_filters, out_filters, kernel_size, padding=1)\n self.act3 = nn.LeakyReLU()\n self.bn3 = nn.BatchNorm2d(out_filters)\n self.dropout3 = nn.Dropout2d(p=dropout_rate)\n\n def forward(self, x, skip):\n upA = self.trans(x)\n if upA.shape != skip.shape:\n upA = F.pad(upA, (0, 1, 0, 1), mode='replicate')\n upA = self.trans_act(upA)\n upA = self.trans_bn(upA)\n if self.drop_out:\n upA = self.dropout1(upA)\n upB = upA + skip\n if self.drop_out:\n upB = self.dropout2(upB)\n\n upE = self.conv1(upB)\n upE = self.act1(upE)\n upE = self.bn1(upE)\n\n upE = self.conv2(upE)\n upE = self.act2(upE)\n upE = self.bn2(upE)\n\n upE = self.conv3(upE)\n upE = self.act3(upE)\n upE = self.bn3(upE)\n if self.drop_out:\n upE = self.dropout3(upE)\n\n return upE\n\n\nclass SalsaNet(nn.Module):\n def __init__(self, ARCH, nclasses, path=None, path_append=\"\", strict=False):\n super(SalsaNet, self).__init__()\n self.ARCH = ARCH\n self.nclasses = nclasses\n self.path = path\n self.path_append = path_append\n self.strict = False\n\n self.downCntx = ResContextBlock(5, 32)\n self.resBlock1 = ResBlock(32, 32, 0.2, pooling=True, drop_out=False)\n self.resBlock2 = ResBlock(32, 2 * 32, 0.2, pooling=True)\n self.resBlock3 = ResBlock(2 * 32, 4 * 32, 0.2, pooling=True)\n self.resBlock4 = ResBlock(4 * 32, 8 * 32, 0.2, pooling=True)\n self.resBlock5 = ResBlock(8 * 32, 16 * 32, 0.2, pooling=True)\n self.resBlock6 = ResBlock(16 * 32, 16 * 32, 0.2, pooling=False)\n\n self.upBlock1 = UpBlock(16 * 32, 16 * 32, 0.2)\n self.upBlock2 = UpBlock(16 * 32, 8 * 32, 0.2)\n self.upBlock3 = UpBlock(8 * 32, 4 * 32, 0.2)\n self.upBlock4 = UpBlock(4 * 32, 2 * 32, 0.2)\n self.upBlock5 = UpBlock(2 * 32, 32, 0.2, drop_out=False)\n\n self.logits = nn.Conv2d(32, nclasses, kernel_size=(1, 1))\n\n def forward(self, x):\n downCntx = self.downCntx(x)\n down0c, down0b = self.resBlock1(downCntx)\n down1c, down1b = self.resBlock2(down0c)\n down2c, down2b = self.resBlock3(down1c)\n down3c, down3b = self.resBlock4(down2c)\n down4c, down4b = self.resBlock5(down3c)\n down5b = self.resBlock6(down4c)\n\n up4e = self.upBlock1(down5b, down4b)\n up3e = self.upBlock2(up4e, down3b)\n up2e = self.upBlock3(up3e, down2b)\n up1e = self.upBlock4(up2e, down1b)\n up0e = self.upBlock5(up1e, down0b)\n\n logits = self.logits(up0e)\n logits = F.softmax(logits, dim=1)\n return logits","sub_path":"train/tasks/semantic/modules/segmentator.py","file_name":"segmentator.py","file_ext":"py","file_size_in_byte":7960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"499375633","text":"from django.contrib.auth.decorators import login_required\nfrom django.contrib import messages\nimport requests\nfrom allauth.socialaccount.models import SocialAccount\nfrom django.shortcuts import render, get_object_or_404, redirect\nfrom django.contrib.auth.decorators import 
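UpBlock's forward pass above pads the transposed-conv output when it mismatches the skip tensor. The arithmetic behind that: for ConvTranspose2d with kernel 3, stride 2, padding 1 and no output_padding, out = (in - 1)*2 - 2*1 + 3 = 2*in - 1, one pixel short of a 2x-sized skip on each axis. A hedged shape check, assuming PyTorch is installed:

# Why F.pad(..., (0, 1, 0, 1), mode='replicate') is needed in UpBlock above.
import torch
import torch.nn as nn
import torch.nn.functional as F

trans = nn.ConvTranspose2d(256, 128, kernel_size=(3, 3), stride=(2, 2), padding=1)
x = torch.randn(1, 256, 8, 16)
up = trans(x)
assert up.shape[-2:] == (15, 31)            # 2*in - 1 on each spatial axis
up = F.pad(up, (0, 1, 0, 1), mode='replicate')
assert up.shape[-2:] == (16, 32)            # now matches a 2x-sized skip tensor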
login_required\nfrom .forms import RegForm, UserUpdateForm, ProfileUpdateForm\n\ndef gmail_profile(request, user_signed_up):\n if sociallogin.account.provider == 'google':\n user_data = user.socialaccount_set.filter(provider='google')[0].extra_data\n\n email = user_data['email']\n first_name = user_data['first_name']\n last_name = user_data['last_name']\n\n user.email = email\n user.save()\n\n context = {\n 'first_name':first_name,\n 'last_name':last_name,\n 'pic':picture,\n }\n\ndef register(request):\n if request.method == 'POST':\n form = RegForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n email = form.cleaned_data.get('email')\n messages.success(request, f'Account created successfully for {username}')\n return redirect('blog-home')\n else :\n form = RegForm()\n return render(request, 'users/register.html', {'form' : form})\n\n@login_required\ndef profile(request):\n if request.method == 'POST':\n u_form = UserUpdateForm(request.POST, instance=request.user)\n p_form = ProfileUpdateForm(request.POST,\n request.FILES,\n instance=request.user.profile)\n\n if u_form.is_valid() and p_form.is_valid():\n u_form.save()\n p_form.save()\n\n else:\n u_form = UserUpdateForm(instance=request.user)\n p_form = ProfileUpdateForm(instance=request.user.profile)\n\n context = {\n 'u_form' : u_form,\n 'p_form' : p_form\n }\n return render(request, 'users/profile.html', context)\n\n@login_required\ndef add_comment(request, pk):\n post = get_object_or_404(Post, pk=pk)\n if request.method == \"POST\":\n form = CommentForm(request.POST)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.post = post\n comment.author = request.user\n comment.save()\n return redirect('post_detail', pk=post.pk)\n else:\n form = CommentForm(instance=post)\n\n return render(request, 'blog/add_comment.html', {'form':form, 'post':post})\n","sub_path":"django_test/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"354885713","text":"import time\n\nfrom boaapi.boa_client import BoaClient\nfrom decouple import config\n\nfrom backend.discoveryrunner import DiscoveryRunner as DR\n\n\nclass DiscoveryRunner(DR):\n template_start = \"\"\"o: output collection of string;\n\nfiltered := true;\nsnapshot: array of ChangedFile;\n\n# ensure there is at least 1 source file in the snapshot\nvisit(input, visitor {\n before node: CodeRepository -> {\n snapshot = getsnapshot(node, \"SOURCE_\");\n if (len(snapshot) > 0)\n filtered = false;\n stop;\n }\n});\n\n\"\"\"\n\n template_end = \"\"\"if (!filtered)\n o << input.project_url;\"\"\"\n\n def translate_filter(self, filtr):\n s = 'if (!filtered) {\\n'\n\n #\n if filtr.pfilter.flter.name == 'Minimum number of commits':\n s += \"\"\" if (len(input.code_repositories) < 1 || len(input.code_repositories[0].revisions) < {})\n filtered = true;\n\"\"\".format(filtr.value)\n\n #\n elif filtr.pfilter.flter.name == 'Maximum number of commits':\n s += \"\"\" if (len(input.code_repositories) < 1 || len(input.code_repositories[0].revisions) > {})\n filtered = true;\n\"\"\".format(filtr.value)\n\n #\n elif filtr.pfilter.flter.name == 'Minimum number of source files':\n s += \"\"\" min_file_count := 0;\n foreach (i: int; def(snapshot[i]))\n if (iskind(\"SOURCE_\", snapshot[i].kind))\n min_file_count = min_file_count + 1;\n if (min_file_count < {})\n filtered = true;\n\"\"\".format(filtr.value)\n\n #\n elif 
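Two issues in the views.py record above: `gmail_profile` references names that are never defined (`sociallogin`, `user`, `picture`), and the `profile` view saves on POST but then falls through to re-render, so a browser refresh re-submits the forms. A hedged fix sketch for the latter, following the POST-redirect-GET pattern (assumes a URL named 'profile' exists; the forms come from the record's own .forms module):

from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render

@login_required
def profile(request):
    if request.method == 'POST':
        u_form = UserUpdateForm(request.POST, instance=request.user)
        p_form = ProfileUpdateForm(request.POST, request.FILES,
                                   instance=request.user.profile)
        if u_form.is_valid() and p_form.is_valid():
            u_form.save()
            p_form.save()
            messages.success(request, 'Your profile has been updated')
            return redirect('profile')   # redirect so a refresh cannot resubmit
    else:
        u_form = UserUpdateForm(instance=request.user)
        p_form = ProfileUpdateForm(instance=request.user.profile)
    return render(request, 'users/profile.html', {'u_form': u_form, 'p_form': p_form})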
filtr.pfilter.flter.name == 'Maximum number of source files':\n s += \"\"\" max_file_count := 0;\n foreach (i: int; def(snapshot[i]))\n if (iskind(\"SOURCE_\", snapshot[i].kind))\n max_file_count = max_file_count + 1;\n if (max_file_count > {})\n filtered = true;\n\"\"\".format(filtr.value)\n\n #\n elif filtr.pfilter.flter.name == 'Minimum number of committers':\n s += \"\"\" min_committers: set of string;\n visit(input, visitor {{\n before n: Revision -> add(min_committers, n.committer.username);\n }});\n if (len(min_committers) < {})\n filtered = true;\n\"\"\".format(filtr.value)\n\n #\n elif filtr.pfilter.flter.name == 'Maximum number of committers':\n s += \"\"\" max_committers: set of string;\n visit(input, visitor {{\n before n: Revision -> add(max_committers, n.committer.username);\n }});\n if (len(max_committers) > {})\n filtered = true;\n\"\"\".format(filtr.value)\n # failsafe\n else:\n return ''\n\n return s + '}\\n\\n'\n\n def build_query(self, flters):\n query = '# PAClab project selection\\n' + self.template_start\n for f in flters:\n query += self.translate_filter(f)\n return query + self.template_end\n\n def run(self):\n if self.verbosity >= 1:\n print(' -> boa backend processing: ' + self.selector.slug)\n\n query = self.build_query(self.all_filters())\n if self.verbosity >= 3:\n print(query)\n\n client = BoaClient()\n client.login(config('BOA_USER'), config('BOA_PW'))\n\n job = client.query(query, client.get_dataset('2019 October/GitHub'))\n if self.verbosity >= 2:\n print(' -> boa job: http://boa.cs.iastate.edu/boa/index.php?q=boa/job/' + str(job.id))\n\n for f in self.filters():\n self.filter_start(f)\n\n while job.compiler_status == 'Running' or job.exec_status == 'Running' or job.compiler_status == 'Waiting' or job.exec_status == 'Waiting':\n job.refresh()\n time.sleep(3)\n\n if job.compiler_status == 'Error':\n print('job ' + str(job.id) + ' had compile error')\n elif job.exec_status == 'Error':\n print('job ' + str(job.id) + ' had exec error')\n else:\n try:\n output = job.output().decode('utf-8')\n\n for line in output.splitlines(False):\n self.discovered_project(line[6:])\n\n if self.verbosity >= 3:\n print(\" -> finished processing boa job\")\n except:\n pass\n\n self.done()\n\n client.close()\n\n def debug(self):\n print(self.build_query(self.all_filters()))\n","sub_path":"boa_backend/discoveryrunner.py","file_name":"discoveryrunner.py","file_ext":"py","file_size_in_byte":4310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"107480160","text":"# Write a class to hold player information, e.g. 
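The Boa discovery runner above busy-polls `job.refresh()` every 3 seconds with no upper bound, so a hung job blocks forever. A hedged generic wrapper with a timeout; `wait_until` is a hypothetical helper, not part of the boaapi client:

import time

def wait_until(predicate, interval=3.0, timeout=3600.0):
    # Poll predicate() until it returns True or the timeout elapses.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False

# Usage sketch mirroring the statuses polled in run() above:
# done = wait_until(lambda: (job.refresh() or True)
#                   and job.compiler_status not in ('Running', 'Waiting')
#                   and job.exec_status not in ('Running', 'Waiting'))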
what room they are in\n# currently.\nfrom room import Room\nclass Player:\n def __init__(self, name, current_room, inventory):\n self.name = name\n self.current_room = current_room\n self.inventory = inventory\n self.maxhealth = 100\n self.health = self.maxhealth\n self.mp = 100\n self.attack = 10\n self.status_effects = []\n def __repr__(self):\n return \"')\n # self.inventory.remove(item)\n # Room['items'].append(item)\n ","sub_path":"src/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"144483587","text":"from unittest import TestCase\n\nfrom hyperquant.api import item_format_by_endpoint, Endpoint, Direction, convert_items_obj_to_list, \\\n convert_items_dict_to_list, convert_items_list_to_dict, convert_items_obj_to_dict, ParamName\nfrom hyperquant.clients import Trade, ItemObject\n\n\nclass TestConverting(TestCase):\n endpoint = None\n item_format = None\n\n obj_items = None\n list_items = None\n dict_items = None\n \n obj_item_short = None\n list_item_short = None\n dict_item_short = None\n\n def setUp(self):\n super().setUp()\n\n if not self.endpoint:\n self.skipTest(\"Base test\")\n\n self.item_format = item_format_by_endpoint[self.endpoint]\n\n def test_convert_items_obj_to_list(self):\n # Items to items\n self._test_convert_items(self.obj_items, self.list_items, convert_items_obj_to_list)\n # Item to item\n self._test_convert_items(self.obj_items[0], self.list_items[0], convert_items_obj_to_list)\n # Check for items which are shorter than item_format (i.e. item is ItemObject, and item_format is for Trade)\n self._test_convert_items(self.obj_item_short, self.list_item_short, convert_items_obj_to_list)\n\n # Empty to empty, None to None\n self._test_convert_items([], [], convert_items_obj_to_list)\n self._test_convert_items([None, None], [None, None], convert_items_obj_to_list)\n self._test_convert_items(None, None, convert_items_obj_to_list)\n\n def test_convert_items_dict_to_list(self):\n self._test_convert_items(self.dict_items, self.list_items, convert_items_dict_to_list)\n self._test_convert_items(self.dict_items[0], self.list_items[0], convert_items_dict_to_list)\n self._test_convert_items(self.dict_item_short, self.list_item_short, convert_items_dict_to_list)\n\n self._test_convert_items([], [], convert_items_dict_to_list)\n self._test_convert_items([None, None], [None, None], convert_items_dict_to_list)\n self._test_convert_items(None, None, convert_items_dict_to_list)\n\n def test_convert_items_list_to_dict(self):\n self._test_convert_items(self.list_items, self.dict_items, convert_items_list_to_dict)\n self._test_convert_items(self.list_items[0], self.dict_items[0], convert_items_list_to_dict)\n self._test_convert_items(self.list_item_short, self.dict_item_short, convert_items_list_to_dict)\n\n self._test_convert_items([], [], convert_items_list_to_dict)\n self._test_convert_items([None, None], [None, None], convert_items_list_to_dict)\n self._test_convert_items(None, None, convert_items_list_to_dict)\n\n def test_convert_items_obj_to_dict(self):\n self._test_convert_items(self.obj_items, self.dict_items, convert_items_obj_to_dict)\n self._test_convert_items(self.obj_items[0], self.dict_items[0], convert_items_obj_to_dict)\n self._test_convert_items(self.obj_item_short, self.dict_item_short, convert_items_obj_to_dict)\n\n self._test_convert_items([], [], convert_items_obj_to_dict)\n self._test_convert_items([None, None], [None, None], 
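The `__repr__` in the player.py record above is truncated to `return \"')`, so the class will not even parse. The original body is lost; this is a guessed repair, hedged, not the author's code:

# Plausible repair of the truncated __repr__ in player.py above.
class Player:
    def __init__(self, name, current_room, inventory):
        self.name = name
        self.current_room = current_room
        self.inventory = inventory

    def __repr__(self):
        # Guessed format string -- the record's original return value is lost.
        return "<Player {} in {}>".format(self.name, self.current_room)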
convert_items_obj_to_dict)\n self._test_convert_items(None, None, convert_items_obj_to_dict)\n\n def _test_convert_items(self, items, expected, fun):\n result = fun(items, self.item_format)\n\n self.assertEqual(expected, result)\n\n\nclass TestConvertingTrade(TestConverting):\n endpoint = Endpoint.TRADE\n\n obj_item1 = Trade()\n obj_item1.platform_id = None # None needed to test convert_items_list_to_dict() with 1 item in params\n obj_item1.symbol = \"ETHUSD\"\n obj_item1.timestamp = 143423531\n obj_item1.item_id = \"14121214\"\n obj_item1.price = \"23424546543.3\"\n obj_item1.amount = \"1110.0034\"\n obj_item1.direction = Direction.SELL\n obj_item2 = Trade()\n obj_item2.platform_id = 2\n obj_item2.symbol = \"BNBUSD\"\n obj_item2.timestamp = 143423537\n obj_item2.item_id = 15121215\n obj_item2.price = 23.235656723\n obj_item2.amount = \"0.0034345452\"\n obj_item2.direction = Direction.BUY\n\n obj_items = [obj_item1, obj_item2]\n list_items = [[None, \"ETHUSD\", 143423531, \"14121214\", \"23424546543.3\", \"1110.0034\", Direction.SELL],\n [2, \"BNBUSD\", 143423537, 15121215, 23.235656723, \"0.0034345452\", Direction.BUY]]\n dict_items = [{ParamName.PLATFORM_ID: None, ParamName.SYMBOL: \"ETHUSD\",\n ParamName.TIMESTAMP: 143423531, ParamName.ITEM_ID: \"14121214\",\n ParamName.PRICE: \"23424546543.3\", ParamName.AMOUNT: \"1110.0034\", ParamName.DIRECTION: 1},\n {ParamName.PLATFORM_ID: 2, ParamName.SYMBOL: \"BNBUSD\",\n ParamName.TIMESTAMP: 143423537, ParamName.ITEM_ID: 15121215,\n ParamName.PRICE: 23.235656723, ParamName.AMOUNT: \"0.0034345452\", ParamName.DIRECTION: 2}]\n\n obj_item_short = ItemObject()\n obj_item_short.platform_id = None # None needed to test convert_items_list_to_dict() with 1 item in params\n obj_item_short.symbol = \"ETHUSD\"\n obj_item_short.timestamp = 143423531\n obj_item_short.item_id = \"14121214\"\n list_item_short = [None, \"ETHUSD\", 143423531, \"14121214\"]\n dict_item_short = {ParamName.PLATFORM_ID: None, ParamName.SYMBOL: \"ETHUSD\",\n ParamName.TIMESTAMP: 143423531, ParamName.ITEM_ID: \"14121214\"}\n","sub_path":"hyperquant/tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":5304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"539402832","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport math\nj=np.complex(0,1)\na=input(\"enter length of input\")\nx1=[]\nfor b in range(0,a):\n\tc=input(\"enter value\")\n\tx1.append(c)\na1=input(\"enter length of input\")\nx2=[]\nfor b1 in range(0,a1):\n\tc1=input(\"enter value\")\n\tx2.append(c1)\ndef DFT(x):\n\tN=1000\n\tw=np.linspace(-np.pi,np.pi,N)\t\n\ty=[]\n\tfor n in range(0,N):\n\t\tw_t=w[n]\n\t\to=0\t\t\t\n\t\tfor k in range(0,a):\n\t\t\to+=(x[k]*np.exp(-2*j*k*n*np.pi/N))\n\t\ty.append(o)\n\treturn y\ny1=DFT(x1)\ny2=DFT(x2)\ny4=[]\nN=1000\nfor e in range(0,N):\n\tf=y1[e]*y2[e]\n\ty4.append(f)\nprint(y4)\nplt.subplot(321)\nplt.plot(x1)\nplt.subplot(322)\nplt.plot(y1)\nplt.subplot(323)\nplt.plot(x2)\nplt.subplot(324)\nplt.plot(y2)\nplt.subplot(325)\nplt.plot(y4)\nplt.show()\n","sub_path":"lab7-cir_convo.py","file_name":"lab7-cir_convo.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"139704408","text":"from CSP import CSP\n\nclass ResourceCSP(CSP):\n def __init__(self, components, constraints):\n\n initial_domains = []\n #give domains of 1 - 10 which will restrain the percentage values to multiples of ten\n for each in 
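The lab7-cir_convo.py record above computes circular convolution with O(N^2) Python loops, uses the removed `np.complex` alias, and passes `input()` strings (Python 3 returns str) straight into `range`. A hedged NumPy rewrite of the same idea, multiplying same-length DFTs and inverting:

import numpy as np

def circular_convolve(x1, x2):
    # np.fft.fft(x, n) zero-pads the shorter input up to the common length n.
    n = max(len(x1), len(x2))
    X1 = np.fft.fft(x1, n)
    X2 = np.fft.fft(x2, n)
    return np.real(np.fft.ifft(X1 * X2))

y = circular_convolve([1, 2, 3], [1, 0, 0])
assert np.allclose(y, [1, 2, 3])   # convolving with a unit impulse is identity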
range(10):\n if each != 0:\n initial_domains.append(each)\n domains = {}\n\n\n result_constraints = {}\n\n for each_c in components:\n domains[each_c] = set()\n\n for each_d in initial_domains:\n domains[each_c].add(each_d)\n\n neighbors = {}\n #create neighbors connecting all nodes to each other as they all are connected to a single financial account\n for each in components:\n neighbors[each] = set()\n for other in components:\n if each != other:\n neighbors[each].add(other)\n\n #implement constraints from the ratios given from the user\n for each_n1 in neighbors.keys():\n for each_n2 in neighbors[each_n1]:\n result_constraints[(each_n1, each_n2)] = set()\n for each_d1 in domains[each_n1]:\n for each_d2 in domains[each_n2]:\n ratio = constraints[(each_n1, each_n2)]\n less_mult = ratio[1]\n great_mult = ratio[0]\n if less_mult > great_mult:\n if (each_d1*less_mult) <= (each_d2*great_mult):\n result_constraints[(each_n1, each_n2)].add((each_d1, each_d2))\n elif less_mult < great_mult:\n if (each_d1*less_mult) >= (each_d2*great_mult):\n result_constraints[(each_n1, each_n2)].add((each_d1, each_d2))\n else:\n if (each_d1*less_mult) == (each_d2*great_mult):\n result_constraints[(each_n1, each_n2)].add((each_d1, each_d2))\n\n\n #initialize in CSP parent class\n super().__init__(components, domains, result_constraints, neighbors)\n\n result = super().BacktrackSearch()\n #add nice percentages to the ratio values\n for key,value in result.items():\n current_value = result[key]\n result[key] = (str(current_value * 10) + \"%\")\n print(result)\n\n\n\n\ncomponents = [\"gas\", \"food\", \"clothing\"]\nconstraints = {(\"gas\",\"food\"): [1,2], (\"gas\",\"clothing\"): [3,1], (\"food\",\"gas\"): [2,1], (\"food\",\"clothing\"): [3,1], (\"clothing\",\"food\"): [1,3], (\"clothing\",\"gas\"): [1,3]}\n\nmap = ResourceCSP(components, constraints)\n","sub_path":"ResourceCSP.py","file_name":"ResourceCSP.py","file_ext":"py","file_size_in_byte":2618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"298986195","text":"# -*- coding: utf-8 -*-\nimport pandas as pd\nimport numpy as np\nfrom pandas import DataFrame,Series\nfrom sklearn.cluster import KMeans\nfrom sklearn.cluster import Birch\n \n#读取文件\ndatafile = 'MachineLearning\\\\DataSet\\\\go_track_trackspoints.csv'\noutfile = 'out.csv'\ndata = pd.read_csv(datafile,usecols=[\"latitude\",\"longitude\"])\nd = DataFrame(data)\nd.head()\n# ----------------------------------聚类-------------------------------------------\n\nmod = KMeans(n_clusters=3, n_jobs = 4, max_iter = 500)#聚成3类数据,并发数为4,最大循环次数为500\nmod.fit_predict(d)#y_pred表示聚类的结果\n \n#聚成3类数据,统计每个聚类下的数据量,并且求出他们的中心\nr1 = pd.Series(mod.labels_).value_counts()\nr2 = pd.DataFrame(mod.cluster_centers_)\nr = pd.concat([r2, r1], axis = 1)\nr.columns = list(d.columns) + [\"Clustering\"]\nprint(r)\n \n#给每一条数据标注上被分为哪一类\nr = pd.concat([d, pd.Series(mod.labels_, index = d.index)], axis = 1)\nr.columns = list(d.columns) + [\"Clustering\"]\nprint(r.head())\nr.to_csv(outfile)#如果需要保存到本地,就写上这一列\n\n# ------------------------------------可视化过程------------------------------------------\n\nfrom sklearn.manifold import TSNE\n \nts = TSNE()\nts.fit_transform(r)\nts = pd.DataFrame(ts.embedding_, index = r.index)\n \nimport matplotlib.pyplot as plt\n \na = ts[r[\"Clustering\"] == 0]\nplt.plot(a[0], a[1], 'r.')\na = ts[r[\"Clustering\"] == 1]\nplt.plot(a[0], a[1], 'go')\na = ts[r[\"Clustering\"] == 2]\nplt.plot(a[0], a[1], 
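One caveat on the ResourceCSP above: its binary ratio constraints alone never force the percentages to total 100%, since each domain value is an independent multiple of ten. A hedged post-check (hypothetical helper, not in the record):

def allocations_total(assignment):
    # assignment maps component -> domain value in 1..9 (tens of percent)
    return 10 * sum(assignment.values())

# e.g. {'gas': 2, 'food': 4, 'clothing': 6} totals 120%, so if the solver
# returned it, a sum-to-100 constraint would have to be added to rule it out.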
'b*')\nplt.show()","sub_path":"MachineLearning/Clustering/K-Means.py","file_name":"K-Means.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"24009413","text":"import os\nimport sys\nimport cv2\nimport time\nimport random\nsys.path.append(r\"/home/shushi/projects/3face/face_api/src\")\nfrom common.api_faceplusplus import FacePlusPlus,FaceSet,Face\nfrom utils.base import draw_face_rectangle\n\ndef MyCompare(path1,path2,output_path1,output_path2,num,confidence,result_file):\n files1 = os.listdir(path1)\n files2 = os.listdir(path2)\n num1 = len(files1)\n num2 = len(files2)\n j = 0\n while j < num:\n ran1 = random.randint(0, num1 - 1)\n ran2 = random.randint(0, num2 - 1)\n\n file1 = path1 + files1[ran1]\n file_outpath1 = output_path1 + files1[ran1]\n file2 = path2 + files2[ran2]\n file_outpath2 = output_path2 + files2[ran2]\n time.sleep(1)\n img1 = cv2.imread(file1)\n img2 = cv2.imread(file2)\n result = faceplus.compare(file1, file2)\n try:\n face1 = result['faces1']\n draw_face_rectangle(face1, img1, file_outpath1)\n face2 = result['faces2']\n draw_face_rectangle(face2, img2, file_outpath2)\n\n if len(face1)!=0 and len(face2)!=0:\n buf = file1 + \" vs \" + file2 + \"\\n\" + \"confidence:\" + str(result[\"confidence\"]) + \" thresholds:\" + str(\n result[\"thresholds\"]) + \"\\n\"\n confidence += result[\"confidence\"]\n result_file.writelines(buf)\n j += 1\n elif len(face1)==0:\n buf = file1 + \" vs \" + file2 + \"\\n\" + \"no face :\" + str(file1) + \"\\n\"\n result_file.writelines(buf)\n elif len(face2)==0:\n buf = file1 + \" vs \" + file2 + \"\\n\" + \"no face :\" + str(file2) + \"\\n\"\n result_file.writelines(buf)\n except:\n print(\"error\")\n continue\n result_file.writelines(\"\\n\\n\\n\")\n return confidence\n\n\npath1 = '/home/shushi/Pic/ZhuJiaWen/'\npath2 = '/home/shushi/Pic/ZhuJiaYi/'\noutput_path = '../result/compare_twins/faceplusplus/'\noutput_path1 = '../result/compare_twins/faceplusplus/ZhuJiaWen/'\noutput_path2 = '../result/compare_twins/faceplusplus/ZhuJiaYi/'\n\nif not os.path.exists(output_path1):\n os.makedirs(output_path1)\nif not os.path.exists(output_path2):\n os.makedirs(output_path2)\n\nfaceplus = FacePlusPlus()\nfile_name = output_path+\"compare.txt\"\nresult_file = open(file_name, 'a+')\n\nnum = 100\nconfidence1 = 0.0\nconfidence2 = 0.0\n\nconfidence1 = MyCompare(path1,path1,output_path1,output_path2,num,confidence1,result_file)\nconfidence1 = MyCompare(path2,path2,output_path1,output_path2,num,confidence1,result_file)\nconfidence2 = MyCompare(path1,path2,output_path1,output_path2,num*2,confidence2,result_file)\n\nprint(\"confidence1: \"+str(confidence1/num/2))\nprint(\"confidence2: \"+str(confidence2/num/2))\nresult_file.flush()\nresult_file.close()","sub_path":"src/test_compare_twins/test_faceplusplus.py","file_name":"test_faceplusplus.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"175285558","text":"import requests\nimport json\ndef parsuj(file):\n atoms = []\n chain_symbol = \"A\"\n atomy = {}\n sekwencje = {}\n sekwencja = ''\n seq_id = 1\n do_wyslania_chain = []\n for line in file:\n list = line.split()\n id =list[0]\n if id == 'ATOM':\n chain = list[4]\n seq = list[5]\n if chain_symbol == chain:\n atom = {'symbol':list[11],'x_cor':list[6],'y_cor':list[7],'z_cor':list[8]}\n atoms.append(atom)\n if seq_id != seq:\n sekwencja = sekwencja + list[3]\n seq_id = 
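The K-Means.py record above fixes `n_clusters=3` with no justification (and `n_jobs` is deprecated in recent scikit-learn). A hedged sketch of a silhouette scan to sanity-check the choice of k on the same latitude/longitude data, assuming scikit-learn is installed:

from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

def best_k(points, candidates=range(2, 8)):
    # Higher silhouette means tighter, better-separated clusters.
    scores = {}
    for k in candidates:
        labels = KMeans(n_clusters=k, max_iter=500).fit_predict(points)
        scores[k] = silhouette_score(points, labels)
    return max(scores, key=scores.get), scores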
seq\n else:\n atomy[chain_symbol] = {'atoms':atoms}\n sekwencje = {'chain_symbol':chain_symbol,\n 'sequence': sekwencja}\n do_wyslania_chain.append(sekwencje)\n chain_symbol = chain\n atoms = []\n sekwencje = {}\n sekwencja = ''\n elif id=='ENDMDL':\n atomy[chain_symbol] = {'atoms':atoms}\n sekwencje = {'chain_symbol':chain_symbol,\n 'sequence': sekwencja}\n do_wyslania_chain.append(sekwencje)\n break\n print(atomy.keys())\n send_chain = {'chains':do_wyslania_chain}\n\n return {'atoms':atomy, \"chains\":send_chain}\n","sub_path":"front/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"212820628","text":"#coding: utf-8\nimport csv\nimport ujson\nimport os\nimport threading\nimport shutil\nimport xlrd\nimport xlwt\n\n\nfrom utils.FormatTools import format_telnum_BQC\n\n\ndef get_web_out_tel(fn='data_input/web_out_tels.csv'):\n ret = {}\n with open(fn, 'r', encoding='utf8') as rh:\n cr = csv.reader(rh)\n for line in cr:\n # 序号,名称,区号,查得号码,查得地址,url,状态\n name = line[1]\n tel_part = line[3].strip()\n if tel_part:\n tels = line[3].split(';')\n else:\n tels = []\n address = line[4]\n url = line[5]\n status = line[6]\n ret[name] = (tels, address, url, status)\n print(\"get web out tel done\", len(ret))\n return ret\n\n\ndef load_dict(dn):\n with open(dn, 'r', encoding='utf8') as fh:\n result = ujson.load(fh)\n return result\n\n\ndef get_web_out_names(fn='data_input/web_out_names.csv'):\n ret = {}\n with open(fn, 'r', encoding='utf8') as rh:\n cr = csv.reader(rh)\n cr.__next__()\n for line in cr:\n # 序号,电话,查得名称,查得地址,url\n tel = line[1]\n names = line[2]\n address = line[3]\n url = line[4]\n ret[tel] = [names, address, url]\n print(\"get web out name done\", len(ret))\n return ret\n\n\ndef get_es_out_tel(fn='data_input/match_name_for_tel_ret.csv'):\n ret = {}\n with open(fn, 'r', encoding='utf8') as rh:\n cr = csv.reader(rh)\n cr.__next__()\n for line in cr:\n # no,ori_name,top_0\n name = line[1]\n tmp = line[2].split('|')\n tel_str = tmp[1].strip()\n if tel_str:\n tel_list = tmp[1].split(',')\n else:\n tel_list = []\n address = tmp[2]\n if address == '无':\n address = ''\n tel_list = [t for t in tel_list if t != '无' ]\n ret[name] = (tel_list, address)\n print(\"get es out tel done\", len(ret))\n return ret\n\n\ndef get_es_out_names(fn='data_input/term_tel_for_name_ret.csv'):\n ret = {}\n with open(fn, 'r', encoding='utf8') as rh:\n cr = csv.reader(rh)\n cr.__next__()\n for line in cr:\n #no,tel,name,address\n tel = line[1]\n name = line[2]\n if name == '无':\n name = ''\n address = line[3]\n if address == '无':\n address = ''\n ret[tel] = (name, address)\n print(\"get es out name done\", len(ret))\n return ret\n\n\ndef get_md_out_tel(fn='data_input/telfile.csv'):\n ret = {}\n with open(fn, 'r', encoding='utf8') as rh:\n cr = csv.reader(rh)\n cr.__next__()\n for line in cr:\n #\"编号\",\"名称\",\"区号\",\"查得号码\",\"查得地址\"\n name = line[1]\n tels_str = line[3].strip()\n if tels_str:\n tels = line[3].split(',')\n else:\n tels = []\n address = line[4].split(',')[0]\n tels = [t for t in tels if t]\n ret[name] = (tels, address)\n print(\"get md out tel done\", len(ret))\n return ret\n\n\ndef get_md_out_names(fn='data_input/namefile.csv'):\n ret = {}\n with open(fn, 'r', encoding='utf8') as rh:\n cr = csv.reader(rh)\n cr.__next__()\n for line in cr:\n #\"编号\",\"电话\",\"结果名称\",\"查得地址\"\n tel = line[1]\n name = line[2]\n address = line[3]\n ret[tel] = (name, address)\n print(\"get md out 
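The PDB parser above concatenates three-letter residue names (`list[3]`) into `sekwencja`, producing strings like 'ALAGLY' instead of a conventional sequence. A hedged helper mapping three-letter codes to the standard one-letter symbols (the table is the standard amino-acid set; extend for nonstandard residues):

THREE_TO_ONE = {
    'ALA': 'A', 'ARG': 'R', 'ASN': 'N', 'ASP': 'D', 'CYS': 'C',
    'GLN': 'Q', 'GLU': 'E', 'GLY': 'G', 'HIS': 'H', 'ILE': 'I',
    'LEU': 'L', 'LYS': 'K', 'MET': 'M', 'PHE': 'F', 'PRO': 'P',
    'SER': 'S', 'THR': 'T', 'TRP': 'W', 'TYR': 'Y', 'VAL': 'V',
}

def residue_letter(name):
    return THREE_TO_ONE.get(name, 'X')   # 'X' marks nonstandard residues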
name done\", len(ret))\n return ret\n\n\ndef split_data_1(data_all: list, process_size: int):\n part_size = int(len(data_all) / process_size)\n parts = []\n e = 0\n for i in range(process_size):\n s = i * part_size\n e = (i + 1) * part_size\n parts.append(data_all[s:e])\n left = data_all[e:]\n if left:\n j = 0\n while j < len(left):\n parts[j].append(left[j])\n j += 1\n return parts\n\n\ndef split_data_by_n(data_list, part_per_n):\n ret = []\n i = 0\n tlen = len(data_list)\n while i < tlen:\n k = part_per_n\n t = []\n while k and i < tlen:\n t.append(data_list[i])\n k -= 1\n i += 1\n ret.append(t)\n return ret\n\ndef write_csv(name, header, data):\n with open(name, 'w', encoding='utf8', newline='') as wh:\n cw = csv.writer(wh)\n cw.writerow(header)\n cw.writerows(data)\n\n\ndef tel_save_to_litter(header, data, n=15, data_dir='', prefix='tel'):\n data_parts = split_data_1(data, n)\n i = 1\n t_list = []\n for data in data_parts:\n t = threading.Thread(target=write_csv, args=(f\"{data_dir}/{prefix}_split_{i}.csv\", header, data))\n t.start()\n i += 1\n for t in t_list:\n t.join()\n\n\ndef web_only_save_to_litter(header, data, n=5, data_dir='', prefix='web_only'):\n data_parts = split_data_1(data, n)\n i = 1\n t_list = []\n for data in data_parts:\n t = threading.Thread(target=write_csv, args=(f\"{data_dir}/{prefix}_split_{i}.csv\", header, data))\n t.start()\n i += 1\n for t in t_list:\n t.join()\n\n\n\ndef name_save_to_litter(header, data, n=60, data_dir='', prefix='name'):\n def split_to_18(data3, pn=18):\n data_parts = split_data_1(data3, pn)\n i = 1\n t_list = []\n for d in data_parts:\n t = threading.Thread(target=write_csv, args=(f\"{data_dir}/{prefix}_sp{pn}_{i}.csv\", header, d))\n t.start()\n i += 1\n for t in t_list:\n t.join()\n\n def split_to_14(data3, pn=18):\n data_parts = split_data_1(data3, pn)\n i = 20\n t_list = []\n for d in data_parts:\n t = threading.Thread(target=write_csv, args=(f\"{data_dir}/{prefix}_sp{pn}_{i}.csv\", header, d))\n t.start()\n i += 1\n for t in t_list:\n t.join()\n def split_by_80(data2):\n i = 1\n t_list = []\n data_parts = split_data_by_n(data2, 80)\n for d in data_parts:\n t = threading.Thread(target=write_csv, args=(f\"{data_dir}/{prefix}_sp_{i}.csv\", header, d))\n t.start()\n i += 1\n for t in t_list:\n t.join()\n\n all_d = len(data)\n split_len = int(all_d / 3)\n if split_len == 0:\n split_len = 1\n data3 = data[:2*split_len]\n data2 = data[2*split_len:]\n split_to_18(data3, pn=19)\n split_to_14(data2, pn=13)\n\n\n\n# 名称差号码结果融合\ndef format_tel_output(ori_fn='data_out/input_name.csv', merge_out=\"data_out/tel_merge_out.csv\", web_only_file='data_out/tel_web_only_file.csv'):\n header = None\n name_keys = []\n all_dict_have = []\n correct_name_dict = load_dict('dict/name_dict.json')\n with open(ori_fn, 'r', encoding='utf8') as rh:\n cr = csv.reader(rh)\n header = cr.__next__()\n header.extend(('查得电话','查得地址','url', '状态'))\n for l in cr:\n # 序号,名称,区号\n name_keys.append(l)\n print('read ori input name done', len(name_keys))\n web_tel_data = get_web_out_tel()\n es_tel_data = get_es_out_tel()\n md_tel_data= get_md_out_tel()\n web_only = []\n all_miss = []\n out_30_dir = 'tel_out30'\n if os.path.exists(out_30_dir):\n shutil.rmtree(out_30_dir)\n os.makedirs(f'./{out_30_dir}')\n print('mkdir done')\n finally_ret = []\n for xh, name, area_code in name_keys:\n tels = []\n md_have = False\n web_have = False\n es_have = False\n dict_have = False\n tmp_web_only = None\n addr, url, status = '', '', '',\n if name in md_tel_data:\n vv = md_tel_data[name]\n 
md_tel_list = vv[0]\n if md_tel_list:\n md_have = True\n tels.extend(md_tel_list)\n addr = vv[1]\n if name in es_tel_data:\n vv = es_tel_data[name]\n es_tel_list = vv[0]\n if es_tel_list:\n es_have = True\n tels.extend(es_tel_list)\n es_addr = vv[1]\n if not addr and es_addr:\n addr = es_addr\n #web\n if name in web_tel_data:\n vv = web_tel_data[name]\n #tels, address, url, status\n web_tels = vv[0]\n if web_tels:\n web_have = True\n tels.extend(web_tels)\n web_address =vv[1]\n web_url = vv[2]\n web_status = vv[3]\n # web_tels = list(set(web_tels))\n # tmp_web_only = (xh, name, area_code, ';'.join(web_tels), web_address, web_url, web_status)\n if not addr and web_address:\n addr = web_address\n url = web_url\n status = web_status\n\n # dict deal\n if name in correct_name_dict:\n tels = [correct_name_dict[name]]\n all_dict_have.append(name)\n dict_have = True\n\n if web_have and not es_have and not md_have and not dict_have:\n # web_only.append(tmp_web_only)\n pass\n else:\n status = ''\n if not web_have and not es_have and not md_have and not dict_have:\n all_miss.append([xh, name, area_code, url, '', ''])\n tels = list(set(tels))\n # input dict filter\n input_tel_dict = load_dict('dict/input_tel.json')\n dict_find_tel = []\n for t in tels:\n if t in input_tel_dict:\n dict_find_tel.append(t)\n if dict_find_tel:\n tels = dict_find_tel\n finally_ret.append((xh, name, area_code, ';'.join(tels), addr, url, status))\n write_csv(merge_out, header, finally_ret)\n print(\"all done !\")\n tel_save_to_litter(['序号', '名称', '区号', '链接','查得号码','查得地址'], all_miss, n=15, data_dir=out_30_dir)\n print(\"all_miss split done !\", len(all_miss))\n # write_csv(web_only_file, header, web_only)\n # print(\"web_only split done !\", len(web_only))\n write_json()\n print('write json done!')\n print(\"all_dict have : \",all_dict_have)\n print(\"all_dict have : \",len(all_dict_have))\n\n\ndef fotmat_all_miss(data):\n l = len(data)\n i = 0\n while i < l:\n tel = data[i][1]\n tel, area_code = format_telnum_BQC(tel)\n if tel:\n data[i][1] = tel\n i += 1\n\n\n# 号码查名称结果融合\ndef format_name_output(ori_fn='data_out/input_tel.csv', merge_out=\"data_out/name_merge_out.csv\", web_only_file='data_out/name_web_only_file.csv'):\n header = None\n tel_keys = []\n with open(ori_fn, 'r', encoding='utf8') as rh:\n cr = csv.reader(rh)\n header = cr.__next__()\n for l in cr:\n tel_keys.append(l)\n all_dict_have = []\n correct_tel_dict = load_dict('dict/tel_dict.json')\n print('read ori input tel done', len(tel_keys))\n header.extend(('查得名称','查得地址','url'))\n # names, address, url\n web_name_data = get_web_out_names()\n # name, address\n es_name_data = get_es_out_names()\n # name, address\n md_name_data= get_md_out_names()\n web_only = []\n all_miss = []\n out_30_dir = 'name_out30'\n if os.path.exists(out_30_dir):\n shutil.rmtree(out_30_dir)\n os.makedirs(f'./{out_30_dir}')\n print('mkdir done')\n finally_ret = []\n for xh, tel in tel_keys:\n md_have = False\n web_have = False\n es_have = False\n dict_have = False\n tmp_web_only = None\n name = ''\n addr = ''\n url = ''\n if tel in md_name_data:\n vv = md_name_data[tel]\n md_name = vv[0]\n md_addr = vv[1]\n if md_name:\n md_have = True\n name = md_name\n addr = md_addr\n\n if tel in es_name_data:\n vv = es_name_data[tel]\n es_name = vv[0]\n es_addr = vv[1]\n if es_name:\n es_have = True\n if not name:\n name = es_name\n if es_addr and not addr:\n addr = es_addr\n\n if tel in web_name_data:\n vv = web_name_data[tel]\n web_name = vv[0]\n web_addr = vv[1]\n web_url = vv[2]\n if web_name :\n 
web_have = True\n if not name:\n name = web_name\n if web_addr and not addr:\n addr = web_addr\n tmp_web_only = (xh, tel, web_name, web_addr, web_url)\n url = web_url\n # dict deal\n formated_tel, area_code = format_telnum_BQC(tel)\n if formated_tel:\n if formated_tel in correct_tel_dict:\n name = correct_tel_dict[formated_tel]\n all_dict_have.append(name)\n dict_have = True\n if web_have and not es_have and not md_have and not dict_have:\n web_only.append(tmp_web_only)\n if not (web_have or es_have or md_have or dict_have):\n all_miss.append([formated_tel,''])\n finally_ret.append((xh, tel, name, addr, url))\n\n write_csv(merge_out, header, finally_ret)\n print(\"all done !\")\n fotmat_all_miss(all_miss)\n name_save_to_litter(['电话', '查得名称'], all_miss, n=30, data_dir=out_30_dir, prefix='name')\n print(\"all miss split done !\", len(all_miss))\n web_only_save_to_litter(header, web_only, n=5, data_dir='name_web_only/')\n print(\"all web_only write done !\", len(web_only))\n print(\"all_dict have : \",all_dict_have)\n print(\"all_dict have : \",len(all_dict_have))\n\n\ndef write_json(fh='data_out/tel_merge_out.csv', out='data_out/out_tels_json.txt'):\n exists_names = set()\n with open(out, 'w', encoding='utf8') as wh:\n with open(fh, 'r', encoding='utf8') as rh:\n cr = csv.reader(rh)\n cr.__next__()\n for line in cr:\n name = line[1]\n if name in exists_names:\n continue\n exists_names.add(name)\n tels_str = line[3].strip()\n if tels_str:\n tels = tels_str.split(';')\n else:\n tels = []\n if tels and len(tels)>1:\n t = {\n 'name':name,\n 'tels':list(set(tels))\n }\n wh.write(ujson.dumps(t, ensure_ascii=False) + \"\\n\")\n\n\nif __name__ == \"__main__\":\n format_tel_output()\n print(\"-\"*80)\n format_name_output()\n\n","sub_path":"format_output.py","file_name":"format_output.py","file_ext":"py","file_size_in_byte":14536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"502651581","text":"import sys\nsys.stdin = open('boj_9205.txt', 'r')\n\nimport sys\nfrom collections import deque\nreadline = sys.stdin.readline\nt = int(input())\n\ndef can_go(a_x, a_y, b_x, b_y):\n if abs(a_y - b_y) + abs(a_x - b_x) <= 1000:\n return True\n else:\n return False\n\nfor _ in range(t):\n n = int(input())\n\n stores = [list(map(int, readline().split())) for _ in range(n + 2)]\n visited = [0] * (n + 2)\n cur = 0\n Q = deque([(stores[0][0], stores[0][1], 0)])\n visited[0] = 1\n isFinished = False\n\n while Q:\n x, y, idx = Q.popleft()\n\n for i in range(n + 2):\n if i != idx and not visited[i] and can_go(x, y, stores[i][0], stores[i][1]):\n visited[i] = 1\n if i == n + 1:\n isFinished = True\n Q.append((stores[i][0], stores[i][1], i))\n\n if isFinished:\n break\n\n print(\"happy\" if isFinished else \"sad\")","sub_path":"정준현/11_BFSDFS_03/boj_9205_맥주 마시면서 걸어가기.py","file_name":"boj_9205_맥주 마시면서 걸어가기.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"218419221","text":"###############################################################################################################\n# NB: all the sentences after # are comments\n\n# This is the code that I have used for our simulations\n\n\n# HERE WE ARE IMPORTING ONLY SOME LIBRARIES\nimport numpy as np # this is a good math library in python, released by Berkley University\nimport tensorflow as tf # importing TensorFlow\nfrom scipy.stats import multivariate_normal as normal # math library for calling a normal 
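The boj_9205 record above solves the "happy/sad" problem as BFS where two points are adjacent when their Manhattan distance is at most 1000 m. A hedged compact variant that builds the adjacency list once and then runs plain reachability:

from collections import deque

def reachable(points):                 # points[0] = start, points[-1] = goal
    n = len(points)
    adj = [[j for j in range(n) if j != i and
            abs(points[i][0] - points[j][0]) + abs(points[i][1] - points[j][1]) <= 1000]
           for i in range(n)]
    seen, queue = {0}, deque([0])
    while queue:
        u = queue.popleft()
        if u == n - 1:
            return True
        for v in adj[u]:
            if v not in seen:
                seen.add(v)
                queue.append(v)
    return False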
distribution\nimport time # library for fix the time\nimport sys # library for some default functions\n\n## Here the code imports some specifics from tensorflow library\n\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.contrib.layers.python.layers import initializers\nfrom tensorflow.python.training.moving_averages import assign_moving_average\n\n\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n\n# NEURAL NETWORK DEFINITION: START#\n\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n\n\n# Here we are defining the subnetwork\n# The function takes as INPUT a tensor x of dimension batch_size * d\n# and retunrs as OUTPUT a tensor output of size batch_size * d.\n# The function normalises the tensor x using batch normalisation\n# and then computes an iterative loop for obtaining the tensor output.\n# The Neural Network works in the following way:\n# 1) The INPUT layer is normalised using batch normalisation, the layer is composed by d neurons\n# 2) The second layer is fed with the OUTPUT of step 1.\n# 3) The third layer is fed with the OUTPUT of step 2.\n# 4) The fourth layer is fed with the OUTPUT of step 3.\ndef _subnetwork(x, neu__, name):\n with tf.variable_scope(name):\n # this is the first layer, i.e. the INPUT layer\n hiddens = _batch_norm(x, name='path_input_norm') # I give to function batch norm tensor x\n\n for i in range(1, len(neu__) - 1):\n hiddens = _dense_batch_layer(hiddens, neu__[i], activation_fn=tf.nn.relu, name='layer_{}'.format(i))\n # here the second and third layer are computed. The activation function is given to the layer\n\n output = _dense_batch_layer(hiddens, neu__[-1], activation_fn=None, name='final_layer')\n # here the last layer is computed\n return output\n\n# The following function describes the single layer.\n# As you can see this function takes as INPUT a tensor input_, the size of the layer, i.e. 
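The HEAT-EQ.py record below builds its subnetworks in TensorFlow 1 style (explicit variable scopes, hand-written batch norm). For orientation, here is a hedged tf.keras transcription of the same BN -> (Dense + BN + ReLU) x2 -> Dense + BN stack; `use_bias=False` mirrors the record's bias-free matmul, since batch norm supplies the shift:

import tensorflow as tf

def make_subnetwork(neurons):
    # neurons follows the record's convention, e.g. [d, d + 10, d + 10, d].
    layers = [tf.keras.layers.BatchNormalization()]
    for width in neurons[1:-1]:
        layers += [tf.keras.layers.Dense(width, use_bias=False),
                   tf.keras.layers.BatchNormalization(),
                   tf.keras.layers.ReLU()]
    layers += [tf.keras.layers.Dense(neurons[-1], use_bias=False),
               tf.keras.layers.BatchNormalization()]
    return tf.keras.Sequential(layers)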
the number of neurons, and the activation function.\ndef _dense_batch_layer(input_, output_size, activation_fn=None, stddev=5.0, name='linear'):\n with tf.variable_scope(name):\n shape = input_.get_shape().as_list()\n weight = tf.get_variable('Matrix', [shape[1], output_size], tf.float64,\n tf.random_normal_initializer(stddev=stddev / np.sqrt(shape[1] + output_size)))\n hiddens = tf.matmul(input_,\n weight) # This is the multiplication between the matrix of weights and the vector of input\n hiddens_bn = _batch_norm(hiddens, is_training) # Again is normalised the result\n if activation_fn:\n # the activation function, i.e. the relu, is applied only on the second and third layer, as described in Implementation section of \"Solving high dimensional partial differential equations using deep learning\"\n return activation_fn(hiddens_bn)\n else:\n return hiddens_bn\n\n\n# The following function defines the batch normalisation\n# Again as INPUT is given a tensor x, other stuff are not important for the conceptual understanding of neural network.\n# This normalisation is really important for numerical studies using Deep Learning.\n# Indeed, training Deep Neural Network is complicated by the fact that the distribution of each Layer's input changes during training\n# as the parameters of the previous layers change. This slow down the training by requiring lower learning rates and carful\n# parameter initialisation. This methods, instead, normalising each batch, allows to using higher learning rates and allows to\n# speed up the algorithm.\n\n# Batch normalisation normalises each batch independetnly for having mean 0 and variance 1. For a layer with d dimensional input, for\n# example, batch normalisation will normalises each dimension, where the expectation and the variance are computed over the training data.\n# This normalisation speeds up the convergence, even when the features are correleted.\n# It is important to note that normlising each input of a layer may change what the layer can represent. To avoid this, batch normalisation\n# makes sure that the transformation inserted in the network can represent the identity transformation. To accomplish this, it introduces, for each activation input, i.e. for each dimension,\n# a pair of parameters gamma and beta, which scale and shift the nomalised value.\n# These parameters are learned along with the original model parameters, and restore the representation power of the network.\ndef _batch_norm(x, affine=True, name='batch_norm'):\n \"\"\"Batch normalization\"\"\"\n with tf.variable_scope(name):\n params_shape = [x.get_shape()[-1]]\n # beta parameter. It needs to be learned\n beta = tf.get_variable('beta', params_shape, tf.float64,\n initializer=tf.random_normal_initializer(\n 0.0, stddev=0.1, dtype=tf.float64))\n # gamma parameter. 
It needs to be learned\n gamma = tf.get_variable('gamma', params_shape, tf.float64,\n initializer=tf.random_uniform_initializer(\n 0.1, 0.5, dtype=tf.float64))\n moving_mean = tf.get_variable('moving_mean', params_shape, tf.float64,\n initializer=tf.constant_initializer(0.0, tf.float64),\n trainable=False)\n moving_variance = tf.get_variable('moving_variance', params_shape, tf.float64,\n initializer=tf.constant_initializer(1.0, tf.float64),\n trainable=False)\n # These ops will only be preformed when training\n mean, variance = tf.nn.moments(x, [0], name='moments')\n _extra_train_ops.append(assign_moving_average(moving_mean, mean, 0.99, True))\n _extra_train_ops.append(assign_moving_average(moving_variance, variance, 0.99, False))\n mean, variance = tf.cond(is_training,\n lambda: (mean, variance),\n lambda: (moving_mean, moving_variance))\n y = tf.nn.batch_normalization(x, mean, variance, beta, gamma, 1e-6)\n y.set_shape(x.get_shape())\n return y\n\n\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n\n# NEURAL NETWORK DEFINITION: END#\n\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n\n\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n\n# DEFINITION FUNCTIONS FOR INTEGRATION: 
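What `_batch_norm` above computes at training time is per-feature standardization over the batch followed by the learned scale and shift. A hedged NumPy illustration of that formula, y = gamma * (x - mean) / sqrt(var + eps) + beta:

import numpy as np

def batch_norm(x, gamma, beta, eps=1e-6):
    mean = x.mean(axis=0)                      # per-feature batch mean
    var = x.var(axis=0)                        # per-feature batch variance
    return gamma * (x - mean) / np.sqrt(var + eps) + beta

x = np.random.randn(128, 5) * 3.0 + 7.0
y = batch_norm(x, gamma=np.ones(5), beta=np.zeros(5))
assert np.allclose(y.mean(axis=0), 0.0, atol=1e-6)   # centered
assert np.allclose(y.std(axis=0), 1.0, atol=1e-3)    # unit scale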
START#\n\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n\n# It is the implementation of equation 9 and 10 in \"Solving high dimensional partial differential equations using deep learning\"\ndef _f_t(y):\n return 0.\n\n\n# This function returns trajectories and white noises\ndef integrate(num_sample):\n _x_init = np.ones(d) * 0.0\n dw_sample = normal.rvs(size=[num_sample, d, N]) * np.sqrt(2.*dt)\n x_sample = np.zeros([num_sample, d, N + 1])\n x_sample[:, :, 0] = np.ones([num_sample, d]) * _x_init\n for i in range(0, N):\n x_sample[:, :, i + 1] = x_sample[:, :, i] + ( dw_sample[:, :, i])\n return dw_sample, x_sample\n\n# This function returns trajectories and white noises\ndef integrate_init(num_sample, _rn_numb_init):\n dw_sample = normal.rvs(size=[num_sample, d, N]) * np.sqrt(2.*dt)\n x_sample = np.zeros([num_sample, d, N + 1])\n x_sample[:, :, 0] = _rn_numb_init\n for i in range(0, N):\n x_sample[:, :, i + 1] = x_sample[:, :, i] + ( dw_sample[:, :, i])\n return dw_sample, x_sample\n\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n\n# DEFINITION FUNCTIONS FOR INTEGRATION: 
END#\n\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n\n\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n\n# FROM HERE WE START#\n\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n#####################################################################################################################################################################\n\n\n\n#exact solution\n#x is a d dimensional veector\ndef exact_sol(x):\n A=float(T)*float(d)\n B=np.sum(0.5*(x*x))\n return A+B\n\ndef rel_err(x,y):\n return np.abs((x-y)/x)\n\n\n# Defintion terminal conditions in tensor flow\ndef phi(x):\n B=tf.reduce_sum(0.5*(x*x), 1, keepdims=True)\n return B\n\n\n# This equation prints on file the values that we need to know\ndef _approximate():\n x=np.zeros([d])\n s=exact_sol(x)\n lr, gs, _loss, _u_0 = sess.run([learning_rate, global_step, loss, u_0], feed_dict=feed_dict_valid)\n t1_train = time.time()\n file_out.write('% i\\t '' % f \\t % f \\t % f \\t %f \\t %f \\n' % (gs, lr, t1_train - t0_train, _loss, _u_0[0], s))\n file_out.flush() # flush files\n\n\ntf.compat.v1.reset_default_graph() # default function in TensorFlow that needs to be used only at the beginning, never in Session() or in run()\ndtype = tf.float64 # Defining the type of the tensor.\n\nwith tf.compat.v1.Session() as sess: # TensorFlow's session starts\n sample = int(sys.argv[1])\n file_name = 'Example_' + str(sample) + '.txt'\n MC=10000\n T, N = 1., 40 # T is the final time, N is the number of the step we make for each trajectory\n dt = T / N # dt is the step interval that we need to use in the integration\n batch_size = int(sys.argv[2]) # this is the size for each batch\n interval_point= float(sys.argv[3])/2.\n d = int(sys.argv[4]) #d is the number of dimensions\n min = 0. - interval_point\n max = 0. 
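The `integrate` functions above discretize dX = sqrt(2) dW, whose generator is the Laplacian, so for the terminal condition phi(x) = 0.5*|x|^2 the Feynman-Kac value is E[phi(X_T) | X_0 = x] = 0.5*|x|^2 + d*T -- exactly A + B in `exact_sol`. A hedged Monte Carlo check (T_, d_, n_paths are hypothetical local names; one exact Gaussian step suffices since the SDE has no drift):

import numpy as np

T_, d_, n_paths = 1.0, 10, 200000
x0 = np.zeros(d_)
XT = x0 + np.sqrt(2.0 * T_) * np.random.randn(n_paths, d_)   # X_T sampled exactly
mc = np.mean(0.5 * np.sum(XT * XT, axis=1))
assert abs(mc - (T_ * d_ + 0.5 * np.sum(x0 * x0))) < 0.1     # matches exact_sol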
+ interval_point\n file_name_approx_values_single_experiment = 'approx_values_single_experiment_' + str(sample) + '_batch_size_' + str(batch_size) + '_d_' + str(d) + '.txt'\n file_name_rel_error = 'rel_error_batch_size_' + str(batch_size) + '_d_' + str(d) + '.txt'\n rn_numb_init = np.random.uniform(min, max,size=[batch_size, d]) # this is the tensor of random initial points where trajectories start\n neurons = [d, d + 10, d + 10, d] # this array tells how many neurons needs to have each layer, i.e. the first layer has d neurons, the second one d+10, the third one d+10 and the fourth one d\n _neurons_hessian = [d, d + 10, d + 10, 1]\n neurons_u0 = [d, d + 10, d + 10, 1]\n train_steps = 15000 # this is the number of training steps\n mc_freq = 100 # global variable used for printing\n lr_boundaries = [500, 1500, 2500, 5500, 10000] # defines when we want to change the learning value\n #lr_boundaries = [4000, 6000] # defines when we want to change the learning value\n #lr_values = [0.005,0.001, 0.0005] # learning values\n lr_values = [10., 5., 1., 0.1, 0.001, 0.0001] # learning values\n _extra_train_ops = [] # array for storing variables that needs to be optimised\n\n t0_train = time.time() # we collect the starting time\n\n # we are defining some instruction for TensorFlow.\n DX = tf.compat.v1.placeholder(tf.float64, [None, d, N + 1 ], name='X')\n DW = tf.compat.v1.placeholder(tf.float64, [None, d, N], name='dW')\n is_training = tf.compat.v1.placeholder(tf.bool)\n\n with tf.variable_scope(\"forward\"):\n # Definition of u_0.\n u_0 = _subnetwork(DX[:, :, 0], neurons_u0, str(0)) / d\n # definition of variable of u_0.\n grad_u_0 = _subnetwork(DX[:, :, 0], neurons, str(-1)) / d\n u_approx = u_0\n grad_u_t = grad_u_0\n ##############################################################################################################\n ##############################################################################################################\n ##############################################################################################################\n ##############################################################################################################\n # INTEGRATION FOR u_t*\n \n for t in range(0, N - 1):\n \n g_s_t = str(t + 1)\n u_approx = u_approx - dt * _f_t(u_approx) + tf.reduce_sum(grad_u_t * DW[:, :, t], 1, keepdims=True)\n grad_u_approx_t = _subnetwork(DX[:, :, t + 1], neurons, g_s_t) / d\n grad_u_t = grad_u_approx_t\n \n \n u_approx = u_approx - dt * _f_t(u_approx) + tf.reduce_sum(grad_u_t * DW[:, :, -1], 1, keepdims=True) \n \n # u_approx stores the last value of the integration, i.e. 
the one that we need for the loss function\n        ##############################################################################################################\n        ##############################################################################################################\n        ##############################################################################################################\n        ##############################################################################################################\n        # END INTEGRATION FOR u_t\n\n        phi_final = tf.convert_to_tensor(DX[:, :, -1], dtype=dtype)\n        delta = u_approx - phi(phi_final)\n        DELTA_CLIP = 50.0\n        # definition of the loss function; more precisely this block does the following:\n        # if |delta| is less than 50 then it computes the mean of the squared delta,\n        # else it switches to the linear penalty 100 * |delta| - 50^2 so that large deltas do not produce huge numbers in the output file\n        loss = tf.reduce_mean(\n            tf.where(tf.abs(delta) < DELTA_CLIP, tf.square(delta), 2 * DELTA_CLIP * tf.abs(delta) - DELTA_CLIP ** 2))\n\n        # definition of variables for optimisation using TensorFlow\n        global_step = tf.compat.v1.get_variable('global_step', [], tf.int32, tf.constant_initializer(0), trainable=False)\n        learning_rate = tf.compat.v1.train.piecewise_constant(global_step, lr_boundaries, lr_values)\n\n        # from here we use the syntax of TensorFlow for minimising the loss function and optimising the weights of the neural networks\n        trainable_variables = tf.compat.v1.trainable_variables()\n        grads = tf.compat.v1.gradients(loss, trainable_variables)\n        optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate=learning_rate)\n        #optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate=learning_rate)\n        apply_op = optimizer.apply_gradients(zip(grads, trainable_variables), global_step=global_step, name='train_step')\n        all_ops = [apply_op] + _extra_train_ops\n        train_op = tf.group(*all_ops)\n\n    # open file for writing\n    file_out = open(file_name, 'w')\n    file_out_approx_values = open(file_name_approx_values_single_experiment, 'w')\n    file_out_rel_error = open(file_name_rel_error, 'a')\n    # test set, i.e. 
we create 256 trajectories\n dw_valid, x_valid = integrate(2)\n feed_dict_valid = {DW: dw_valid, DX: x_valid, is_training: False} # definition of a command for optimisation\n # random initialisation weights and variables\n sess.run(tf.compat.v1.global_variables_initializer())\n # loop for training our variables\n for step in range(train_steps + 1):\n\n if step % mc_freq == 0:\n print(step)\n # if the step is a multiple of mc_freq then print on file\n _approximate()\n # definition of training set of size equal to batch size.\n # these trajectories are computed at each step\n dw_train, x_train = integrate_init(batch_size, rn_numb_init)\n # command for training and optimise everything.\n sess.run(train_op, feed_dict={DX: x_train, DW: dw_train, is_training: True})\n # end for\n # we print last value on file\n _approximate()\n\n\n rn_numb_test = np.random.uniform(min, max,size=[MC, d]) # this is the tensor of random test points\n dw_test, x_test = integrate_init(MC, rn_numb_test)\n test_u_0 = sess.run([u_0], feed_dict={DX: x_test, DW: dw_test, is_training: False})\n # I prepare the array of approx value of u given by NN\n a_test=np.array(test_u_0)\n rel_err_arr=np.zeros([MC])\n\n for i in range(0, MC):\n x=rn_numb_test[i, :]\n y=a_test[0,i]\n res=np.array(exact_sol(x))\n re = np.array(rel_err(res, y))\n rel_err_arr[i]=re\n file_out_approx_values.write('%f \\t %f \\t %f \\n' % (res, y, rel_err_arr[i]))\n file_out_approx_values.flush() # flush files\n\n\n sum_rel_error = np.sum(rel_err_arr)\n mean_rel_error = np.mean(rel_err_arr)\n std_rel_error = np.std(rel_err_arr)\n file_out_rel_error.write('%i \\t '' %f \\t %f \\t %f \\t %f \\n' % (batch_size, sum_rel_error, mean_rel_error, std_rel_error,train_steps))\n file_out_rel_error.flush() # flush files\n\n\n\n file_out.close()\n file_out_approx_values.close()\n file_out_rel_error.close()\n\n \n","sub_path":"HEAT-EQ.py","file_name":"HEAT-EQ.py","file_ext":"py","file_size_in_byte":22827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"521226513","text":"def triangles():\n L = [1]\n while True:\n yield L\n L = [1] + [x + L[i+1] for i,x in enumerate(L[:-1]) ] + [1]\n\nn = 0\nresults = []\nfor t in triangles():\n print(t)\n results.append(t)\n n = n + 1\n if n == 10:\n break","sub_path":"yanghuiTriangle.py","file_name":"yanghuiTriangle.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"522779029","text":"# coding: utf-8\nimport numpy as np\nfrom keras.utils import to_categorical\n\ndef tpfpfn(pred_labels, true_labels):\n\n TP = np.sum(np.logical_and(pred_labels == 1, true_labels == 1))\n FP = np.sum(np.logical_and(pred_labels == 1, true_labels == 0))\n FN = np.sum(np.logical_and(pred_labels == 0, true_labels == 1))\n TN = np.sum(np.logical_and(pred_labels == 0, true_labels == 0))\n\n return TP, FP, FN, TN\n\ndef Accuracy(TP, FP, FN, TN):\n \n if TP == 0 and TN == 0:\n return 0\n \n Accuracy = (TP+TN)/(TP+FP+FN+TN)\n \n return Accuracy\n\ndef Precision(TP, FP):\n \n if TP == 0:\n return 0\n \n Precision = TP/(TP+FP)\n \n return Precision\n\ndef Recall(TP, FN):\n\n if TP == 0:\n return 0\n \n Recall = TP/(TP+FN)\n \n return Recall\n\ndef IU(TP, FP, FN):\n \n if TP == 0:\n return 0\n \n IU = TP/(TP+FP+FN)\n \n return IU\n\ndef F1(TP, FP, FN):\n \n if TP == 0:\n return 0\n \n F1 = 2*TP/(2*TP + FP + FN)\n \n return F1\n\ndef mAccuracy(y_pred, y_true):\n \n mAccuracy = 0\n \n # Calculate per class, ignoring 
background\n for cl in range(1,3):\n pred_labels = to_categorical(y_pred, num_classes=3)[...,cl]\n true_labels = to_categorical(y_true, num_classes=3)[...,cl]\n TP, FP, FN, TN = tpfpfn(pred_labels, true_labels)\n mAccuracy += Accuracy(TP, FP, FN, TN)/2\n \n return mAccuracy\n\ndef mPrecision(y_pred, y_true):\n \n mPrecision = 0\n \n # Calculate per class, ignoring background\n for cl in range(1,3):\n pred_labels = to_categorical(y_pred, num_classes=3)[...,cl]\n true_labels = to_categorical(y_true, num_classes=3)[...,cl]\n TP, FP, _, _ = tpfpfn(pred_labels, true_labels)\n mPrecision += Precision(TP, FP)/2\n \n return mPrecision\n\ndef mRecall(y_pred, y_true):\n \n mRecall = 0\n \n # Calculate per class, ignoring background\n for cl in range(1,3):\n pred_labels = to_categorical(y_pred, num_classes=3)[...,cl]\n true_labels = to_categorical(y_true, num_classes=3)[...,cl]\n TP, _, FN, _ = tpfpfn(pred_labels, true_labels)\n mRecall += Recall(TP, FN)/2\n \n return mRecall\n\ndef mIU(y_pred, y_true):\n \n mIU = 0\n \n # Calculate per class, ignoring background\n for cl in range(1,3):\n pred_labels = to_categorical(y_pred, num_classes=3)[...,cl]\n true_labels = to_categorical(y_true, num_classes=3)[...,cl]\n TP, FP, FN, _ = tpfpfn(pred_labels, true_labels)\n mIU += IU(TP, FP, FN)/2\n \n return mIU\n\ndef mF1(y_pred, y_true):\n \n mF1 = 0\n\n # Calculate per class, ignoring background\n for cl in range(1,3):\n pred_labels = to_categorical(y_pred, num_classes=3)[...,cl]\n true_labels = to_categorical(y_true, num_classes=3)[...,cl]\n TP, FP, FN, _ = tpfpfn(pred_labels, true_labels)\n mF1 += F1(TP, FP, FN)/2\n\n return mF1","sub_path":"metrics_for_paper.py","file_name":"metrics_for_paper.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"526830622","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 12 12:10:23 2018\nplot data in sql database with saspy \n@author: WQQ\n\"\"\"\nimport tkinter as tk\nfrom tkinter import ttk\nfrom PIL import ImageTk, Image\nimport MySQLdb\nimport matplotlib\nmatplotlib.use(\"TkAgg\")\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg\nfrom matplotlib.figure import Figure\nimport matplotlib.pyplot as plt\nimport datetime\nfrom tkcalendar import Calendar, DateEntry\nimport plotly as py\nimport plotly.graph_objs as go\nimport pandas as pd\nLARGE_FONT= (\"Verdana\", 12)\nconn=MySQLdb.connect(host=\"utspv.mysql.database.azure.com\", user=\"sc@utspv\", passwd=\"LQcHRNawH8sMqT6M\")\ncursor=conn.cursor()\nUSERS={'site_1': 'pvdata_home1', 'site_2':'pvdata_home1'}\nTYPE=['overall_energy', 'FeedIn', 'Purchased', 'Consumption', 'SelfConsumption', 'Production']\nMODULES={'battery': 'batteries', 'energy_15min':'energy_15min', 'energy_15min_detailed': 'energy_15min_detailed', 'energy_day': 'energy_day', \n 'energy_day_detailed': 'energy_day_detailed', 'sitepower': 'sitepower', 'sitepower_detailed': 'sitepower_detailed'}\nclass win_plot(tk.Tk):\n def __init__(self, *args, **kwargs):\n tk.Tk.__init__(self, *args, **kwargs)\n \n \n \n tk.Tk.wm_title(self, \"PV System\")\n \n container=tk.Frame(self)\n container.pack(side=\"top\", fill=\"both\", expand=True)\n \n container.grid_rowconfigure(0, weight=1)\n container.grid_columnconfigure(0, weight=1)\n self.frames={}\n for F in (StartPage, PageOne, PageTwo, PageThree, PageFour):\n frame=F(container, self)\n self.frames[F]=frame\n frame.grid(row=0, column=0, sticky=\"nsew\")\n 
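# NOTE (added): all page frames are stacked in the same grid cell; show_frame() below just\n        # raises the requested frame to the top of the stacking order via tkraise().\n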
        self.show_frame(StartPage)\n    def show_frame(self, cont):\n        frame=self.frames[cont]\n        frame.tkraise() #raise different frame according to their Z-axis order\nclass StartPage(tk.Frame):\n    def __init__(self, parent, controller):\n        tk.Frame.__init__(self, parent, bg='GhostWhite')\n        label = tk.Label(self, text=\"Visit Reports\", font=LARGE_FONT).pack(pady=20, padx=20)\n        button = ttk.Button(self, text=\"Report 1\", \n                            command=lambda: controller.show_frame(PageOne)).pack(pady=5)\n        button2 = ttk.Button(self, text=\"Report 2\",\n                            command=lambda: controller.show_frame(PageTwo)).pack(pady=5)\n        button3 = ttk.Button(self, text=\"Report 3\",\n                            command=lambda: controller.show_frame(PageThree)).pack(pady=5)\n        button4 = ttk.Button(self, text=\"Report 4\",\n                            command=lambda: controller.show_frame(PageFour)).pack(pady=5)\n#\n        pilImage = Image.open(\"logo.jpg\")\n        pilImage=pilImage.resize([600, 400])\n        img = ImageTk.PhotoImage(pilImage) \n        label=tk.Label(self, image=img)\n        label.image=img\n        label.pack(pady=20, padx=20)\n\n\nclass PageOne(tk.Frame):\n    def __init__(self, parent, controller):\n        #up\n        tk.Frame.__init__(self, parent, bg='Navy')\n        label = tk.Label(self, text=\"Report 1!\", font=LARGE_FONT).pack(pady=10, padx=10)\n        button1 = ttk.Button(self, text=\"Back to Home\",\n                             command=lambda: controller.show_frame(StartPage)).pack(pady=5)\n        button2 = ttk.Button(self, text=\"Report 2\",\n                             command=lambda: controller.show_frame(PageTwo)).pack(pady=5)\n        if conn==None:\n            label2=tk.Label(self, text=\"connection to database failed\").pack()\n        else:\n            \n            #left_history\n            user='site_1'\n            datatype='overall_energy'\n            selected_date='2018-05-01'\n            \n            f = Figure(figsize=(5,5), dpi=100)\n\n            canvas = FigureCanvasTkAgg(f, self)\n            canvas.show()\n            canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)\n            \n            toolbar = NavigationToolbar2TkAgg(canvas, self)\n            toolbar.update()\n            canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n\n            \n            self.plot_fig(user, datatype, selected_date, f)\n            #list_user\n            var_user=tk.StringVar()\n            lb_user=tk.Listbox(self, fg='Maroon', font=(\"Verdana\", 10), width=10, height=10, selectmode='browse', listvariable=var_user)\n            def update_item_user(event):\n                nonlocal user\n                user=lb_user.get(lb_user.curselection())\n                self.plot_fig(user, datatype, selected_date, f)\n                print('selected user')\n            lb_user.bind('<<ListboxSelect>>', update_item_user)\n            list_user=USERS.keys()\n            for item in list_user:\n                lb_user.insert('end', item)\n            scrl_user=tk.Scrollbar(self)\n            scrl_user.pack(side='right', fill='y')\n            lb_user.configure(yscrollcommand=scrl_user.set)\n            lb_user.pack(side='right', fill='both')\n            scrl_user['command']=lb_user.yview\n            \n            #select datatype\n            \n            var_type=tk.StringVar()\n            lb_type=tk.Listbox(self, fg='DarkCyan', font=(\"Verdana\", 10), width=15, height=15, selectmode='browse', listvariable=var_type)\n            def update_item_type(event):\n                nonlocal datatype\n                datatype=lb_type.get(lb_type.curselection())\n                self.plot_fig(user, datatype, selected_date, f)\n                print('selected datatype')\n            lb_type.bind('<<ListboxSelect>>', update_item_type)\n            list_type=TYPE\n            for item in list_type:\n                lb_type.insert('end', item)\n            scrl_type=tk.Scrollbar(self)\n            scrl_type.pack(side='right', fill='y')\n            lb_type.configure(yscrollcommand=scrl_type.set)\n            lb_type.pack(side='right', fill='both')\n            scrl_type['command']=lb_type.yview\n\n            #select date\n            \n            cal=Calendar(self, font=('Verdana', 10), selectmode='day', cursor='hand1', year=2018, month=5, day=1, width=10, borderwidth=10)\n            cal.pack(fill='both', expand=True, side='top', padx=5)\n            def update_date():\n                nonlocal selected_date\n                selected_date=str(cal.selection_get())\n
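                # NOTE (added): the 'nonlocal' declarations above let each handler update the\n                # shared selection state instead of rebinding a fresh local name.\n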
                self.plot_fig(user, datatype, selected_date, f)\n                print('selected date')\n            bt=tk.Button(self, text='ok', font=(\"Verdana\", 15), command=update_date).pack()\n    \n    def plot_fig(self, user, datatype, selected_date, f):\n        print('update fig')\n        #label_fig=tk.Label(self, text=datatype+' of '+ user + ' in ' + selected_date, font=(\"Verdana\", 12), fg='Plum').pack()\n        dateTime_start=selected_date+\" 00:00:00\"\n        dateTime_end=selected_date+\" 23:45:00\"\n\n        f.suptitle(datatype+' of '+ user + ' in ' + selected_date)\n        a = f.add_subplot(111)\n        \n\n        if datatype=='overall_energy':\n            command=\"select DateTime_c, Energy from \"+ USERS[user]+'.'+ MODULES['energy_15min']+\" where DateTime_c>=\\\"\"+ \\\n                dateTime_start+\"\\\" and DateTime_c<=\\\"\"+dateTime_end+\"\\\" order by DateTime_c\";\n        else:\n            command=\"select DateTime_c, Value_c from \"+ USERS[user]+'.'+ MODULES['energy_15min_detailed']+\" where DateTime_c>=\\\"\"+ \\\n                dateTime_start+\"\\\" and DateTime_c<=\\\"\"+dateTime_end+\"\\\" and Type_c=\\\"\"+ datatype + \"\\\" order by DateTime_c\";\n        \n        cursor.execute(command)\n        conn.commit()\n        alldata=cursor.fetchall()\n        xx, yy=zip(*alldata)\n        x=list(xx)\n        y=list(yy)\n        a.plot(x, y)\n        \n        \nclass PageTwo(tk.Frame):\n\n    def __init__(self, parent, controller):\n        tk.Frame.__init__(self, parent, bg='Navy')\n        label = tk.Label(self, text=\"Report 2!\", font=LARGE_FONT).pack(pady=10,padx=10)\n\n        button1 = ttk.Button(self, text=\"Back to Home\",\n                             command=lambda: controller.show_frame(StartPage)).pack()\n\n        button2 = ttk.Button(self, text=\"Report 3\",\n                             command=lambda: controller.show_frame(PageThree)).pack()\n        sql ='SELECT `result_demo`.`TimeStamp`,`result_demo`.`FeedIn Price` from `pvdata_home1`.`result_demo`'\n        cursor.execute(sql) \n        rows = cursor.fetchall()\n        df=pd.DataFrame([[ij for ij in i] for i in rows])\n        df.rename(columns={0:'time',1:'price'},inplace=True)\n        trace1 = go.Scatter(\n                        x=df['time'],\n                        y=df['price'],\n                        mode = 'lines+markers',\n                        name='lines+markers'\n        )\n        data=go.Data([trace1])\n        layout=go.Layout(title=\"First Plot\", xaxis={'title':'time'}, yaxis={'title':'price'})\n        figure2=go.Figure(data=data,layout=layout)\n        py.offline.iplot(figure2,filename='user_cost')\n        \n        \n        f = Figure(figsize=(5,3), dpi=100)\n        canvas = FigureCanvasTkAgg(f, self)\n        canvas.show()\n        canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)\n\n        toolbar = NavigationToolbar2TkAgg(canvas, self)\n        toolbar.update()\n        canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n        f.suptitle('ss')\n        a = f.add_subplot(111)\n        a.plot(df['time'], df['price'])\n        \nclass PageThree(tk.Frame):\n\n    def __init__(self, parent, controller):\n        tk.Frame.__init__(self, parent, bg='Navy')\n        label = tk.Label(self, text=\"Report 3!\", font=LARGE_FONT).pack(pady=10,padx=10)\n\n        button1 = ttk.Button(self, text=\"Back to Home\",\n                             command=lambda: controller.show_frame(StartPage)).pack()\n        button2 = ttk.Button(self, text=\"Report 4\",\n                             command=lambda: controller.show_frame(PageFour)).pack()\n        sql ='SELECT `result_demo`.`TimeStamp`,`result_demo`.`user_netcost_before`, `result_demo`.`user_netcost_after` from `pvdata_home1`.`result_demo`'\n        cursor.execute(sql) \n        rows = cursor.fetchall()\n        df=pd.DataFrame([[ij for ij in i] for i in rows])\n        df.rename(columns={0:'time',1:'before',2:'after'},inplace=True)\n        f = Figure(figsize=(5,3), dpi=100)\n        canvas = FigureCanvasTkAgg(f, self)\n        canvas.show()\n        canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)\n\n        toolbar = NavigationToolbar2TkAgg(canvas, self)\n        toolbar.update()\n
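        # NOTE (added): same Figure-in-Tk embedding pattern as PageTwo; NavigationToolbar2TkAgg is\n        # the old (pre-2.2) matplotlib name of the toolbar, later renamed NavigationToolbar2Tk.\n        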
canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n f.suptitle('ss')\n a = f.add_subplot(111)\n a.plot(df['time'], df['before'],label='without aggregator')\n a.plot(df['time'], df['after'],label='with aggregator')\n \nclass PageFour(tk.Frame):\n\n def __init__(self, parent, controller):\n tk.Frame.__init__(self, parent, bg='Navy')\n label = tk.Label(self, text=\"Report 4!\", font=LARGE_FONT).pack(pady=10,padx=10)\n\n button1 = ttk.Button(self, text=\"Back to Home\",\n command=lambda: controller.show_frame(StartPage)).pack()\n button2 = ttk.Button(self, text=\"Report 1\",\n command=lambda: controller.show_frame(PageOne)).pack()\n sql ='SELECT `result_demo`.`TimeStamp`, `result_demo`.`Provider_Bene_before`, `result_demo`.`Provider_Bene_after` from `pvdata_home1`.`result_demo`'\n cursor.execute(sql) \n rows = cursor.fetchall()\n df=pd.DataFrame([[ij for ij in i] for i in rows])\n df.rename(columns={0:'time',1:'before',2:'after'},inplace=True)\n f = Figure(figsize=(5,3), dpi=100)\n canvas = FigureCanvasTkAgg(f, self)\n canvas.show()\n canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)\n\n toolbar = NavigationToolbar2TkAgg(canvas, self)\n toolbar.update()\n canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)\n f.suptitle('ss')\n a = f.add_subplot(111)\n a.plot(df['time'], df['before'],label='without aggregator')\n a.plot(df['time'], df['after'],label='with aggregator')\n \ndef main():\n\n showRs=win_plot()\n showRs.mainloop()\n# cursor.close()\n# conn.close()\n\n \n \n\n","sub_path":"PVSolar-master/plot_data_6.25.py","file_name":"plot_data_6.25.py","file_ext":"py","file_size_in_byte":12023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"519015881","text":"cipher = open('dump_425270').read()\nkey = open('dump_40E1BE').read()\nprint(len(cipher), len(key))\n\nflag = [ord(c) for c in cipher]\nfor i in range(0x90, -1, -1):\n pos = (ord(key[(68 * (i ^ 0x55)) * 4]) ^ 0x819f) % 50\n print(i, pos)\n flag[pos] ^= ord(key[68 * (i ^ 0x55) * 4])\n print(''.join([chr(f) for f in flag]))\n","sub_path":"Games/2020AIS3EOF/AssemblyLanguageBeast/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"389904015","text":"from geo.views import Location, User, LocationGuess\nfrom django.db.models.aggregates import Count\nfrom django.test import TestCase\nfrom rest_framework.test import APIClient\nfrom rest_framework import status\nfrom serializers import LocationSerializer, LocationGuessSerializer, UserSerializer\nfrom geo_server import confidential\n\n\nclass GeoTestCases(TestCase):\n\n def setUp(self):\n # assumes user with id == 2\n set_up_database()\n\n def test_geo(self):\n\n # need to be redone for auth tokens\n user = self._test_create_user()\n # user = User.objects.order_by('?').first()\n self._test_location_guess({\"user\": user.id, \"location\": user.current_location, \"lat\": 10, \"lon\": 10}, user.auth_token)\n self._test_location_guess({\"user\": 569549945, \"location\": user.current_location, \"lat\": 10, \"lon\": 10}, user.auth_token)\n self._test_location_guess({\"user\": user.id, \"location\": 958597597, \"lat\": 10, \"lon\": 10}, user.auth_token)\n self._test_location_guess({\"user\": user.id, \"location\": user.current_location, \"lat\": 200, \"lon\": 200}, user.auth_token)\n self._test_location_guess({}, user.auth_token)\n\n # print \"done testing location guesses:\"\n # print 
LocationGuess.objects.all()\n\n        # needs to be refactored for auth tokens\n        self._test_add_location({\"user\": user.id, \"lat\": 10, \"lon\": 10}, user.auth_token)\n        self._test_add_location({\"user\": user.id, \"lat\": 200, \"lon\": 200}, user.auth_token)\n        self._test_add_location({}, user.auth_token)\n\n        self._test_get_location()\n\n        self._test_get_location_guesses(user)\n\n        self._test_get_location_details(Location.objects.order_by('?').first())\n\n        # self.test_locations_to_near_locations()\n\n        self._test_user_profile_stats()\n\n        # set up location guesses\n        count = 0\n        while count < 175:\n            user = User.objects.order_by('?').first()\n            self._test_location_guess({\"user\": user.id, \"location\": user.current_location, \"lat\": count/2, \"lon\": count}, user.auth_token)\n            count += 1\n\n        self._test_user_profile_stats()\n\n    def _test_create_user(self):\n        client = APIClient()\n\n        response = client.post('/users/', {\"other_identifier\": \"123123123123\"}, format='json')\n        # print response\n\n        self.assertTrue(User.objects.filter(other_identifier='123123123123').exists())\n\n        user = User.objects.filter(other_identifier='123123123123').get()\n        self._assert_created_user(user)\n\n        return user\n\n        # self._test_location_guess_for_user()\n\n    def _test_google_auth(self):\n        auth_token = confidential.google_auth_token\n\n        client = APIClient()\n        request = client.post('/user/google_auth/',data={'auth_token':auth_token})\n\n        self.assertEqual(request.status_code, status.HTTP_200_OK)\n        serializer = UserSerializer(data=request.data)\n        self.assertTrue(serializer.is_valid(raise_exception=False))\n\n    def _test_location_guess(self, data, auth_token):\n        client = APIClient()\n\n        serializer = LocationGuessSerializer(data=data)\n        valid = serializer.is_valid(raise_exception=False)\n\n        if valid:\n            user = User.objects.get(id=data['user'])\n            valid = user.auth_token == auth_token\n            last_score = user.total_score\n            last_current_location = user.current_location\n\n        response = client.post('/locationGuess/?auth_token='+auth_token, data, format='json')\n\n        if valid:\n            self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n            user = User.objects.get(id=user.id)\n\n            self.assertNotEqual(last_current_location, int(user.current_location))\n\n            self.assertTrue(LocationGuess.objects.filter(user__id=user.id).exists())\n            # print LocationGuess.objects.all()\n\n            self.assertTrue(last_score < user.total_score)\n\n        else:\n            self.assertTrue(response.status_code >= 400)\n            # self.assertIn('error',response.data)\n\n    def _test_add_location(self, data, auth_token):\n        client = APIClient()\n\n        serializer = LocationSerializer(data=data)\n        valid = serializer.is_valid(raise_exception=False)\n\n        if valid:\n            user = User.objects.get(id=data['user'])\n            # print \"test add location user validity check:\"\n            # print \"user: \"+ str(user)\n            # print \"given auth token: \"+str(auth_token)\n            valid = user.auth_token == auth_token\n            user_locations_count = Location.objects.filter(users__id=user.id).count()\n\n        response = client.post('/locations/?auth_token='+auth_token, data, format='json')\n\n        if valid:\n            self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n            self.assertTrue(Location.objects.filter(users__id=user.id).count() == (user_locations_count + 1))\n\n        else:\n            self.assertTrue(response.status_code >= 400)\n            # self.assertIn('error', response.data)\n\n    def _test_get_location(self):\n\n        request = self.client.get('/locations/'+str(Location.objects.order_by('?').first().id)+\"/\")\n        # print request.status_code\n\n        self.assertEqual(request.status_code, 
200)\n\n def _test_get_location_details(self, location):\n\n request = self.client.get('/locations/'+str(location.id)+'/details/')\n data = request.data\n # print data\n self.assertEqual(str(location.lat), str(data['place']['lat']))\n self.assertEqual(str(location.lon), str(data['place']['lon']))\n self.assertTrue('location_guesses' in data)\n\n def _test_get_location_guesses(self, user):\n # print \"all location guesses:\"\n # for location_guess in LocationGuess.objects.all():\n # print location_guess\n request = self.client.get('/users/'+str(user.id)+'/locationGuesses/')\n # print \"location guesses for user:\"\n # print request\n\n # bypass view create location test\n def _test_location_guess_for_user(self):\n user = User.objects.order_by('?').first()\n # print \"test_location_guess_for_user\"\n # print user\n last_current_location = user.current_location\n # print \"getting current_user_location \"+str(user.current_location)\n # print Location.objects.all()\n current_user_location = Location.objects.get(id=user.current_location)\n\n for i in range(0, 10):\n location_guess = self._create_location_guess(current_user_location, user)\n user.save_location_guess(location_guess)\n\n location = Location.objects.get(id=user.current_location)\n # print \"got new location id = \"+str(location.id)\n # print \"user:\"\n # print user\n\n self.assertIsNotNone(location) # valid location returned\n self.assertNotEqual(location.user.id, user.id) # location is not user's location\n self.assertIn(str(last_current_location)+',', user.guessed_locations) #last_current_location in user guesses\n # self.assertNotIn(str(location.id)+',', user.guessed_locations) # new location id not already in guessed_locations\n self.assertNotRegexpMatches(user.guessed_locations, r','+str(location.id)+',')\n\n last_current_location = user.current_location\n\n def _create_location_guess(self, location, user, index=1):\n location_guess = LocationGuess.objects.create(user=user, location=location, lat=10, lon=10)\n\n self.assertIsNotNone(location_guess)\n\n return location_guess\n\n def _assert_created_user(self, user):\n self.assertTrue(Location.objects.filter(id=user.current_location).exists())\n self.assertTrue(user.email or user.other_identifier)\n\n\n def test_locations_to_near_locations(self):\n import views\n user = User.objects.all().first()\n # original_location = Location.objects.create(lat=27.900911, lon=-82.660154)\n data = {'lat':27.900911, 'lon':-82.660154}\n views._save_location(data, user.id)\n original_location = Location.objects.all().reverse().first()\n # original_location.users.add(user)\n # original_location.save()\n\n users = User.objects.all()\n # data = {'lat':70.00, 'lon':70.00}\n # views._save_location(data, users[3].id)\n\n data = {'lat':27.901040, 'lon':-82.660318}\n views._save_location(data, users[1].id)\n data = {'lat':27.900767, 'lon':-82.659999}\n views._save_location(data, users[2].id)\n data = {'lat':27.901145, 'lon':-82.660417}\n views._save_location(data, users[3].id)\n\n data = {'lat': 27.903030, 'lon': -82.662390} # a little more than 100 meters\n views._save_location(data, users[4].id)\n\n ids_of_users = []\n # print \"original location id = \"+ str(original_location.id)\n self.assertTrue(Location.objects.get(id=original_location.id).users.count() == 4)\n # print \"number of users for original location = \"+ str(Location.objects.get(id=original_location.id).users.count())\n for user in Location.objects.get(id=original_location.id).users.all():\n ids_of_users.append(user.id)\n\n 
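# NOTE (added): users[1..3] submitted points within roughly 100 m of the original location and\n        # should be attached to it; users[4] is just over 100 m away and must not be.\n        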
self.assertTrue(users[1].id in ids_of_users)\n self.assertTrue(users[2].id in ids_of_users)\n self.assertTrue(users[3].id in ids_of_users)\n\n self.assertTrue(users[4].id not in ids_of_users)\n\n # test duplicate users aren't added\n data = {'lat':27.901040, 'lon':-82.660318}\n views._save_location(data, users[1].id)\n data = {'lat':27.900767, 'lon':-82.659999}\n views._save_location(data, users[2].id)\n\n self.assertTrue(Location.objects.get(id=original_location.id).users.count() == 4)\n\n # similar location was submitted, should be withtin ~ 100 - 500 meters\n # so it should be too close to user[4]'s other location\n data = {'lat': 27.903030, 'lon': -82.662390} # a little more than 100 meters\n response = views._save_location(data, users[4].id)\n # self.assertTrue(response.status_code == 400)\n\n def _test_user_profile_stats(self):\n # user = User.objects.all().first()\n user = User.objects.all().order_by('?').first()\n\n request = self.client.get('/users/'+str(user.id)+'/profile_stats/?auth_token='+str(user.auth_token))\n\n # for i in range(0, 20):\n # user = User.objects.get(id=user.id)\n # self._test_location_guess({\"user\": user.id, \"location\": user.current_location, \"lat\": i, \"lon\": (i+2)}, user.auth_token)\n\n # request = self.client.get('/users/'+str(user.id)+'/profile_stats/?auth_token='+str(user.auth_token))\n\n # print \"user profile status request: \"\n # print request\n\n\ndef set_up_database():\n count = User.objects.aggregate(count=Count('id'))['count']\n while count < 5:\n user = User.objects.create(email='test'+str(count)+'@gmail.com')\n count += 1\n\n count = Location.objects.aggregate(count=Count('id'))['count']\n\n while count < 50:\n location = Location.objects.create(lat=count, lon=count)\n location.users.add(User.objects.order_by('?').first())\n count += 1\n","sub_path":"geo/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":11168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"484221301","text":"from django.shortcuts import redirect, render\nfrom django.http import HttpResponse\nfrom lists.models import Item, List\nfrom django.core.exceptions import ValidationError\n\ndef home_page(request):\n\treturn render(request, 'home.html')\n\ndef view_list(request, id_list):\n\tlist_ = List.objects.get(id=id_list)\n\terror = None\n\n\tif request.method == 'POST':\n\t\titem = Item.objects.create(text = request.POST['item_text'], list = list_)\n\t\ttry:\n\t\t\titem.full_clean()\n\t\t\titem.save()\n\t\t\treturn redirect('/lists/%d/' % (list_.id,))\n\t\texcept ValidationError:\n\t\t\terror = \"You can't have an empty list item\"\n\t\t\n\treturn render(request, 'list.html', {'list' : list_, 'error':error})\n\ndef new_list(request):\n\tlist_ = List.objects.create()\n\titem = Item.objects.create(text = request.POST['item_text'], list = list_)\n\ttry:\n\t\titem.full_clean()\n\t\titem.save()\n\texcept ValidationError:\n\t\tlist_.delete()\n\t\treturn render(request, 'home.html', {'error' : \"You can't have an empty list item\"})\n\treturn redirect('/lists/%d/' % (list_.id,))\n\ndef add_item(request, id_list):\n\tlist_ = List.objects.get(id=id_list)\n\tItem.objects.create(text=request.POST['item_text'], list=list_)\n\treturn redirect('/lists/%d/' % (list_.id,))","sub_path":"lists/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"502529668","text":"# based on 
https://www.kaggle.com/fareise/multi-head-self-attention-for-text-classification/comments\r\nimport math\r\nimport numpy as np\r\nfrom keras import backend as K\r\nfrom keras.engine.topology import Layer\r\nfrom keras.layers import Lambda, dot, concatenate,Dense, Input, LSTM, Embedding, Dropout, Activation, Conv1D, GRU, CuDNNGRU, CuDNNLSTM, BatchNormalization\r\nfrom keras.layers import Bidirectional, GlobalMaxPool1D, MaxPooling1D, Add, Flatten, TimeDistributed, Activation\r\nfrom keras.layers import GlobalAveragePooling1D, GlobalMaxPooling1D, concatenate, SpatialDropout1D\r\nfrom keras.models import Model, load_model\r\nfrom keras import initializers, regularizers, constraints, optimizers, layers, callbacks\r\nfrom keras import backend as K\r\nfrom keras.engine import InputSpec, Layer\r\nfrom keras.optimizers import Adam\r\nfrom keras.initializers import Ones, Zeros\r\nimport keras\r\nimport tensorflow as tf\r\n\r\n \r\n \r\nclass Position_Embedding(Layer):\r\n \r\n def __init__(self, size=None, mode='sum', **kwargs):\r\n self.size = size\r\n self.mode = mode\r\n super(Position_Embedding, self).__init__(**kwargs)\r\n \r\n def call(self, x):\r\n if (self.size == None) or (self.mode == 'sum'):\r\n self.size = int(x.shape[-1])\r\n batch_size,seq_len = K.shape(x)[0],K.shape(x)[1]\r\n position_j = 1. / K.pow(10000., \\\r\n 2 * K.arange(self.size / 2, dtype='float32' \\\r\n ) / self.size)\r\n position_j = K.expand_dims(position_j, 0)\r\n position_i = K.cumsum(K.ones_like(x[:,:,0]), 1)-1 \r\n position_i = K.expand_dims(position_i, 2)\r\n position_ij = K.dot(position_i, position_j)\r\n position_ij = K.concatenate([K.cos(position_ij), K.sin(position_ij)], 2)\r\n if self.mode == 'sum':\r\n return position_ij + x\r\n elif self.mode == 'concat':\r\n return K.concatenate([position_ij, x], 2)\r\n \r\n def compute_output_shape(self, input_shape):\r\n if self.mode == 'sum':\r\n return input_shape\r\n elif self.mode == 'concat':\r\n return (input_shape[0], input_shape[1], input_shape[2]+self.size)\r\n\r\ndef gelu(x):\r\n return 0.5 * x * (1 + K.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * K.pow(x, 3))))\r\n \r\n\r\n'''\r\noutput dimention: [batch_size, time_step, nb_head*size_per_head]\r\nevery word can be represented as a vector [nb_head*size_per_head]\r\n'''\r\nclass MultiHeadAttention(Layer):\r\n\r\n def __init__(self, nb_head, size_per_head, **kwargs):\r\n self.nb_head = nb_head\r\n self.size_per_head = size_per_head\r\n self.output_dim = nb_head*size_per_head\r\n super(MultiHeadAttention, self).__init__(**kwargs)\r\n\r\n def build(self, input_shape):\r\n self.WQ = self.add_weight(name='WQ', \r\n shape=(input_shape[0][-1], self.output_dim),\r\n initializer='glorot_uniform',\r\n trainable=True)\r\n self.WK = self.add_weight(name='WK', \r\n shape=(input_shape[1][-1], self.output_dim),\r\n initializer='glorot_uniform',\r\n trainable=True)\r\n self.WV = self.add_weight(name='WV', \r\n shape=(input_shape[2][-1], self.output_dim),\r\n initializer='glorot_uniform',\r\n trainable=True)\r\n super(MultiHeadAttention, self).build(input_shape)\r\n \r\n def Mask(self, inputs, seq_len, mode='mul'):\r\n if seq_len == None:\r\n return inputs\r\n else:\r\n mask = K.one_hot(seq_len[:,0], K.shape(inputs)[1])\r\n mask = 1 - K.cumsum(mask, 1)\r\n for _ in range(len(inputs.shape)-2):\r\n mask = K.expand_dims(mask, 2)\r\n if mode == 'mul':\r\n return inputs * mask\r\n if mode == 'add':\r\n return inputs - (1 - mask) * 1e12\r\n \r\n def call(self, x, **kwargs):\r\n if len(x) == 3:\r\n Q_seq,K_seq,V_seq = x\r\n Q_len,V_len = 
None,None\r\n elif len(x) == 5:\r\n Q_seq,K_seq,V_seq,Q_len,V_len = x\r\n Q_seq = K.dot(Q_seq, self.WQ)\r\n Q_seq = K.reshape(Q_seq, (-1, K.shape(Q_seq)[1], self.nb_head, self.size_per_head))\r\n Q_seq = K.permute_dimensions(Q_seq, (0,2,1,3))\r\n K_seq = K.dot(K_seq, self.WK)\r\n K_seq = K.reshape(K_seq, (-1, K.shape(K_seq)[1], self.nb_head, self.size_per_head))\r\n K_seq = K.permute_dimensions(K_seq, (0,2,1,3))\r\n V_seq = K.dot(V_seq, self.WV)\r\n V_seq = K.reshape(V_seq, (-1, K.shape(V_seq)[1], self.nb_head, self.size_per_head))\r\n V_seq = K.permute_dimensions(V_seq, (0,2,1,3))\r\n A = K.batch_dot(Q_seq, K_seq, axes=[3,3]) / self.size_per_head**0.5\r\n A = K.permute_dimensions(A, (0,3,2,1))\r\n A = self.Mask(A, V_len, 'add')\r\n A = K.permute_dimensions(A, (0,3,2,1)) \r\n A = K.softmax(A)\r\n O_seq = K.batch_dot(A, V_seq, axes=[3,2])\r\n O_seq = K.permute_dimensions(O_seq, (0,2,1,3))\r\n O_seq = K.reshape(O_seq, (-1, K.shape(O_seq)[1], self.output_dim))\r\n O_seq = self.Mask(O_seq, Q_len, 'mul')\r\n return O_seq\r\n \r\n def compute_output_shape(self, input_shape):\r\n return (input_shape[0][0], input_shape[0][1], self.output_dim)\r\n \r\n def get_config(self):\r\n config = {\r\n 'nb_head': self.nb_head,\r\n 'size_per_head':self.size_per_head,\r\n \r\n }\r\n base_config = super().get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n \r\n\r\nclass LayerNormalization(Layer):\r\n def __init__(self, eps: float = 1e-5, **kwargs) -> None:\r\n self.eps = eps\r\n super().__init__(**kwargs)\r\n\r\n def build(self, input_shape):\r\n self.gamma = self.add_weight(name='gamma', shape=input_shape[-1:], initializer=Ones(), trainable=True)\r\n self.beta = self.add_weight(name='beta', shape=input_shape[-1:], initializer=Zeros(), trainable=True)\r\n super().build(input_shape)\r\n\r\n def call(self, x, **kwargs):\r\n u = K.mean(x, axis=-1, keepdims=True)\r\n s = K.mean(K.square(x - u), axis=-1, keepdims=True)\r\n z = (x - u) / K.sqrt(s + self.eps)\r\n return self.gamma * z + self.beta\r\n\r\n def compute_output_shape(self, input_shape):\r\n return input_shape\r\n\r\n def get_config(self):\r\n config = {\r\n 'eps': self.eps,\r\n }\r\n base_config = super().get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n \r\n \r\n# https://github.com/Separius/BERT-keras/blob/master/transformer/model.py\r\n\r\nclass Gelu(Layer):\r\n def __init__(self, accurate= False, **kwargs):\r\n super().__init__(**kwargs)\r\n self.accurate = accurate\r\n\r\n def call(self, inputs, **kwargs):\r\n if not self.accurate:\r\n return gelu(inputs)\r\n if K.backend() == 'tensorflow':\r\n erf = K.tf.erf\r\n else:\r\n erf = K.T.erf\r\n return inputs * 0.5 * (1.0 + erf(inputs / math.sqrt(2.0)))\r\n\r\n def compute_output_shape(self, input_shape):\r\n return input_shape\r\n\r\n def get_config(self):\r\n config = {\r\n 'accurate': self.accurate,\r\n }\r\n base_config = super().get_config()\r\n return dict(list(base_config.items()) + list(config.items()))\r\n \r\n \r\n \r\nclass PositionWiseFF:\r\n def __init__(self, n_state, d_hid, layer_id, accurate_gelu=True):\r\n self.c_fc = Conv1D(d_hid, 1)\r\n self.activation = Gelu(accurate=accurate_gelu)\r\n self.c_ffn_proj = Conv1D(n_state, 1)\r\n\r\n def __call__(self, x):\r\n output = self.activation(self.c_fc(x))\r\n return self.c_ffn_proj(output)\r\n\r\n\r\n\r\nclass TransformerEncoderLayer:\r\n def __init__(self, n_state, n_head, d_hid, attention_dropout, residual_dropout, **kwargs):\r\n self.attention = MultiHeadAttention(n_head, 
n_state)\r\n self.drop1 = Dropout(attention_dropout)\r\n self.add1 = Add()\r\n self.ln1 = LayerNormalization()\r\n self.ffn = PositionWiseFF(n_head*n_state, d_hid, True)\r\n self.drop2 = Dropout(residual_dropout)\r\n self.add2 = Add()\r\n self.ln2 = LayerNormalization()\r\n\r\n def __call__(self, x, **kwargs):\r\n a = self.attention([x,x,x]) #output: [batch_size, time_step, nb_head*size_per_head]\r\n n = self.ln1(self.add1([x, self.drop1(a)]))\r\n f = self.ffn(n)\r\n return self.ln2(self.add2([n, self.drop2(f)]))\r\n\r\n\r\n#x = Position_Embedding()(x)\r\n#x = class TransformerEncoderLayer(256, 2, 1024, 0.2, 0.2)(x)\r\n\r\n\r\ndef extract_axis_1(data, ind):\r\n \"\"\"\r\n Get specified elements along the first axis of tensor.\r\n :param data: Tensorflow tensor that will be subsetted.\r\n :param ind: Indices to take (one for each element along axis 0 of data).\r\n :return: Subsetted tensor.\r\n \"\"\"\r\n\r\n batch_range = tf.range(tf.shape(data)[0])\r\n indices = tf.stack([batch_range, ind], axis=1)\r\n res = tf.gather_nd(data, indices)\r\n\r\n return res\r\n\r\n\r\nclass EncoderDecoderLayer:\r\n def __init__(self, enc_units,dec_units, context_units):\r\n self.enc_lstm=keras.layers.CuDNNGRU(enc_units,\r\n return_sequences=True,\r\n name = 'lstm_encoder')\r\n self.dec_lstm = keras.layers.CuDNNGRU(dec_units, \r\n return_sequences=True, \r\n name = 'lstm_decoder')\r\n \r\n self.dense1 = TimeDistributed(Dense(context_units,\r\n activation=\"tanh\"))\r\n\r\n def __call__(self, x, **kwargs):\r\n x_enc, x_dec = x\r\n encoder = self.enc_lstm (x_enc)\r\n encoder_last = Lambda(lambda t: t[:,-1,:])(encoder)\r\n decoder= self.dec_lstm(x_dec,initial_state=[encoder_last])\r\n attention = dot([decoder, encoder], axes=[2, 2])\r\n attention = Activation('softmax')(attention)\r\n context = dot([attention, encoder], axes=[2,1])\r\n decoder_combined_context = concatenate([context, decoder])\r\n output = self.dense1(decoder_combined_context)\r\n return output \r\n\r\nclass EncoderDecoderMultiHeadAttentionLayer:\r\n def __init__(self, enc_units,dec_units, n_state, n_head, context_units):\r\n \r\n self.attention = MultiHeadAttention(n_head, n_state)\r\n \r\n self.enc_lstm=keras.layers.CuDNNGRU(enc_units,\r\n return_sequences=True,\r\n name = 'lstm_encoder')\r\n self.dec_lstm = keras.layers.CuDNNGRU(dec_units, \r\n return_sequences=True, \r\n name = 'lstm_decoder')\r\n \r\n self.dense1 = TimeDistributed(Dense(context_units,\r\n activation=\"tanh\"))\r\n\r\n def __call__(self, x, **kwargs):\r\n x_enc, x_dec = x\r\n encoder = self.enc_lstm (x_enc)\r\n encoder_last = Lambda(lambda t: t[:,-1,:])(encoder)\r\n decoder = self.dec_lstm(x_dec,initial_state=[encoder_last])\r\n context = self.attention ([ decoder, encoder, encoder])\r\n decoder_combined_context = concatenate([context, decoder])\r\n output = self.dense1(decoder_combined_context)\r\n return output \r\n \r\n\r\nclass DecoderMultiHeadAttentionLayer:\r\n def __init__(self, dec_units, n_state, n_head, context_units):\r\n \r\n self.attention = MultiHeadAttention(n_head, n_state)\r\n \r\n self.dec_lstm = keras.layers.CuDNNGRU(dec_units, \r\n return_sequences=True, \r\n name = 'lstm_decoder')\r\n \r\n self.dense1 = TimeDistributed(Dense(context_units,\r\n activation=\"tanh\"))\r\n\r\n def __call__(self, x, **kwargs):\r\n encoder, x_dec = x\r\n encoder_last = Lambda(lambda t: t[:,-1,:])(encoder)\r\n decoder = self.dec_lstm(x_dec,initial_state=[encoder_last])\r\n context = self.attention ([ decoder, encoder, encoder])\r\n decoder_combined_context = 
concatenate([context, decoder])\r\n output = self.dense1(decoder_combined_context)\r\n return output \r\n\r\n \r\n#https://stackoverflow.com/questions/58372387/scheduled-sampling-in-keras","sub_path":"librispeech_work/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":12464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"65591448","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n'''\n@date: 2019-07-30\n@author: Shell.Xu\n@copyright: 2019, Shell.Xu \n@license: BSD-3-clause\n'''\nfrom __future__ import absolute_import, division,\\\n print_function, unicode_literals\nimport os\nimport json\nimport shutil\nimport argparse\nimport datetime\nimport tempfile\nimport subprocess\nimport configparser\nfrom os import path\n\nimport requests\n\n\nUSERNAME = ''\nTOKEN = ''\nDESTDIR = ''\n\n\ndef jsondump(filename, obj):\n with open(path.join(DESTDIR, filename), 'w') as fo:\n json.dump(obj, fo)\n\n\ndef grab_repo(repo):\n name = repo['name']\n print('grab %s' % name)\n issues_url = repo['issues_url'][:-9]\n resp = requests.get(issues_url, headers=HEADERS)\n jsondump(name+'_issues.json', resp.json())\n subprocess.run(['git', 'clone', '--mirror',\n repo['git_url'], path.join(DESTDIR, name)])\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-c', '--config')\n parser.add_argument('-u', '--username')\n parser.add_argument('dest', nargs='?', help='destdir')\n args = parser.parse_args()\n\n with open(args.config or path.expanduser('~/.github-backup.json'),\n 'r') as fi:\n cfg = json.load(fi)\n\n global USERNAME\n global TOKEN\n global HEADERS\n global DESTDIR\n USERNAME = args.username or cfg.get('username', None)\n TOKEN = cfg['token']\n HEADERS = {'Authorization': 'token '+TOKEN}\n DESTDIR = args.dest or cfg.get('dest')\n now = datetime.datetime.now()\n DESTDIR = path.abspath(path.expanduser(now.strftime(DESTDIR)))\n\n if cfg.get('zip', True):\n DESTZIP = DESTDIR\n tmpdir = tempfile.TemporaryDirectory()\n DESTDIR = tmpdir.name\n\n resp = requests.get('https://api.github.com/users/%s/repos' % USERNAME,\n headers=HEADERS)\n jsondump(USERNAME+'_repos.json', resp.json())\n for repo in resp.json():\n grab_repo(repo)\n\n if cfg.get('zip', True):\n CURDIR = os.getcwd()\n os.chdir(DESTDIR)\n subprocess.run(['7z', 'a', DESTZIP, \"*\"])\n os.chdir(CURDIR)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"github-backup.py","file_name":"github-backup.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"348603180","text":"# We have a list of points on the plane. Find the K closest points to the origin (0, 0).\n# (Here, the distance between two points on a plane is the Euclidean distance.)\n# You may return the answer in any order. 
The answer is guaranteed to be unique \n# (except for the order that it is in.)\n\n# Input: points = [[1,3],[-2,2]], K = 1, Output: [[-2,2]]\nimport math\ndef kClosest(points,K):\n d=[]\n for i in range(0,len(points)):\n d.append((dist(points[i]),i))\n\n d=sorted(d)\n print(d)\n a=[]\n for i in d:\n a.append(points[i[1]])\n K-=1\n if K==0:\n break\n return a\n\ndef dist(arr):\n return math.sqrt(pow(arr[0],2)+pow(arr[1],2))\n\n\nprint(kClosest([[1,3],[-2,2]], 1))\n# class Solution(object):\n# def kClosest(self, points, K):\n# points.sort(key = lambda P: P[0]**2 + P[1]**2)\n# return points[:K]\n\n# public int dist(int[] point) {\n# return point[0] * point[0] + point[1] * point[1];\n# }","sub_path":"leetcode/973.k_closest_origin.py","file_name":"973.k_closest_origin.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"365160116","text":"from unittest.mock import Mock, patch\nfrom flask_testing import TestCase\nfrom flask import url_for\nfrom maintain_frontend import main\nfrom maintain_frontend.constants.permissions import Permissions\nfrom maintain_frontend.dependencies.session_api.session import Session\nfrom unit_tests.utilities import Utilities\n\ntest_json_result = [{'item': {\n 'amount-originally-secured': 'an amount',\n 'charge-creation-date': '2011-01-01',\n 'charge-geographic-description': 'a description',\n 'charge-type': 'a charge type',\n 'end-date': '2011-01-01',\n 'expiry-date': '2011-01-01',\n 'further-information-location': 'a location',\n 'further-information-reference': 'a reference',\n 'instrument': 'An instrument',\n 'land-capacity-description': 'a description',\n 'land-compensation-paid': 'compensation',\n 'land-works-particulars': 'particulars',\n 'local-land-charge': 1,\n 'migrating-authority': 'another authority',\n 'migration-supplier': 'a supplier',\n 'old-register-part': '1a',\n 'originating-authority': 'an authority',\n 'rate-of-interest': 'a rate',\n 'registration-date': '2011-01-01',\n 'start-date': '2011-01-01',\n 'statutory-provision': 'a provision',\n 'originating-authority-charge-identifier': 'an identifier',\n 'unique-property-reference-numbers': [123, 456]\n}}]\n\n\nclass TestSearchByReference(TestCase):\n SEARCH_PATH = 'maintain_frontend.search.search_by_reference'\n\n def create_app(self):\n main.app.testing = True\n Utilities.mock_session_cookie_flask_test(self)\n return main.app\n\n def setUp(self):\n main.app.config['Testing'] = True\n main.app.config['WTF_CSRF_ENABLED'] = False\n\n def test_get_search_by_reference_success(self):\n self.client.set_cookie('localhost', Session.session_cookie_name,\n 'cookie_value')\n\n self.mock_session.return_value.user.permissions = [Permissions.browse_llc]\n self.mock_session.return_value.commit = Mock()\n\n response = self.client.get(url_for('search.get_search_by_reference'))\n\n self.assert200(response)\n self.assert_template_used('search-by-reference.html')\n\n @patch(\"{}.ReferenceValidator\".format(SEARCH_PATH))\n @patch(\"{}.SearchByReference\".format(SEARCH_PATH))\n def test_post_search_by_reference_trailing_whitespace_success(\n self,\n mock_search_by_reference,\n mock_reference_validator\n ):\n self.client.set_cookie('localhost', Session.session_cookie_name,\n 'cookie_value')\n\n self.mock_session.return_value.user.permissions = [Permissions.browse_llc]\n self.mock_session.return_value.commit = Mock()\n\n mock_validation_return_object = Mock()\n mock_validation_return_object.errors = None\n 
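# NOTE (added): the validator mock reports no errors, so the view takes the success path and\n        # calls SearchByReference.process() instead of re-rendering the form with messages.\n        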
mock_reference_validator.validate.return_value = mock_validation_return_object\n\n mock_response = {'status_code': 200, 'results': test_json_result}\n\n mock_search_by_reference.return_value.process.return_value = mock_response\n\n # Test that trailing whitespace is stripped from the reference\n response = self.client.post(\n url_for('search.post_search_by_reference'),\n data={'search-reference': ' LLC-1 '}\n )\n\n mock_search_by_reference.return_value.process.assert_called_with('LLC-1')\n self.assertRedirects(response, url_for('view_land_charge.view_land_charge', local_land_charge='LLC-1'))\n","sub_path":"unit_tests/search/test_search_by_reference.py","file_name":"test_search_by_reference.py","file_ext":"py","file_size_in_byte":3460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"67176825","text":"import datetime\nimport tensorflow as tf\nfrom tensorflow.models.rnn import rnn, rnn_cell\nimport numpy as np\nfrom word_dataset import WordDataSet,WordDataItem\nimport dirs\nimport random\nimport os\n\n# Read data set\nfixed_timestep_count = 50\ndataset = WordDataSet(dirs.KNMP_PROCESSED_WORD_BOXES_DIR_PATH)\n\nprint(\"Total items:\",dataset.get_total_item_count())\nprint(\"Training items:\",dataset.get_train_item_count())\nprint(\"Test items:\",dataset.get_test_item_count())\nprint(\"Max label length:\",dataset.get_max_label_length())\n\n# Parameters\nlearning_rate = 0.0005\nprint(\"Learning rate:\",learning_rate)\nn_batch_size = 256\nprint(\"Batch size:\",n_batch_size)\ndropout_input_keep_prob_value = 0.5\nprint('Dropout input keep probability:',dropout_input_keep_prob_value)\ndropout_output_keep_prob_value = 0.5\nprint('Dropout output keep probability:',dropout_output_keep_prob_value)\nn_features = dataset.get_feature_count() # Features = image height\nprint(\"Features:\",n_features)\nn_steps = fixed_timestep_count # Timesteps = image width\nprint(\"Time steps:\",n_steps)\nn_cells = 2 # Number of cells/layers\nprint(\"Cells:\", n_cells)\nn_hidden = 128 # hidden layer num of features\nprint(\"Hidden units:\",n_hidden)\nn_classes = len(dataset.get_unique_chars()) # Classes (A,a,B,b,c,...)\nprint(\"Classes:\",n_classes)\ndisplay_time_interval_sec = 60\n\n# Saved models\nmodel_dir_path = dirs.KNMP_MODEL_DIR_PATH\nlast_model_file_path = os.path.join(model_dir_path,\"last.model\")\nmax_acc_model_file_path = os.path.join(model_dir_path,\"max_acc.model\")\nif not os.path.exists(model_dir_path):\n os.makedirs(model_dir_path)\n\n# Placeholders\ndefault_dropout_prob = tf.constant(1,\"float\")\ndropout_input_keep_prob = tf.placeholder_with_default(default_dropout_prob,[])\ndropout_output_keep_prob = tf.placeholder_with_default(default_dropout_prob,[])\nx = tf.placeholder(\"float\", [None, n_steps, n_features]) # (n_batch_size, n_steps, n_features)\ny = tf.placeholder(\"float\", [None, n_classes])\nbatch_size = tf.shape(x)[0]\n\n# Weights\nw_hidden = tf.Variable(tf.random_normal([n_features, n_hidden]))\nb_hidden = tf.Variable(tf.random_normal([n_hidden]))\nw_out = tf.Variable(tf.random_normal([2*n_hidden, n_classes]))\nb_out = tf.Variable(tf.random_normal([n_classes]))\n\n# Transform input data for RNN (mystical part)\nx1 = tf.transpose(x, [1, 0, 2]) # (n_steps,n_batch_size,n_features)\nx2 = tf.reshape(x1, [-1, n_features]) # (n_steps*n_batch_size, n_features) (2D list with 28*256 vectors with 28 features each)\nx_hidden = tf.matmul(x2, w_hidden) + b_hidden # (n_steps*n_batch_size=28*256,n_hidden=128)\nrnn_inputs = tf.split(0, n_steps, x_hidden) # 
[(n_batch_size, n_features),(n_batch_size, n_features),...,(n_batch_size, n_features)]\n\n# RNN\nlstm_fw_cell = rnn_cell.LSTMCell(n_hidden)\nlstm_fw_cell_dropout = rnn_cell.DropoutWrapper(lstm_fw_cell, input_keep_prob=dropout_input_keep_prob, output_keep_prob=dropout_output_keep_prob)\nmulti_lstm_fw_cell = lstm_fw_cell_dropout\nif n_cells > 1:\n multi_lstm_fw_cell = rnn_cell.MultiRNNCell([lstm_fw_cell_dropout] * n_cells)\n\nlstm_bw_cell = rnn_cell.LSTMCell(n_hidden)\nlstm_bw_cell_dropout = rnn_cell.DropoutWrapper(lstm_bw_cell, input_keep_prob=dropout_input_keep_prob, output_keep_prob=dropout_output_keep_prob)\nmulti_lstm_bw_cell = lstm_bw_cell_dropout\nif n_cells > 1:\n multi_lstm_bw_cell = rnn_cell.MultiRNNCell([lstm_bw_cell_dropout] * n_cells)\n\ninitial_state_fw = multi_lstm_fw_cell.zero_state(batch_size, tf.float32)\ninitial_state_bw = multi_lstm_bw_cell.zero_state(batch_size, tf.float32)\n\nrnn_outputs, rnn_output_state_fw, rnn_output_state_bw = rnn.bidirectional_rnn(multi_lstm_fw_cell, multi_lstm_bw_cell, rnn_inputs,\n initial_state_fw=initial_state_fw,\n initial_state_bw=initial_state_bw)\n\nrnn_output = rnn_outputs[-1]\ny_pred = tf.matmul(rnn_output, w_out) + b_out\n\n# Optimization\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y_pred, y)) # Softmax loss\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Adam Optimizer\n\ncorrect_pred = tf.equal(tf.argmax(y_pred,1), tf.argmax(y,1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n# Initializing the variables\ninit = tf.initialize_all_variables()\n\n# EXECUTION\n\n# Launch the graph\nwith tf.Session() as sess:\n sess.run(init)\n\n # Restore model, if necessary\n restore_saver = tf.train.Saver()\n restore_saver.restore(sess, max_acc_model_file_path)\n\n step = 1\n prev_output_time = datetime.datetime.now()\n best_test_acc = 0\n batch_losses = []\n\n while True:\n # Training\n dataset.prepare_next_train_batch(n_batch_size)\n batch_xs = dataset.get_train_batch_data(time_step_count=fixed_timestep_count) # (batch_size,n_steps,n_input)\n batch_ys = dataset.get_train_batch_first_char_one_hot_labels() # (batch_size,n_classes)\n\n sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys,\n dropout_input_keep_prob: dropout_input_keep_prob_value,\n dropout_output_keep_prob: dropout_output_keep_prob_value})\n\n from_prev_output_time = datetime.datetime.now() - prev_output_time\n if step == 1 or from_prev_output_time.seconds > display_time_interval_sec:\n # Calculate training batch accuracy\n batch_acc = sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys})\n # Calculate training batch loss\n batch_loss = sess.run(cost, feed_dict={x: batch_xs, y: batch_ys})\n batch_losses.append(batch_loss)\n avg_count = 10\n last_batch_losses = batch_losses[-min(avg_count, len(batch_losses)):]\n average_batch_loss = sum(last_batch_losses) / len(last_batch_losses)\n\n # Calculate test accuracy\n test_xs = dataset.get_test_data(time_step_count=fixed_timestep_count)\n test_ys = dataset.get_test_first_char_one_hot_labels()\n\n test_acc = sess.run(accuracy, feed_dict={x: test_xs, y: test_ys})\n\n print (\"Iteration \" + str(step*n_batch_size) + \", Minibatch Loss = \" + \"{:.5f}\".format(batch_loss) + \\\n \" [{:.5f}]\".format(average_batch_loss) + \\\n \", Training Accuracy = \" + \"{:.4f}\".format(batch_acc) + \\\n \", Test Accuracy = \" + \"{:.4f}\".format(test_acc),\n \"*\" if test_acc > best_test_acc else \"\")\n\n saver = tf.train.Saver()\n saver.save(sess, last_model_file_path)\n\n if (test_acc 
> best_test_acc):\n best_test_acc = test_acc\n saver.save(sess, max_acc_model_file_path)\n\n prev_output_time = datetime.datetime.now()\n\n step += 1\n","sub_path":"other_nets/train_word_rnn2.py","file_name":"train_word_rnn2.py","file_ext":"py","file_size_in_byte":6703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"641181916","text":"import abc\nimport threading\n\nfrom .abstract import TopKQueryResult\nfrom .types import Status\n\n\nclass AbstractFuture:\n @abc.abstractmethod\n def result(self, **kwargs):\n \"\"\"Return deserialized result.\n\n It's a synchronous interface. It will wait executing until\n server respond or timeout occur(if specified).\n\n This API is thread-safe.\n \"\"\"\n raise NotImplementedError()\n\n @abc.abstractmethod\n def cancel(self):\n \"\"\"Cancle gRPC future.\n\n This API is thread-safe.\n \"\"\"\n raise NotImplementedError()\n\n @abc.abstractmethod\n def done(self):\n \"\"\"Wait for request done.\n\n This API is thread-safe.\n \"\"\"\n raise NotImplementedError()\n\n\nclass Future(AbstractFuture):\n def __init__(self, future, done_callback=None):\n self._future = future\n self._done_cb = done_callback\n self._condition = threading.Condition()\n self._canceled = False\n self._done = False\n self._response = None\n self._results = None\n self._exception = None\n\n self.__init()\n\n def __del__(self):\n self._future = None\n\n @abc.abstractmethod\n def on_response(self, response):\n ''' Parse response from gRPC server and return results.\n '''\n raise NotImplementedError()\n\n def __init(self):\n \"\"\" Register request done callback of gRPC future\n Callback function can be executed in individual subthread of gRPC, so\n there need to notify main thread when callback function finished.\n \"\"\"\n\n def async_done_callback(future):\n with self._condition:\n # delete gRCP future manually\n # self._future.__del__()\n # self._future = None\n\n # If user specify done callback function, execute it.\n try:\n self._response = future.result()\n self._results = self.on_response(self._response)\n if self._done_cb:\n if isinstance(self._results, tuple):\n self._done_cb(*self._results)\n else:\n self._done_cb(self._results)\n except Exception as e:\n self._exception = e\n finally:\n self._done = True\n self._condition.notify_all()\n\n self._future.add_done_callback(async_done_callback)\n\n def result(self, **kwargs):\n self.exception()\n with self._condition:\n # future not finished. 
wait callback being called.\n to = kwargs.get(\"timeout\", None)\n if not self._future.done() or not self._response:\n self._response = self._future.result(timeout=to)\n # if not self._done and not self._canceled:\n # to = kwargs.get(\"timeout\", None)\n # self._condition.wait(to)\n #\n # if not self._done and not self._canceled:\n # self._condition.notify_all()\n # raise FutureTimeoutError(\"Wait timeout\")\n\n self._condition.notify_all()\n\n if kwargs.get(\"raw\", False) is True:\n # just return response object received from gRPC\n return self._response\n\n if self._results:\n return self._results\n\n return self.on_response(self._response)\n\n def cancel(self):\n with self._condition:\n self._future.cancel()\n # if not self._canceled or self._done:\n # self._future.cancel()\n # self._canceled = True\n self._condition.notify_all()\n\n def is_done(self):\n return self._done\n\n def done(self):\n # self.exception()\n with self._condition:\n if self._future and not self._future.done():\n try:\n self._future.result()\n except Exception as e:\n self._exception = e\n\n self._condition.notify_all()\n\n def exception(self):\n if self._exception:\n raise self._exception\n\n\nclass SearchFuture(Future):\n\n def on_response(self, response):\n if response.status.error_code == 0:\n return Status(message='Search successfully!'), TopKQueryResult(response)\n\n return Status(code=response.status.error_code, message=response.status.reason), None\n\n\nclass InsertFuture(Future):\n def on_response(self, response):\n status = response.status\n if status.error_code == 0:\n return Status(message='Add vectors successfully!'), list(response.vector_id_array)\n\n return Status(code=status.error_code, message=status.reason), []\n\n\nclass CreateIndexFuture(Future):\n def on_response(self, response):\n return Status(code=response.error_code, message=response.reason)\n\n\nclass CompactFuture(Future):\n def on_response(self, response):\n return Status(code=response.error_code, message=response.reason)\n\n\nclass FlushFuture(Future):\n def on_response(self, response):\n return Status(code=response.error_code, message=response.reason)\n","sub_path":"milvus/client/asynch.py","file_name":"asynch.py","file_ext":"py","file_size_in_byte":5180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"520420819","text":"#!/opt/app-root/bin/python\n\n\"\"\"\n--------------------------p3_identity_mgmt_tc_1.py-----------------------\nDescription: This python script is to validate the negative test cases\n for P3 identity management. This script validates if the\n tenant are allowed to:\n a. create users\n b. create roles\n c. create domain\n d. 
change domain\n                 in the P3 platform.\n\nUsage:\npython p3_identity_mgmt_tc_1.py -u <auth_url> -t <team_name> [-i <team_id> -s <scan_id>]\n\nAuthor: Devaraj Acharya ; January 9th, 2019\n\nCopyright (c) 2019 Cisco Systems.\nAll rights reserved.\n-------------------------------------------------------------------------\n\"\"\"\n\nimport argparse\nimport csv\nimport datetime\nimport json\nimport openstack\nimport os\nimport re\nimport sys\nimport time\n\nfrom os import environ as env\nsys.path.append(os.environ[\"CLONED_REPO_DIR\"] + \"/library\")\nimport common_lib\nimport general_util\nimport p3_lib\n\nfilename = os.path.abspath(__file__).split(\"/\")[-1].split(\".py\")[0]\ntc = filename.replace(\"_\", \"-\").upper()\nseq_nums_list = list()\nparams_list = list()\n\n\ndef create_new_role(conn, project_name, scan_id, team_id, scanid_valid, teamid_valid):\n    \"\"\"\n    This method is to validate that tenants are not allowed to create a new role in\n    the P3 platform.\n    :param conn: connection handle to OpenStack project\n    :param project_name: project name\n    :param seq_nums_list: empty list\n    :param params_list: empty list\n    :param scan_id: ScanID received from AWS SQS\n    :param team_id: TeamID\n    :param session: session handle to Kinesis\n    :return: Compliant | Non-Compliant | None\n    \"\"\"\n    try:\n        role = []\n        for new_role in conn.identity.create_role():\n            role = new_role\n            print(\"ERROR: Tenant are being allowed to create a role\")\n            compliance_status = \"Non-compliant\"\n            return compliance_status\n\n    except openstack.exceptions.HttpException as exp_err:\n        print(\"INFO: Error Received while attempting to create role - %s\" % str(exp_err))\n        if str(exp_err).find(\"You are not authorized\"):  # NB: find() returns -1 (truthy) when the text is absent; only index 0 is falsy\n            print(\"INFO: Tenant are not allowed to create a role\")\n            compliance_status = \"Compliant\"\n        else:\n            compliance_status = \"Non-compliant\"\n\n        resource = project_name + \"-\" + \"create_role\"\n        if scanid_valid and teamid_valid:\n            if general_util.params_list_update(scan_id, tc, team_id, resource, compliance_status, params_list):\n                print(\"INFO: Updating params_list\")\n            else:\n                print(\"ERROR: Issue observed while updating params_list\")\n                return None\n        else:\n            print(\"INFO: ScanId or TeamId passed to main() method is not valid,\"\n                  \" hence ignoring Kinesis part\")\n\n        return compliance_status\n\n    except Exception as e:\n        print(\"ERROR: Issue observed while calling create_role() API - %s\" % str(e))\n        return None\n\n\ndef create_new_user(conn, project_name, scan_id, team_id, scanid_valid, teamid_valid):\n    \"\"\"\n    This method is to validate that tenants are not allowed to create a new user\n    in the P3 platform.\n    :param conn: connection handle to OpenStack project\n    :param project_name: project name\n    :param seq_nums_list: empty list\n    :param params_list: empty list\n    :param scan_id: ScanID received from AWS SQS\n    :param team_id: TeamID\n    :return: Compliant | Non-Compliant | None\n    \"\"\"\n    try:\n        user = []\n        audit_time = int(time.time()) * 1000\n        for new_user in conn.identity.create_user():\n            user = new_user\n            print(\"ERROR: Tenant are being allowed to create a user\")\n            compliance_status = \"Non-compliant\"\n            return compliance_status\n    except openstack.exceptions.HttpException as exp_err:\n        print(\"INFO: Error Received while attempting to create user - %s\" % str(exp_err))\n        if str(exp_err).find(\"You are not authorized\"):\n            print(\"INFO: Tenant are not allowed to create a user\")\n            compliance_status = \"Compliant\"\n        else:\n            compliance_status = \"Non-compliant\"\n\n        resource = project_name + \"-\" + \"create_user\"\n        if scanid_valid and teamid_valid:\n            
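# editor's note: general_util.params_list_update() is library code not shown here;\n            # judging from this call it appends one audit row (scan id, test case, team id,\n            # resource, compliance status) to params_list for the Kinesis upload in main().\n            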
if general_util.params_list_update(scan_id, tc, team_id, resource, compliance_status, params_list):\n print(\"INFO: Updating params_list\")\n else:\n print(\"ERROR: Issue observed while updating params_list\")\n return None\n else:\n print(\"INFO: ScanId or TeamId passed to main() method is not valid, hence ignoring Kinesis part\")\n\n return compliance_status\n except Exception as e:\n print(\"ERROR: Issue observed while calling create_user() API - %s\" % str(e))\n return None\n\n\ndef create_domain(conn, project_name, scan_id, team_id, scanid_valid, teamid_valid):\n \"\"\"\n This method is to validate that tenant are not allowed to create the new domain\n in P3 platform.\n :param conn: connection handle to OpenStack project\n :param project_name: project name\n :param seq_nums_list: empty list\n :param params_list: empty list\n :param scan_id: ScanID received from AWS SQS\n :param team_id: TeamID\n :return: Compliant | Non-Compliant | None\n \"\"\"\n try:\n domain = []\n audit_time = int(time.time()) * 1000\n for create_domain in conn.identity.create_domain():\n new_domain = create_domain\n print(\"ERROR: Tenant are being allowed to create a domain\")\n compliance_status = \"Non-compliant\"\n return compliance_status\n\n except openstack.exceptions.HttpException as exp_err:\n print(\"INFO: Error Received while attempting to create domain - %s\" % str(exp_err))\n if str(exp_err).find(\"You are not authorized\"):\n print(\"INFO: Tenant are not allowed to create a domain\")\n compliance_status = \"Compliant\"\n else:\n compliance_status = \"Non-compliant\"\n\n resource = project_name + \"-\" + \"create_domain\"\n if scanid_valid and teamid_valid:\n if general_util.params_list_update(scan_id, tc, team_id, resource, compliance_status, params_list):\n print(\"INFO: Updating params_list\")\n else:\n print(\"ERROR: Issue observed while updating params_list\")\n return None\n else:\n print(\"INFO: ScanId or TeamId passed to main() method is not valid, hence ignoring Kinesis part\")\n\n return compliance_status\n\n except Exception as e:\n print(\"ERROR: Issue observed while calling create domain() API - %s\" % str(e))\n return None\n\n\ndef list_domain(conn, project_name, scan_id, team_id, scanid_valid, teamid_valid):\n \"\"\"\n This method is to validate that tenant are not allowed to list/read the domains\n in P3 platform.\n :param conn: connection handle to OpenStack project\n :param project_name: project name\n :param seq_nums_list: empty list\n :param params_list: empty list\n :param scan_id: ScanID received from AWS SQS\n :param team_id: TeamID\n :return: Compliant | Non-Compliant | None\n \"\"\"\n try:\n domain_list = []\n audit_time = int(time.time()) * 1000\n for list_domain in conn.identity.domains():\n domain = json.dumps(list_domain)\n print(\"ERROR: Tenant are being allowed to list domain\")\n compliance_status = \"Non-compliant\"\n return compliance_status\n except openstack.exceptions.HttpException as exp_err:\n print(\"INFO: Error Received while attempting to list domains - %s\" % str(exp_err))\n if str(exp_err).find(\"You are not authorized\"):\n print(\"INFO: Tenant are not allowed to list domains\")\n compliance_status = \"Compliant\"\n else:\n compliance_status = \"Non-compliant\"\n\n resource = project_name + \"-\" + \"list_domain\"\n if scanid_valid and teamid_valid:\n if general_util.params_list_update(scan_id, tc, team_id, resource, compliance_status, params_list):\n print(\"INFO: Updating params_list\")\n else:\n print(\"ERROR: Issue observed while updating 
params_list\")\n return None\n else:\n print(\"INFO: ScanId or TeamId passed to main() method is not valid, hence ignoring Kinesis part\")\n\n return compliance_status\n except Exception as e:\n print(\"ERROR: Issue observed while calling listing domains() API - %s\" % str(e))\n return None\n\n\ndef change_domain(conn, domain_name, project_name, scan_id, team_id, scanid_valid, teamid_valid):\n \"\"\"\n This method is to validate that tenant are not allowed to change the domain\n in P3 platform.\n :param conn: Connection handle to OpenStack project\n :param domain_name: current domain name\n :param project_name: Project Name\n :param seq_nums_list: empty list\n :param params_list: empty list\n :param scan_id: ScanID received from AWS SQS\n :param team_id: TeamID\n :return: Compliant | Non-Compliant | None\n \"\"\"\n try:\n domain_change = []\n audit_time = int(time.time()) * 1000\n for change_domain in conn.identity.update_domain(domain_name, name=\"admin\", description=None, enabled=None):\n new_domain = change_domain\n print(\"ERROR: Tenant are being allowed to change a domain\")\n compliance_status = \"Non-compliant\"\n return compliance_status\n except openstack.exceptions.ResourceNotFound as exp_err:\n print(\"INFO: Error Received while attempting to change domain - %s\" % str(exp_err))\n if str(exp_err).find(\"Could not find domain\"):\n print(\"INFO: Tenant are not allowed to change a domain\")\n compliance_status = \"Compliant\"\n else:\n compliance_status = \"Non-compliant\"\n\n resource = project_name + \"-\" + \"change_domain\"\n if scanid_valid and teamid_valid:\n if general_util.params_list_update(scan_id, tc, team_id, resource, compliance_status, params_list):\n print(\"INFO: Updating params_list\")\n else:\n print(\"ERROR: Issue observed while updating params_list\")\n return None\n else:\n print(\"INFO: ScanId or TeamId passed to main() method is not valid, hence ignoring Kinesis part\")\n\n return compliance_status\n\n except Exception as e:\n print(\"ERROR: Issue observed while calling create domain() API - %s\" % str(e))\n return None\n\n\ndef compliant_status_of_tenant(project_name, role, user, domain, domain_list, domain_change, team_id):\n \"\"\"\n This method is to summarize compliant status of P3 identity management.\n :param project_name:\n :param role:\n :param user:\n :param domain_change:\n :param team_id:\n :return:\n \"\"\"\n try:\n compliant_status =[]\n for compliance_status in role:\n if compliance_status in role == 'Compliant':\n create_role = \"Not Allowed\"\n else:\n create_role = \"Allowed\"\n for compliance_status in user:\n if compliance_status in user == 'Compliant':\n create_user = \"Not Allowed\"\n else:\n create_user = \"Allowed\"\n for compliance_status in domain:\n if compliance_status in domain == 'Compliant':\n domain_create = \"Not Allowed\"\n else:\n domain_create = \"Allowed\"\n for compliance_status in domain_list:\n if compliance_status in domain_list == 'Compliant':\n domain_lists = \"Not Allowed\"\n else:\n domain_lists = \"Allowed\"\n for compliance_status in domain_change:\n if compliance_status in domain_change == 'Compliant':\n change_domain = \"Not Allowed\"\n else:\n change_domain = \"Allowed\"\n\n if role == user == domain == domain_list == domain_change == \"Compliant\":\n compliance_status = \"Compliant\"\n else:\n compliance_status = \"Non-compliant\"\n\n compliant_status.append([\n team_id,\n project_name,\n create_role,\n create_user,\n domain_create,\n domain_lists,\n change_domain,\n compliance_status\n ])\n\n headers = 
[\"Tenant Id\", \"Tenant Name\", \"Create Role\", \"Create User\", \"Create Domain\", \"List Domain\", \"Change Domain\", \"Compliance Status\"]\n date_stamp = datetime.datetime.now().strftime('%m%d%y')\n csv_filename = os.path.expanduser(\"~\") + \"/logs/p3_identity_mgmt_tc_1_\" + date_stamp + \".csv\"\n with open(csv_filename, 'a') as f:\n file_is_empty = os.stat(csv_filename).st_size == 0\n writer = csv.writer(f, lineterminator='\\n')\n if file_is_empty:\n writer.writerow(headers)\n writer.writerows(compliant_status)\n f.close()\n except Exception as e:\n print(\"ERROR: Issue observed while retrieving compliance status() API - %s\" % str(e))\n if str(e):\n headers = [\"Tenant Id\", \"Tenant Name\", \"Create Role\", \"Create User\", \"Create Domain\", \"List Domain\", \"Change Domain\", \"Compliance Status\"]\n Exception_list = [team_id, project_name, \"\", \"\", \"\", \"\", \"\", \"\"]\n date_stamp = datetime.datetime.now().strftime('%m%d%y')\n csv_filename = os.path.expanduser(\"~\") + \"/logs/p3_identity_mgmt_tc_1_\" + date_stamp + \".csv\"\n with open(csv_filename, 'a') as f:\n file_is_empty = os.stat(csv_filename).st_size == 0\n writer = csv.writer(f, lineterminator='\\n')\n if file_is_empty:\n writer.writerow(headers)\n writer.writerows([Exception_list])\n\n\ndef main(os_auth_url, project_name, scan_id, team_id):\n \"\"\"\n This main method is to validate the tenant are not allowed to create the users, roles and\n the domain in the P3 platform.\n :param os_auth_url: OpenStack's Horizon URL\n :param project_name: Project name\n :param scan_id: Scan ID received from AWS, required for Kinesis update\n :param team_id: required during Kinesis Update\n :return: Compliant or Non-compliant\n \"\"\"\n summary_report = {\n \"No_of_Compliant_Tenant(s)\": 0,\n \"No_of_Non_compliant_Tenant(s)\": 0\n }\n try:\n scanid_valid = False\n teamid_valid = False\n if scan_id and team_id is not None:\n scanid_valid = common_lib.scanid_validation(scan_id)\n teamid_valid = p3_lib.p3_teamid_validation(team_id)\n else:\n print(\"INFO: Valid ScanId or TeamId not found\")\n print(\"INFO: Execution will proceed without Kinesis update\")\n\n domain_name = env[\"OS_PROJECT_DOMAIN_NAME\"]\n region = os_auth_url.split(\".\")[0].split(\"//\")[1]\n conn = p3_lib.connect(os_auth_url, project_name, region)\n\n except Exception as e:\n print(\"ERROR: Connection failed with error => %s\" % str(e))\n return None, summary_report\n\n session = general_util.session_handle()\n if session:\n if scanid_valid and teamid_valid:\n print(\"INFO: Update the scan record with \\\"InProgress\\\" Status\")\n update = general_util.updateScanRecord(session, \"P3\", scan_id, team_id, tc, \"InProgress\")\n if update is None:\n raise Exception(\"ERROR: Issue observed with UpdateScanRecord API call for \\\"InProgress\\\" status\")\n return None, summary_report\n else:\n print(\"INFO: ScanId or TeamId passed to main() method is not valid, hence ignoring Kinesis part\")\n\n try:\n role = create_new_role(conn, project_name, scan_id, team_id, scanid_valid, teamid_valid)\n user = create_new_user(conn, project_name, scan_id, team_id, scanid_valid, teamid_valid)\n domain = create_domain(conn, project_name, scan_id, team_id, scanid_valid, teamid_valid)\n domain_list = list_domain(conn, project_name, scan_id, team_id, scanid_valid, teamid_valid)\n domain_change = change_domain(conn, domain_name, project_name, scan_id, team_id, scanid_valid, teamid_valid)\n compliant_status_of_tenant(project_name, role, user, domain, domain_list, domain_change, 
team_id)\n\n list_of_return_vals = [role, user, domain, domain_list, domain_change]\n if any(val == \"Non-compliant\" for val in list_of_return_vals):\n print(\"INFO: One of the test is Non-compliant\")\n compliance_status = \"Non-compliant\"\n summary_report[\"No_of_Non_compliant_Tenant(s)\"] = 1\n\n elif any(val is None for val in list_of_return_vals):\n print(\"INFO: One of the test returned None\")\n compliance_status = \"None\"\n else:\n print(\"INFO: All checks are Compliant\")\n compliance_status = \"Compliant\"\n summary_report[\"No_of_Compliant_Tenant(s)\"] = 1\n\n except Exception as e:\n print(\"ERROR: Issue observed during execution - %s\" % str(e))\n if scanid_valid and teamid_valid:\n print(\"INFO: Update the scan record with \\\"Failed\\\" Status\")\n update = general_util.updateScanRecord(session, \"P3\", scan_id, team_id, tc, \"Failed\")\n if update is None:\n raise Exception(\"ERROR: Issue observed with UpdateScanRecord API call for \\\"Failed\\\" status\")\n return None, summary_report\n\n return None, summary_report\n else:\n raise Exception(\"ERROR: Failed to get the connection handle\")\n return None, summary_report\n\n if scanid_valid and teamid_valid:\n print(\"INFO: Adding result to Stream\")\n stream_info = general_util.add_result_to_stream(session, \"P3\", str(team_id), tc, params_list)\n if stream_info is None:\n raise Exception(\"ERROR: Issue observed while calling add_result_to_stream() API\")\n return None, summary_report\n seq_nums_list.append(stream_info)\n\n print(\"INFO: Sending result complete\")\n send_result = general_util.send_result_complete(session, \"P3\", scan_id, team_id, tc, seq_nums_list)\n if send_result:\n print(\"INFO: Successfully submitted the result to Kinesis\")\n else:\n raise Exception(\"ERROR: Failed to submit the result to Kinesis\")\n return None, summary_report\n else:\n print(\"INFO: ScanId or TeamId passed to main() method is not valid, hence ignoring Kinesis part\")\n\n conn.close()\n return compliance_status, summary_report\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Validate the negative test cases w.r.t. 
Identity Services in P3 platform...\")\n parser.add_argument(\"-u\", \"--auth_url\", help=\"OpenStack Horizon URL\", action=\"store\", dest=\"url\")\n parser.add_argument(\"-t\", \"--team_name\", help=\"Project/Tenant Name\", action=\"store\", dest=\"team\")\n parser.add_argument(\"-s\", \"--scan_id\", help=\"Scan ID from AWS\", action=\"store\", dest=\"scanid\")\n parser.add_argument(\"-i\", \"--team_id\", help=\"Project/Tenant ID\", action=\"store\", dest=\"teamid\")\n args = parser.parse_args()\n url = args.url\n p_name = args.team\n scan_id = args.scanid\n team_id = args.teamid\n url_valid = p3_lib.p3_url_validation(url)\n if url and p_name is not None:\n if url_valid:\n compliance_status, summary_report = main(url, p_name, scan_id, team_id)\n print(\"INFO: Process completed with:\\nCompliance Status as - %s\\nSummary_report as - %s\"\n % (compliance_status, summary_report))\n else:\n print(\"ERROR: Failed with validation\")\n else:\n print(\"ERROR:Need Tenant ID and Horizon URL to run the script\")\n","sub_path":"audit_scripts/p3_identity_mgmt_tc_1.py","file_name":"p3_identity_mgmt_tc_1.py","file_ext":"py","file_size_in_byte":20071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"169957625","text":"from PySide import QtCore, QtGui\nfrom SafetyPlayUtils import *\nfrom SegmentEditor import OffsetEditor\nimport os\n\n###############################################################################\n# #\n# SAF WINDOW #\n# #\n###############################################################################\n\nclass SafWindow(QtGui.QWidget):\n record_request = QtCore.Signal()\n saf_exit = QtCore.Signal()\n saved = QtCore.Signal(str)\n show_vid = QtCore.Signal()\n show_saf = QtCore.Signal()\n show_srt = QtCore.Signal()\n reset_view = QtCore.Signal()\n \n def __init__(self, parent=None):\n super(SafWindow, self).__init__(parent)\n self.setAcceptDrops(True)\n self.safelist = ['.saf']\n \n self.current_file = \"\"\n self.text_edited = False\n self.level = 0\n\n self.save_timer = SaveCloser()\n\n self.offset_editor = OffsetEditor()\n self.offset_editor.setWindowTitle('Safety Play - SAF Offset Editor')\n\n self.setupUi()\n self.setupActions()\n\n def reset_level(self):\n self.plainTextEdit.verticalScrollBar().setValue(self.level)\n \n def change_time(self, time):\n go = self.check_errors()\n if go:\n self.level = self.plainTextEdit.verticalScrollBar().value()\n lines = self.plainTextEdit.toPlainText().split('\\n')\n newlines = []\n for i in range(len(lines)):\n if not lines[i] == '' and not lines[i].startswith('#'):\n words = lines[i].replace('[', '').split(']')\n\n if words[0] == '00:00:00,000' and time < 0:\n t = words[0]\n\n elif (convert_to_seconds(words[0]) + time >\n convert_to_seconds('23:59:59,999')):\n t = '23:59:59,999'\n \n else:\n t = convert_to_seconds(words[0]) + time\n t = sec_to_hms(t)\n if '-' in t:\n t = '00:00:00,000'\n\n if words[1] == '00:00:00,000' and time < 0:\n t2 = words[1]\n\n elif (convert_to_seconds(words[1]) + time >\n convert_to_seconds('23:59:59,999')):\n t2 = '23:59:59,999'\n \n else:\n t2 = convert_to_seconds(words[1]) + time\n t2 = sec_to_hms(t2)\n if '-' in t2:\n t2 = '00:00:00,000'\n \n\n l = '[' + str(t) + '][' + str(t2) + ']' + words[2]\n newlines.append(l)\n else:\n newlines.append(lines[i])\n string = \"\"\n for i in range(len(newlines)):\n if not i == len(newlines) - 1:\n string = string + newlines[i] + '\\n'\n else:\n string = string + newlines[i]\n self.plainTextEdit.clear()\n 
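# rebuild the buffer: clear() drops the old text, then the re-timed\n            # [start][end] lines assembled above are appended in a single call\n            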
self.plainTextEdit.appendPlainText(string)\n self.reset_level()\n\n def text_was_changed(self):\n self.text_edited = True\n self.setWindowTitle('Safety Play - SAF (edited)')\n\n def record(self):\n self.record_request.emit()\n\n def write_record(self, line):\n self.plainTextEdit.appendPlainText(line)\n\n def write_mute(self):\n self.plainTextEdit.insertPlainText('mute')\n\n def write_skip(self):\n self.plainTextEdit.insertPlainText('skip')\n\n def write_black(self):\n self.plainTextEdit.insertPlainText('black')\n\n def check_errors(self):\n self.whiten()\n lines = self.plainTextEdit.toPlainText().split('\\n')\n for i in range(len(lines)):\n result = check_line(lines[i].rstrip())\n if result == False:\n self.highlight(self.get_index(lines, i), i)\n return False\n return True\n\n def save_sequence(self):\n go = self.check_errors()\n if go:\n self.save_to_file()\n \n def save_to_file(self):\n try:\n file = open(self.current_file, 'w')\n text = self.plainTextEdit.toPlainText()\n file.write(text)\n file.close()\n self.show_save_message()\n self.setWindowTitle('Safety Play - SAF')\n self.colorize()\n except FileNotFoundError:\n self.browse_to_saf()\n\n def browse_to_saf(self):\n userhome = os.path.expanduser('~')\n file, _ = QtGui.QFileDialog.getSaveFileName(\n self, 'Save SAF File', userhome, \"*.saf\")\n if not file == '':\n self.current_file = file\n self.save_to_file()\n\n def get_index(self, lines, line_number):\n total = 0\n for i in range(0, line_number):\n total = total + len(lines[i]) + 1\n return total \n\n def show_save_message(self):\n self.save_message.resize(self.width(), self.height())\n self.save_message.show()\n self.save_closer.start()\n self.text_edited = False\n\n def hide_save(self):\n self.save_message.hide()\n self.text_edited = False\n self.saved.emit(self.current_file)\n\n def highlight(self, index, line):\n cursor = self.plainTextEdit.textCursor()\n form = QtGui.QTextCharFormat()\n form.setBackground(QtGui.QBrush(QtGui.QColor(\"pink\")))\n cursor.setPosition(index)\n cursor.movePosition(QtGui.QTextCursor.EndOfLine, QtGui.QTextCursor.KeepAnchor, 1)\n cursor.mergeCharFormat(form)\n cursor.movePosition(QtGui.QTextCursor.EndOfLine, QtGui.QTextCursor.MoveAnchor, 1)\n self.plainTextEdit.verticalScrollBar().setValue(line)\n\n def whiten(self):\n lines = self.plainTextEdit.toPlainText().split('\\n')\n index = 0\n cursor = self.plainTextEdit.textCursor()\n form = QtGui.QTextCharFormat()\n form.setBackground(QtGui.QBrush(QtGui.QColor(\"white\")))\n for i in range(len(lines)):\n cursor.setPosition(index)\n cursor.movePosition(QtGui.QTextCursor.EndOfBlock, QtGui.QTextCursor.KeepAnchor, 1)\n cursor.mergeCharFormat(form)\n index = index + len(lines[i]) + 1\n\n def colorize(self):\n lines = self.plainTextEdit.toPlainText().split('\\n')\n index = 0\n cursor = self.plainTextEdit.textCursor()\n form = QtGui.QTextCharFormat()\n for i in range(len(lines)):\n if lines[i].startswith('#'):\n form.setForeground(QtGui.QBrush(QtGui.QColor(\"green\")))\n cursor.setPosition(index)\n cursor.movePosition(QtGui.QTextCursor.EndOfBlock, QtGui.QTextCursor.KeepAnchor, 1)\n cursor.mergeCharFormat(form)\n index = index + len(lines[i]) + 1\n elif lines[i].rstrip() == '':\n index = index + len(lines[i]) + 1\n else:\n form.setForeground(QtGui.QBrush(QtGui.QColor(\"blue\")))\n cursor.setPosition(index)\n for x in range(0, 14):\n cursor.movePosition(QtGui.QTextCursor.Right, QtGui.QTextCursor.KeepAnchor, 1)\n cursor.mergeCharFormat(form)\n index = index + 14\n 
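# chars 0-13 held the '[hh:mm:ss,mmm]' start stamp (painted blue above);\n                # the next 14 chars are the end stamp, painted purple here\n                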
form.setForeground(QtGui.QBrush(QtGui.QColor(\"purple\")))\n cursor.setPosition(index)\n for x in range(0, 14): \n cursor.movePosition(QtGui.QTextCursor.Right, QtGui.QTextCursor.KeepAnchor, 1)\n cursor.mergeCharFormat(form)\n index = index + 14\n form.setForeground(QtGui.QBrush(QtGui.QColor(\"black\")))\n cursor.setPosition(index)\n cursor.movePosition(QtGui.QTextCursor.EndOfBlock, QtGui.QTextCursor.KeepAnchor, 1)\n cursor.mergeCharFormat(form)\n index = index + len(lines[i]) - 27\n\n def insert_fancy_title(self, title):\n if len(title) > 0 and len(title) < 36:\n self.plainTextEdit.insertPlainText(fancy_title(title, 40))\n elif len(title) > 35:\n l = len(title) + 4\n self.plainTextEdit.insertPlainText(fancy_title(title, l))\n\n def shift_text_right(self):\n lines = self.plainTextEdit.toPlainText().split('\\n')\n for i in range(len(lines)):\n if not i == len(lines) - 1:\n lines[i] = '\\t' + lines[i] + '\\n'\n else:\n lines[i] = '\\t' + lines[i]\n self.plainTextEdit.clear()\n text = ''.join(lines)\n self.plainTextEdit.appendPlainText(text)\n self.reset_level()\n\n def shift_text_left(self):\n lines = self.plainTextEdit.toPlainText().split('\\n')\n for i in range(len(lines)):\n if not i == len(lines) - 1:\n if lines[i].startswith('\\t'):\n lines[i] = lines[i][1::] + '\\n'\n else:\n lines[i] = lines[i] + '\\n'\n else:\n if lines[i].startswith('\\t'):\n lines[i] = lines[i][1::]\n self.plainTextEdit.clear()\n text = ''.join(lines)\n self.plainTextEdit.appendPlainText(text)\n self.reset_level()\n\n def add_file(self, file):\n self.current_file = file\n self.level = self.plainTextEdit.verticalScrollBar().value()\n self.plainTextEdit.clear()\n try:\n f = open(file, 'r')\n lines = f.readlines()\n f.close()\n string = \"\"\n for i in range(len(lines)):\n string = string + lines[i]\n self.plainTextEdit.appendPlainText(string)\n self.text_edited = False\n except FileNotFoundError:\n pass\n self.reset_level()\n self.colorize()\n self.check_errors()\n self.setWindowTitle('Safety Play - SAF')\n\n def dragEnterEvent(self, event):\n if event.mimeData().hasUrls:\n try:\n path = str(event.mimeData().urls()[0].toLocalFile())\n ending = path[len(path)-4::]\n if ending in self.safelist:\n event.accept()\n else:\n event.ignore()\n except IndexError:\n pass\n \n def dragMoveEvent(self, event):\n if event.mimeData().hasUrls:\n event.setDropAction(QtCore.Qt.CopyAction)\n event.accept()\n else:\n event.ignore()\n\n def dropEvent(self, event):\n if event.mimeData().hasUrls:\n self.add_file(str(event.mimeData().urls()[0].toLocalFile()))\n else:\n event.ignore()\n\n def moveEvent(self, event):\n self.save_timer.start()\n\n def resizeEvent(self, event):\n self.save_timer.start()\n\n def save_pos_and_size(self):\n pos = [self.x(), self.y()]\n size = [self.width(), self.height()]\n save(saf_pos = pos, saf_size = size)\n\n def closeEvent(self, event):\n self.saf_exit.emit()\n self.offset_editor.close()\n self.save_timer.terminate()\n\n def setupActions(self):\n self.record_button.clicked.connect(self.record)\n self.mute_button.clicked.connect(self.write_mute)\n self.skip_button.clicked.connect(self.write_skip)\n self.black_button.clicked.connect(self.write_black)\n self.save_closer.time_up.connect(self.hide_save)\n self.plainTextEdit.textChanged.connect(self.text_was_changed)\n self.save_timer.time_up.connect(self.save_pos_and_size)\n\n self.offset_editor.ok_signal.connect(self.change_time)\n\n self.shcut_safAs = QtGui.QShortcut(self)\n self.shcut_safAs.setKey('Ctrl+Shift+S')\n self.connect(self.shcut_safAs,\n 
QtCore.SIGNAL(\"activated()\"),\n self.browse_to_saf)\n\n def setupUi(self):\n self.setWindowTitle('Safety Play - SAF')\n self.setWindowIcon(SafetyPlayIcon())\n self.settings = load()\n self.resize(self.settings['saf_size'][0],\n self.settings['saf_size'][1])\n self.move(self.settings['saf_pos'][0],\n self.settings['saf_pos'][1])\n \n self.vl = QtGui.QVBoxLayout(self)\n self.plainTextEdit = QtGui.QPlainTextEdit()\n self.plainTextEdit.setStyleSheet(\"font: 10pt 'Consolas'\")\n self.plainTextEdit.setLineWrapMode(QtGui.QPlainTextEdit.NoWrap)\n self.vl.addWidget(self.plainTextEdit)\n\n self.hl = QtGui.QHBoxLayout()\n\n self.record_button = QtGui.QPushButton()\n self.record_button.setText('Record Time')\n self.hl.addWidget(self.record_button)\n\n self.mute_button = QtGui.QPushButton()\n self.mute_button.setText('Mute')\n self.hl.addWidget(self.mute_button)\n\n self.skip_button = QtGui.QPushButton()\n self.skip_button.setText('Skip')\n self.hl.addWidget(self.skip_button)\n\n self.black_button = QtGui.QPushButton()\n self.black_button.setText('Black')\n self.hl.addWidget(self.black_button)\n\n self.vl.addLayout(self.hl)\n\n self.save_closer = SaveCloser()\n \n self.save_message = Saved(self)\n self.save_message.hide()\n\nif __name__ == '__main__':\n import sys\n app = QtGui.QApplication(sys.argv)\n app.setApplicationName(\"SAF Window\")\n window = SafWindow()\n window.show()\n sys.exit(app.exec_())\n","sub_path":"SafWindow.py","file_name":"SafWindow.py","file_ext":"py","file_size_in_byte":13642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"22703246","text":"import csv\n\nto_be_removed_chars = ['\"', \"(\", \")\", \"]\", \"[\", \",\", \";\", \"\\n\", \".\", \"?\", \"1\"]\nto_be_removed_letters = [\"and\", \"or\", \"add\", \"of\", \"in\", \"analysis\", \"with\", \"to\", \"a\", \"from\", \"are\", \"is\", \"as\",\n \"that\", \"i\", \"at\", \"the\"]\n\nfname = \"WellSolved.csv\"\nfin = open(\"../%s\" % fname, encoding=\"ISO-8859-1\")\n\nunique_words = {}\nfor a_line in fin:\n a_line = a_line.lower()\n for a_char in to_be_removed_chars:\n a_line = a_line.replace(\"%s\" % a_char, \"\")\n for a_word in to_be_removed_letters:\n a_line = a_line.replace(\" %s \" % a_word, \" \")\n # a_line = a_line.replace(\"%s \" % a_word, \" \")\n # a_line = a_line.replace(\" %s\" % a_word, \" \")\n if not a_line:\n continue\n content = a_line.split()\n for _ in content:\n if _ not in unique_words:\n unique_words[_] = 0\n unique_words[_] += 1\n\nfout = open(\"../%s\" % (fname.replace(\".csv\", \"_unique.csv\")), \"w\")\nwriter = csv.writer(fout)\nwriter.writerow([\"Word\", \"Count\"])\nfor _ in unique_words:\n writer.writerow([_, unique_words[_]])\n # fout.write(\"%s\\n\" % _)\nfout.close()\n","sub_path":"TagListsAndScripts/parse_sentences_WellSolved.py","file_name":"parse_sentences_WellSolved.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"628990703","text":"#!/usr/bin/env python\n#\n# FileName: udscan.py\n# Description: It provides UDS S/W Reprogramming Services\n#\n# History :\n# 13 2015-02-02 LeeMinHyuck, Chaged RSA Loagic\n# 12 2014-08-12 Jin-Su Jang, Changed RSA Logic\n# 11 2014-05-27 Jin-Su Jang, Added RSA\n# 10 2012-06-05 Jin-Su Jang, Support Extended ID\n# 9 2011-12-06 Jin-Su Jang, Add memory read function\n# _ 2011-10-22 Jin-Su Jang, Support CALIB\n# _ 2011-08-26 Jin-Su Jang, Support XPC56\n# _ 2011-08-22 Jin-Su Jang, Optimized time interval\n# _ 
2011-08-12 Jin-Su Jang, Support XC2K\n# piggy value must be [0,0,0,0] in case of XC2K\n# _ 2011-04-28 Jin-Su Jang, created\n##########################################################################\nimport wx\nimport os\n\nfrom ctypes import *\n\nfrom Uds import monitorjob\nfrom Uds import protocol\nfrom Uds import vcan\nfrom Uds import rsa_escrypt\nfrom Hex import rtswconv\nimport user_cfg\n\nimport wx.grid as gridlib\n\n\n# diagnostic service constant\nSID_DIAG_REQ = 0x10\nSID_DIAG_RSP = 0x50\nDIAG_DEF_SES = 0x1 #ES95486\nDIAG_PROG_SES = 0x2 #ES95486\nDIAG_EXT_SES = 0x3 #ES95486\nDIAG_ES_STD_SES = 0x81 #ES95485/6\nDIAG_ES_PROG_SES = 0x85 #ES95485/6\nDIAG_ES_EXT_SES = 0x90 #ES95485/6\n\nSID_READ_DTC_REQ = 0x19\nSID_READ_DTC_RSP = 0x59\n\nSID_READ_DATA_REQ = 0x22\nSID_READ_DATA_RSP = 0x62\n\nSID_WRITE_DATA_REQ = 0x2E\nSID_WRITE_DATA_RSP = 0x6E\n\n\n\nSID_CTRLDTC_REQ = 0x85\nSID_CTRLDTC_RSP = 0xC5\nCTRLDTC_ON = 0x1\nCTRLDTC_OFF = 0x2\n\nSID_COM_REQ = 0x28\nSID_COM_RSP = 0x68\n\n\n\nCOMCTRL_ENABLE_RX_TX = 0x0 #ES95486\nCOMCTRL_ENABLE_RX_DISABLE_TX = 0x1 #ES95486\nCOMCTRL_DISABLE_RX_ENABLE_TX = 0x2 #ES95486\nCOMCTRL_DISABLE_RX_TX = 0x3 #ES95486\nCOMCTRL_ES_RSP_REQ = 0x1 #ES95485/6\nCOMCTRL_ES_NORSP_REQ = 0x2 #ES95485/6\nCOMTYPE_NORMAL_MSG = 0x1;\nCOMTYPE_NM_MSG = 0x2\n\nSID_SEC_REQ = 0x27\nSID_SEC_RSP = 0x67\nSEC_SEED_REQ = 0x1\nSEC_KEY_REQ = 0x2\nSEC_CERTI_REQ = 0x61 # RSA\nSEC_CERTI_SEED_REQ = 0x62 # RSA\nSEC_CERTI_KEY_REQ = 0x63 # RSA\n\nSID_TP_REQ = 0x3E\nTP_NORSP = 0x80\n\nSID_RTNCTRL_REQ = 0x31\nSID_RTNCTRL_RSP = 0x71\nRTNCTRL_START = 0x1\nRTNCTRL_ERASE = 0xFF00 #ES95486\nRTNCTRL_CHECK = 0xFF01 #ES94486\nRTNCTRL_ES_C_ERASE = 0x0 #ES95485\nRTNCTRL_ES_D_ERASE = 0x1 #ES95485\nRTNCTRL_ES_CHECK = 0x2 #ES95485\nRTNCTRL_BOOT_ID = 0x0\nRTNCTRL_APP_ID = 0x1\nRTNCTRL_CALIB_ID = 0x2\nRTNCTRL_MEMRD_ID = 0xFF\n\nSID_REQDNLD_REQ = 0x34\nSID_REQDNLD_RSP = 0x74\nADDR_LEN_FMT_ID = 0x33 # address(3-byte),size(3-byte)\n\nSID_TXDATA_REQ = 0x36\nSID_TXDATA_RSP = 0x76\n\nSID_REQTXEXIT_REQ = 0x37\nSID_REQTXEXIT_RSP = 0x77\n\nSID_ECURESET_REQ = 0x11\nSID_ECURESET_RSP = 0x51\nECURESET_HARDRESET = 0x1\n\n\nSID_EN_NORM_TX_REQ = 0x29\nEN_NORM_TX_RSP = 0x1\nEN_NORM_TX_NORSP = 0x2\n\nSID_TESTER_PRESENT_REQ = 0x3E\nSID_TESTER_PRESENT_RSP = 0x7E\nTESTER_PRESENT_NORSP = 0x80\n\nSID_READ_MEMBYADDR_REQ = 0x23\nSID_READ_MEMBYADDR_RSP = 0x63\n \nSID_NEG_RSP = 0x7F\nNEG_NRC_78 = 0x78\nNEG_GPROG_ERR = 0x72\n\n# RSA ReadCertification and SecurityAccess\nRSA_DIAGDATA = 'rsa_diag_data.cfg' # Diagnostic Terminal(Device) data for ReadCertification\nRSA_CERTKEYDATA = 'rsa_certkey_data.cfg' # RSA Key data for ReadCertification \nRSA_SAKEYDATA = 'rsa_sakey_data.cfg' # RSA Key data for SecurityAccess\nRSA_SFKEYDATA = 'rsa_sfkey_data.cfg' # RSA Key data for SecureFlashing\nUdsServices=['DiagSessionCtrlExtend',\n 'ControlDTCSetting',\n 'CommunicationCtrl',\n 'DiagSessionCtrlProg',\n 'ReadCertification', # RSA Certification\n 'SecurityAccessSC', # RSA Security Access\n 'SecurityAccess',\n 'RoutineCtrl_Erase',\n 'RequestDownload',\n 'RoutineCtrl_Check',\n 'ECUReset',\n 'EnableNormTx'\n ]\n\nDiagServices = ['SessionControlReq(10hex)',\n 'ECUReset(11hex)',\n 'SecurityAccess(27hex)',\n 'CommunicationControl(28hex)',\n 'EnableNormalMsgTx(29hex)',\n 'ReadDTCInformation(19hex)',\n 'ReadDataByIdentifier(22hex)',\n 'WriteDataByIdentifier(2Ehex)',\n 'ControlDTCSetting(85hex)',\n 'StopDiagnosticSession(20hex)',\n 'ClearDiagnosticInformation(14hex)',\n ]\n\n\n\nclass UdsPanel:\n\n #survice id\n\n DIAG = 0x1\n DIAG_E = 0x2\n DIAG_E_RWAIT = 0x3\n 
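# each service pairs a request state with a *_RWAIT response-wait state;\n    # the rxdispatch table built in __init__ keys its receive handlers on the\n    # *_RWAIT values\n    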
DIAG_P = 0x4\n DIAG_P_RWAIT = 0x5\n DIAG_D = 0x6\n DIAG_D_RWAIT = 0x7\n \n ReadDTC = 0x8\n ReadDTC_RWAIT = 0x9\n \n ReadData = 0xa\n ReadData_RWAIT = 0xb\n \n WriteData = 0xc\n WriteData_RWAIT = 0xd\n \n ComCtrl = 0xe\n ComCtrl_RWAIT = 0xf\n \n \n \n def __init__(self, frame, parent):\n self.parent = parent\n self.frame = frame\n\n self.todo = self.DIAG\n\n self.fnctxidval = 0\n self.phytxidval = 0\n self.phyrxidval = 0\n self.phytxidvalcert = 0 # certification target tx id\n self.phyrxidvalcert = 0 # certification target rx id\n self.speedval = 0\n\n self.trigger=None\n self.protocol=None\n self.usercfg = None\n \n # add by mh 2016-05-30\n self.session_list = ['defaultSession(01h)',\n 'programmingSession(02h)',\n 'extendedDiagnosticSession(03h)', \n 'safetySystemDiagnosticSession(04h)',\n 'programmingSession(05h)',\n 'extendedDiagnosticSession(10h)',\n 'StandardDiagnosticMode(81h)',\n 'EcuProgrammingMode(85h)',\n 'ExtendedDiagnosticMode(90h)',\n 'QuiescentCurrDiagStart(D0)',\n 'SleepModeStartRequest(D1)'\n ]\n \n self.dic_sessioninfo = {'defaultSession(01h)' : '0x01',\n 'programmingSession(02h)':'0x02',\n 'extendedDiagnosticSession(03h)':'0x03', \n 'safetySystemDiagnosticSession(04h)':'0x04',\n 'programmingSession(05h)':'0x05',\n 'extendedDiagnosticSession(10h)':'0x10',\n 'StandardDiagnosticMode(81h)':'0x081',\n 'EcuProgrammingMode(85h)':'0x85',\n 'ExtendedDiagnosticMode(90h)':'0x90',\n 'QuiescentCurrDiagStart(D0)':'0xD0',\n 'SleepModeStartRequest(D1)':'0xD1'\n \n } \n \n self.LayoutObject(frame, parent)\n\n self.traceFrame = False\n self.filter = False\n\n #-----------------------------\n # initialize other variables\n #-----------------------------\n self.groupstart = None\n self.groupsize = None\n self.addrlist= None\n self.eolimage= None\n\n # CAN Type: standard,extended\n self.stdcan = True \n\n self.startok = False\n \n self.ckbox_periodictx = False\n \n self.img_sign = []\n self.filteredcanid = []\n \n self.rxdispatch = { self.DIAG_E_RWAIT : {'rx':self.RcvDiagExt},\n self.DIAG_P_RWAIT : {'rx':self.RcvDiagProg},\n self.DIAG_D_RWAIT : {'rx':self.RcvDiagDef},\n self.ReadDTC_RWAIT : {'rx':self.RcvReadDTC},\n self.ReadData_RWAIT : {'rx':self.RcvReadData}, \n self.WriteData_RWAIT : {'rx':self.RcvWriteData},\n self.ComCtrl_RWAIT : {'rx':self.RcvComCtrl}, \n }\n \n \n \n \n \n \n self.txtimer = wx.Timer(frame)\n self.txtimer_started = False\n frame.Bind(wx.EVT_TIMER, self.Ontxperiod, self.txtimer)\n \n \n \n self.updatecfgfile()\n\n def updatecfgfile(self):\n \n cfg_list = []\n \n f = open(\"default.cfg\")\n line = f.readline()\n f.close()\n cfg_list = line.split(', ')\n \n self.spec.SetValue(cfg_list[0])\n self.canchannel.SetValue(cfg_list[1])\n self.speed.SetValue(cfg_list[2])\n self.fnctxid.SetValue(cfg_list[3])\n self.phytxid.SetValue(cfg_list[4])\n self.phyrxid.SetValue(cfg_list[5])\n \n \n def Close(self):\n\n if self.trigger:\n self.trigger.close()\n\n self.Print('\\nUds close')\n \n\n def Print(self, data):\n \n self.frame._trace['DIAG'].AppendText(data)\n\n\n #--------------------------------------------------------\n # Diagnostic services Handlers\n #--------------------------------------------------------\n \n \n \"\"\"\n def OnDiagSessionCtrlExtend(self, event):\n #Functional request\n\n if self.startok == False:\n self.Print('\\nCommunication Start is not OK, Try Start Button!!!')\n return\n\n # now start diagnostic services\n if self.protocol:\n self.protocol.SendDiagExt(self.fnctxidval, onReq=True)\n else:\n self.Print('\\nCAN TP is not available')\n\n # clear status\n for 
service in UdsServices: \n self.exec_result[service].SetValue( '-' )\n\n \n \n def OnControlDTCSetting(self, event):\n\n if self.startok == False:\n self.Print('\\nCommunication Start is not OK, Try Start Button!!!')\n return\n\n if self.imgtypeval != 'DUMP':\n if self.protocol:\n # change physical connection to target ECU\n self.trigger.ChangePhyConnection(self.phytxidval, self.phyrxidval)\n self.protocol.SendControlDTC(self.fnctxidval, onReq=True)\n else:\n self.Print('\\nCAN TP is not available')\n else:\n self.Print('\\nNot allowed command in DUMP mode')\n\n\n def OnCommunicationCtrl(self, event):\n\n if self.startok == False:\n self.Print('\\nCommunication Start is not OK, Try Start Button!!!')\n return\n\n if self.imgtypeval != 'DUMP':\n if self.protocol:\n # change physical connection to target ECU\n self.trigger.ChangePhyConnection(self.phytxidval, self.phyrxidval)\n self.protocol.SendCommCtrl(self.fnctxidval, onReq=True)\n else:\n self.Print('\\nCAN TP is not available')\n else:\n self.Print('\\nNot allowed command in DUMP mode')\n\n\n def OnDiagSessionCtrlProg(self, event):\n\n if self.startok == False:\n self.Print('\\nCommunication Start is not OK, Try Start Button!!!')\n return\n\n if self.imgtypeval != 'DUMP':\n if self.protocol:\n self.protocol.SendDiagProg(self.phytxidval, onReq=True)\n else:\n self.Print('\\nCAN TP is not available')\n else:\n self.Print('\\nNot allowed command in DUMP mode')\n\n\n def OnReadCertification(self, event):\n\n if self.startok == False:\n self.Print('\\nCommunication Start is not OK, Try Start Button!!!')\n return\n\n if self.imgtypeval != 'DUMP':\n if self.protocol:\n if self.phytxidvalcert:\n # change physical connection to GW\n self.trigger.ChangePhyConnection(self.phytxidvalcert, self.phyrxidvalcert)\n self.protocol.SendReadCertification(self.phytxidvalcert, onReq=True)\n else:\n self.Print('\\nCAN TP is not available')\n else:\n self.Print('\\nNot allowed command in DUMP mode')\n\n\n def OnSecurityAccessSC(self, event):\n\n if self.startok == False:\n self.Print('\\nCommunication Start is not OK, Try Start Button!!!')\n return\n\n if self.imgtypeval != 'DUMP':\n if self.protocol:\n if self.phytxidvalcert:\n # change physical connection to GW\n self.trigger.ChangePhyConnection(self.phytxidvalcert, self.phyrxidvalcert)\n self.protocol.SendSeedReq(self.phytxidvalcert, onReq=True)\n else:\n self.Print('\\nCAN TP is not available')\n else:\n self.Print('\\nNot allowed command in DUMP mode')\n\n\n def OnSecurityAccess(self, event):\n\n if self.startok == False:\n self.Print('\\nCommunication Start is not OK, Try Start Button!!!')\n return\n\n if self.imgtypeval != 'DUMP':\n if self.protocol:\n # change physical connection to target ECU\n self.trigger.ChangePhyConnection(self.phytxidval, self.phyrxidval)\n self.protocol.SendSeedReq(self.phytxidval, onReq=True)\n else:\n self.Print('\\nCAN TP is not available')\n else:\n self.Print('\\nNot allowed command in DUMP mode')\n\n\n def OnRoutineCtrl_Erase(self, event):\n\n if self.startok == False:\n self.Print('\\nCommunication Start is not OK, Try Start Button!!!')\n return\n\n if self.imgtypeval != 'DUMP':\n if self.protocol:\n # change physical connection to target ECU\n self.trigger.ChangePhyConnection(self.phytxidval, self.phyrxidval)\n self.protocol.SendRoutineCtrl_Erase(self.phytxidval, onReq=True)\n else:\n self.Print('\\nCAN TP is not available')\n else:\n self.Print('\\nNot allowed command in DUMP mode')\n \n\n def OnRequestDownload(self,evt):\n\n if self.startok == False:\n 
self.Print('\\nCommunication Start is not OK, Try Start Button!!!')\n return\n\n if self.imgtypeval != 'DUMP':\n if self.protocol:\n self.protocol.SendRequestDownload( self.phytxidval, onReq=True)\n else:\n self.Print('\\nCAN TP is not available')\n else:\n self.Print('\\nNot allowed command in DUMP mode')\n\n\n def OnRoutineCtrl_Check(self, event):\n\n if self.startok == False:\n self.Print('\\nCommunication Start is not OK, Try Start Button!!!')\n return\n\n if self.imgtypeval != 'DUMP':\n if self.protocol:\n # change physical connection to target ECU\n self.trigger.ChangePhyConnection(self.phytxidval, self.phyrxidval)\n self.protocol.SendRoutineCtrl_Check(self.phytxidval, onReq=True)\n else:\n self.Print('\\nCAN TP is not available')\n else:\n self.Print('\\nNot allowed command in DUMP mode')\n\n\n def OnECUReset(self, event):\n\n if self.startok == False:\n self.Print('\\nCommunication Start is not OK, Try Start Button!!!')\n return\n\n if self.protocol:\n # change physical connection to target ECU\n self.trigger.ChangePhyConnection(self.phytxidval, self.phyrxidval)\n self.protocol.SendECUReset(self.phytxidval, onReq=True)\n else:\n self.Print('\\nCAN TP is not available')\n\n def OnEnableNormTx(self, event):\n\n if self.startok == False:\n self.Print('\\nCommunication Start is not OK, Try Start Button!!!')\n return\n\n if self.protocol:\n # change physical connection to target ECU\n self.trigger.ChangePhyConnection(self.phytxidval, self.phyrxidval)\n self.protocol.SendEnableNormTx(self.fnctxidval, onReq=True)\n else:\n self.Print('\\nCAN TP is not available')\n\n \"\"\"\n\n #--------------------------------------------------------\n # CAN Data process\n #-------------------------------------------------------- \n \n def CanRxEventCallback(self, msg, cmd, canid, rxtime=0):\n\n wx.CallAfter( self.CanRxService, msg, cmd, canid, rxtime)\n\n\n def CanRxService( self, msg, command=True, canid = None, rxtime=0 ):\n #self.filteredcanid = []\n \n if command and self.protocol and ((self.phyrxidval == canid) or (canid and (self.phyrxidvalcert == canid))):\n self.ScheduleService(msg)\n \n if not command and self.traceFrame:\n \n msg_len = len(msg)\n \n \n if self.filter and self.filteredcanid !=[]:\n if canid in self.filteredcanid : \n if msg_len == 0:\n self.Print('\\nt=%d ms ID=%X'% \\\n (rxtime, canid))\n elif msg_len == 1:\n self.Print('\\nt=%d ms ID=%X Data=%02X '% \\\n (rxtime, canid, msg[0]))\n elif msg_len == 2:\n self.Print('\\nt=%d ms ID=%X Data=%02X %02X '% \\\n (rxtime, canid, msg[0],msg[1]))\n \n elif msg_len == 3:\n self.Print('\\nt=%d ms ID=%X Data=%02X %02X %02X '% \\\n (rxtime, canid, msg[0],msg[1],msg[2]))\n \n elif msg_len == 4:\n self.Print('\\nt=%d ms ID=%X Data=%02X %02X %02X %02X '% \\\n (rxtime, canid, msg[0],msg[1],msg[2],msg[3]))\n \n elif msg_len == 5:\n self.Print('\\nt=%d ms ID=%X Data=%02X %02X %02X %02X %02X '% \\\n (rxtime, canid, msg[0],msg[1],msg[2],msg[3],msg[4]))\n \n elif msg_len == 6:\n self.Print('\\nt=%d ms ID=%X Data=%02X %02X %02X %02X %02X %02X '% \\\n (rxtime, canid, msg[0],msg[1],msg[2],msg[3],msg[4],msg[5]))\n \n elif msg_len == 7:\n self.Print('\\nt=%d ms ID=%X Data=%02X %02X %02X %02X %02X %02X %02X '% \\\n (rxtime, canid, msg[0],msg[1],msg[2],msg[3],msg[4],msg[5],msg[6]))\n elif msg_len == 8:\n self.Print('\\nt=%d ms ID=%X Data=%02X %02X %02X %02X %02X %02X %02X %02X'% \\\n (rxtime, canid, msg[0],msg[1],msg[2],msg[3],msg[4],msg[5],msg[6],msg[7]))\n else:\n \n if msg_len == 0:\n self.Print('\\nt=%d ms ID=%X'% \\\n (rxtime, canid))\n elif msg_len == 
1:\n self.Print('\\nt=%d ms ID=%X Data=%02X '% \\\n (rxtime, canid, msg[0]))\n elif msg_len == 2:\n self.Print('\\nt=%d ms ID=%X Data=%02X %02X '% \\\n (rxtime, canid, msg[0],msg[1]))\n \n elif msg_len == 3:\n self.Print('\\nt=%d ms ID=%X Data=%02X %02X %02X '% \\\n (rxtime, canid, msg[0],msg[1],msg[2]))\n \n elif msg_len == 4:\n self.Print('\\nt=%d ms ID=%X Data=%02X %02X %02X %02X '% \\\n (rxtime, canid, msg[0],msg[1],msg[2],msg[3]))\n \n elif msg_len == 5:\n self.Print('\\nt=%d ms ID=%X Data=%02X %02X %02X %02X %02X '% \\\n (rxtime, canid, msg[0],msg[1],msg[2],msg[3],msg[4]))\n \n elif msg_len == 6:\n self.Print('\\nt=%d ms ID=%X Data=%02X %02X %02X %02X %02X %02X '% \\\n (rxtime, canid, msg[0],msg[1],msg[2],msg[3],msg[4],msg[5]))\n \n elif msg_len == 7:\n self.Print('\\nt=%d ms ID=%X Data=%02X %02X %02X %02X %02X %02X %02X '% \\\n (rxtime, canid, msg[0],msg[1],msg[2],msg[3],msg[4],msg[5],msg[6]))\n elif msg_len == 8:\n self.Print('\\nt=%d ms ID=%X Data=%02X %02X %02X %02X %02X %02X %02X %02X'% \\\n (rxtime, canid, msg[0],msg[1],msg[2],msg[3],msg[4],msg[5],msg[6],msg[7]))\n\n \"\"\"\n if command and self.protocol and ((self.phyrxidval == canid) or (canid and (self.phyrxidvalcert == canid))):\n self.protocol.ScheduleService(msg)\n \"\"\"\n #if not command and (canid in [self.phyrxidval,self.phytxidval,self.fnctxidval]):, 2012.02.17\n #->if not command and ((canid & 0x700) == 0x700):\n \n \"\"\"\n if not command and self.traceFrame:\n if self.stdcan:\n if (canid & 0x700) == 0x700:\n self.Print('\\nt=%d ms ID=%X Data=%02X %02X %02X %02X %02X %02X %02X %02X'% \\\n (rxtime, canid, msg[0],msg[1],msg[2],msg[3],msg[4],msg[5],msg[6],msg[7]))\n else:\n if canid in [self.fnctxidval, self.phytxidval, self.phyrxidval]:\n self.Print('\\nt=%d ms ID=%X Data=%02X %02X %02X %02X %02X %02X %02X %02X'% \\\n (rxtime, canid & 0x7FFFFFFF, msg[0],msg[1],msg[2],msg[3],msg[4],msg[5],msg[6],msg[7]))\n \"\"\"\n\n \n\n #--------------------------------------------------------\n # GUI Layout configuration\n #-------------------------------------------------------- \n def LayoutObject(self, frame, parent):\n \"\"\"Top level layout routine\"\"\"\n \n infosizer = self.MakeInformationBoxSizer(frame, parent)\n cmdsizer = self.MakeCommandBoxSizer(frame, parent)\n datasizer = self.MakeDataBoxSizer(frame, parent)\n txsixer = self.MaxkTxBoxSizer(frame, parent)\n\n sizer = wx.GridBagSizer(hgap=5, vgap=5) \n sizer.Add(infosizer, pos=(0,0), flag=wx.ALIGN_CENTER)\n sizer.Add(cmdsizer, pos=(1,0), flag=wx.ALIGN_CENTER|wx.EXPAND)\n sizer.Add(datasizer, pos=(2,0), flag=wx.ALIGN_CENTER|wx.EXPAND)\n sizer.Add(txsixer, pos=(3,0), flag=wx.ALIGN_CENTER|wx.EXPAND)\n \n \n parent.SetSizer(sizer)\n sizer.Fit(parent) \n\n # Activate/Deactivate CRC location\n #self.OnSecuritySelection(None)\n\n def MakeDataBoxSizer (self, frame, parent):\n \n text_size = (100,-1)\n \n gbsizer = wx.GridBagSizer(hgap=10, vgap=2)\n Cfg = wx.StaticBox(parent, -1, '3.Transmit Data')\n sizer_cfg = wx.StaticBoxSizer(Cfg, wx.VERTICAL)\n \n canid_label = wx.StaticText(parent, -1, 'CANID: ')\n self.canTxID = wx.TextCtrl(parent, -1, '', size=text_size)\n FrameType_label = wx.StaticText(parent, -1, 'Message Type: ')\n self.frametype = wx.ComboBox(parent,-1, value = 'Normal_Message',\n size = text_size, choices =['Normal_Message', 'TP_Message'],\n style = wx.CB_DROPDOWN|wx.CB_READONLY)\n parent.Bind(wx.EVT_COMBOBOX, self.Onframetype, self.frametype)\n \n \n periodicCheckBox = wx.CheckBox(parent, -1, 'Periodic(ms)', size=(100,25))\n periodicCheckBox.SetToolTip( wx.ToolTip( 
'Unchecked:transmit once, Checked: Transmit periodically' ) ) \n frame.Bind(wx.EVT_CHECKBOX, self.OnPeriodic, periodicCheckBox)\n \n self.TxPeriod = wx.TextCtrl(parent, -1, '', size=text_size)\n self.TxPeriod.SetBackgroundColour('gray')\n \n self.myGrid1 = gridlib.Grid(parent)\n self.myGrid1.CreateGrid(5, 8)\n \n for col in range(8):\n colnum = col +0\n self.myGrid1.SetColLabelValue(col, \"%s byte\"%colnum)\n \n self.SetGridValue(self.myGrid1,0,'0x00')\n self.SetGridActive(self.myGrid1,0, True)\n \n \n self.SetGridActive(self.myGrid1,1, False)\n self.SetGridValue(self.myGrid1,1,'-')\n self.SetGridActive(self.myGrid1,2, False)\n self.SetGridValue(self.myGrid1,2,'-')\n self.SetGridActive(self.myGrid1,3, False)\n self.SetGridValue(self.myGrid1,3,'-')\n self.SetGridActive(self.myGrid1,4, False)\n self.SetGridValue(self.myGrid1,4,'-')\n \n \n \n gbsizer.Add(canid_label, pos=(0,0), span=(1,1))\n gbsizer.Add(self.canTxID, pos=(0,1))\n gbsizer.Add(FrameType_label, pos=(0,2))\n gbsizer.Add(self.frametype, pos=(0,3))\n \n gbsizer.Add(periodicCheckBox, pos=(1,0), span=(1,1))\n gbsizer.Add(self.TxPeriod, pos=(1,1))\n \n \n gbsizer.Add(self.myGrid1, pos=(2,0), span=(1,4))\n \n \n sizer_cfg.Add(gbsizer, 0, wx.ALIGN_CENTER, 2)\n \n \n return sizer_cfg\n \n def OnPeriodic (self, event):\n ckbox = event.GetEventObject()\n self.ckbox_periodictx = ckbox.GetValue()\n \n \n \n if self.ckbox_periodictx ==True:\n self.TxPeriod.SetBackgroundColour('white')\n self.TxPeriod.SetForegroundColour('black')\n self.TxPeriod.SetValue('200')\n self.TxPeriod.Refresh()\n else:\n self.TxPeriod.SetBackgroundColour('gray')\n self.TxPeriod.SetForegroundColour('red')\n self.TxPeriod.SetValue('-')\n self.TxPeriod.Refresh() \n self.txperiodic_Stop()\n \n \n def SetGridActive (self, grid, low, active):\n if active == False:\n for col in range(8):\n grid.SetCellTextColour(low, col, wx.LIGHT_GREY)\n #grid.SetCellValue(low, col, '-')\n grid.SetReadOnly(low, col, True)\n else:\n for col in range(8):\n \n grid.SetCellTextColour(low, col, wx.BLACK)\n #grid.SetCellValue(low, col, '0x00')\n grid.SetReadOnly(low, col, False)\n \n def SetGridValue (self, grid, low, value):\n for col in range(8):\n grid.SetCellValue(low, col, value)\n \n def Onframetype (self, event):\n frametype = self.frametype.GetValue()\n \n if frametype == 'Normal_Message':\n self.SetGridActive(self.myGrid1,0, True)\n self.SetGridValue(self.myGrid1,0,'0x00')\n \n self.SetGridActive(self.myGrid1,1, False)\n self.SetGridValue(self.myGrid1,1,'-')\n self.SetGridActive(self.myGrid1,2, False)\n self.SetGridValue(self.myGrid1,2,'-')\n self.SetGridActive(self.myGrid1,3, False)\n self.SetGridValue(self.myGrid1,3,'-')\n self.SetGridActive(self.myGrid1,4, False)\n self.SetGridValue(self.myGrid1,4,'-')\n \n elif frametype == 'TP_Message':\n self.SetGridActive(self.myGrid1,0, True)\n self.SetGridValue(self.myGrid1,0,'0x00')\n \n self.SetGridActive(self.myGrid1,1, True)\n self.SetGridValue(self.myGrid1,1,'-')\n self.SetGridActive(self.myGrid1,2, True)\n self.SetGridValue(self.myGrid1,2,'-')\n self.SetGridActive(self.myGrid1,3, True)\n self.SetGridValue(self.myGrid1,3,'-')\n self.SetGridActive(self.myGrid1,4, True)\n self.SetGridValue(self.myGrid1,4,'-')\n \n \n \n \n #print frametype\n\n def MaxkTxBoxSizer (self, frame, parent):\n btn_size=(150,23)\n btn_size2=(310,23)\n \n btn = wx.Button(parent, -1, 'Transmit',size=btn_size2)\n btn_tpstart = wx.Button(parent, -1, 'TestPresentStart',size=btn_size)\n btn_tpstop = wx.Button(parent, -1, 'TestPresentStop',size=btn_size)\n \n 
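# 'Transmit' becomes the default (Enter) button below; the TesterPresent\n        # buttons start/stop the cyclic 0x3E keep-alive in the protocol layer\n        # (2 ms cycle, per the OnTpStart trace message)\n        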
btn.SetDefault()\n frame.Bind(wx.EVT_BUTTON, self.OnTx, btn)\n frame.Bind(wx.EVT_BUTTON, self.OnTpStart, btn_tpstart)\n frame.Bind(wx.EVT_BUTTON, self.OnTpStop, btn_tpstop)\n \n gbsizer = wx.GridBagSizer(hgap=10, vgap=2)\n Cfg = wx.StaticBox(parent, -1, '4.Execute (Push Transmit Button)')\n sizer_cfg = wx.StaticBoxSizer(Cfg, wx.VERTICAL)\n \n gbsizer.Add(btn_tpstart, pos=(0,0))\n gbsizer.Add(btn_tpstop, pos=(0,1))\n gbsizer.Add(btn, pos=(1,0), span=(1,3))\n \n sizer_cfg.Add(gbsizer, 0, wx.ALIGN_CENTER, 2)\n \n return sizer_cfg\n \n def OnTpStart (self, event):\n if self.protocol:\n self.protocol.TestPresentStart()\n self.Print('\\nTestPresent MSG Transfer is started(2ms cycle)')\n else:\n self.Print('\\nCAN TP is not available')\n \n \n def OnTpStop (self, event):\n if self.protocol:\n self.protocol.TestPresentStop()\n #self.txperiodic_Stop()\n self.Print('\\nTestPresent MSG Transfer is stopped')\n else:\n self.Print('\\nCAN TP is not available')\n \n \n def Ontxperiod (self, event):\n \n \n \n if (self.txtimer_started)and(self.protocol):\n self.txtimer.Start(self.period)\n \n int_canid = self.period_canid\n msg = self.period_msg\n \n self.protocol.sendNormalTx(int_canid,msg)\n \n \n def OnTx (self, event):\n \n msg = []\n msgtype = self.frametype.GetValue()\n canid = self.canTxID.GetValue()\n int_canid = int(canid, 16)\n \n \n \n if msgtype == 'Normal_Message':\n for col in range(8):\n databyte = self.myGrid1.GetCellValue(0,col)\n int_databyte = int(databyte, 16)\n \n msg.append(int_databyte)\n \n elif msgtype == 'TP_Message':\n for low in range(5):\n for col in range(8):\n databyte = self.myGrid1.GetCellValue(low,col)\n if databyte =='-':\n continue\n else:\n \n int_databyte = int(databyte, 16)\n msg.append(int_databyte)\n \n \n \n \n if canid == '':\n self.Print('\\nCAN ID does not exist')\n return\n \n \n if self.protocol:\n if msgtype == 'Normal_Message':\n if self.ckbox_periodictx:\n self.period = int(self.TxPeriod.GetValue())\n \n self.txperiodic_Start()\n \n self.period_canid = int_canid\n self.period_msg = msg\n self.protocol.sendNormalTx(int_canid,msg)\n else:\n self.protocol.sendNormalTx(int_canid,msg)\n elif msgtype == 'TP_Message':\n sid = msg[0]\n if sid == SID_DIAG_REQ:\n subfunc = msg[1]\n if subfunc == DIAG_EXT_SES:\n self.todo = self.DIAG_E_RWAIT\n \n elif subfunc == DIAG_PROG_SES:\n self.todo = self.DIAG_P_RWAIT\n elif subfunc == DIAG_DEF_SES:\n self.todo = self.DIAG_D_RWAIT\n self.Print('\\nUDS: Session Control Request' )\n elif sid == SID_READ_DTC_REQ:\n self.todo = self.ReadDTC_RWAIT\n self.Print('\\nUDS: Read DTC Information Request' )\n \n elif sid == SID_READ_DATA_REQ:\n self.todo = self.ReadData_RWAIT\n self.Print('\\nUDS: Read Data By Id Request' )\n \n elif sid == SID_WRITE_DATA_REQ:\n self.todo = self.WriteData_RWAIT\n self.Print('\\nUDS: Write Data By Id Request' )\n \n if sid == SID_COM_REQ:\n self.todo = self.ComCtrl_RWAIT\n self.Print('\\nUDS: Communication Control Request' )\n \n \n self.protocol.sendTPTx(int_canid,msg)\n else:\n self.Print('\\nCAN TP is not available')\n \n \n def txperiodic_Start (self):\n if self.txtimer_started == False:\n self.txtimer.Start(self.period)\n self.txtimer_started = True\n \n def txperiodic_Stop (self):\n if self.txtimer_started == True:\n self.txtimer.Stop()\n self.txtimer_started = False\n \n \n def MakeInformationBoxSizer(self, frame, parent):\n \"\"\" Configuration object layout \"\"\"\n\n text_size = (85,-1)\n \n \n \n fnctxidlabel = wx.StaticText(parent, -1, 'FncTxID: ')\n self.fnctxid = wx.TextCtrl(parent, -1, '', 
size=text_size) \n self.fnctxid.SetToolTip( wx.ToolTip( 'Functional Transmit CAN ID from CRT ex)Gst' ) )\n \n phytxidlabel = wx.StaticText(parent, -1, 'PhyTxID: ')\n self.phytxid = wx.TextCtrl(parent, -1, '', size=text_size) \n self.phytxid.SetToolTip( wx.ToolTip( 'Physical Transmit CAN ID from CRT ex)GstBCM ID 0x7A1' ) )\n \n phyrxidlabel = wx.StaticText(parent, -1, 'PhyRxID: ') \n self.phyrxid = wx.TextCtrl(parent, -1, '', size=text_size) \n self.phyrxid.SetToolTip( wx.ToolTip( 'Physical Reception CAN ID at CRT ex)BCMGst ID 0x701' ) ) \n\n \n speedlabel = wx.StaticText(parent, -1, 'SPEED: ')\n self.speed = wx.ComboBox(parent,-1, value = '',\n size = text_size, choices = ['500','100','250'],\n style = wx.CB_DROPDOWN|wx.CB_READONLY)\n self.speed.SetToolTip( wx.ToolTip( 'Select the CAN communication speed' ) ) \n\n speclabel = wx.StaticText(parent, -1, 'Specification: ')\n self.spec = wx.ComboBox(parent,-1, value = '',\n size = text_size, choices = ['ES95486','ES95485'],\n style = wx.CB_DROPDOWN|wx.CB_READONLY)\n self.spec.SetToolTip( wx.ToolTip( 'Select the UDS specification; although the requirement specification is ES95486,' +\\\n '\\nyou are recommended to use ES95485 for backward compatibility in the CAN network' ) ) \n frame.Bind(wx.EVT_COMBOBOX, self.OnChangeSpec, self.spec)\n\n \n canchannel = wx.StaticText(parent, -1, 'CAN CH: ')\n self.canchannel = wx.ComboBox(parent,-1, value = '',\n size = text_size, choices = ['0','1'],\n style = wx.CB_DROPDOWN|wx.CB_READONLY)\n self.canchannel.SetToolTip( wx.ToolTip( '0: CANcaseXL channel 1, 1: CANcaseXL channel 2' ) ) \n\n\n\n\n UdsBtn = {}\n for label, handler, tip in [('VectorHwConfig',self.OnVectorHardwareConfig, \n 'Select the CAN channel in CANoe Hardware Configurator.\\n Refer to the sum_crt_too.doc'),\n ('Start',self.OnStart,'Start CAN communication'),\n ('Stop',self.OnStop,'Stop CAN communication')]:\n if label == 'VectorHwConfig':\n btn = wx.Button(parent, -1, label, size=(96,23))\n else:\n btn = wx.Button(parent, -1, label, size=(85,23))\n frame.Bind(wx.EVT_BUTTON, handler, btn)\n btn.SetDefault()\n UdsBtn[ label ] = btn\n btn.SetToolTip( wx.ToolTip( tip ) ) \n\n traceCheckBox = wx.CheckBox(parent, -1, 'Trace', size=(60,25))\n traceCheckBox.SetToolTip( wx.ToolTip( 'Unchecked: No CAN message trace, Checked: CAN message trace' ) ) \n frame.Bind(wx.EVT_CHECKBOX, self.OnTrace, traceCheckBox)\n \n \n filterCheckBox = wx.CheckBox(parent, -1, 'Filter', size=(60,25))\n filterCheckBox.SetToolTip( wx.ToolTip( 'Unchecked: All CAN messages traced, Checked: Filtered CAN message trace' ) )\n frame.Bind(wx.EVT_CHECKBOX, self.OnFilter, filterCheckBox) \n #filterIdlabel = wx.StaticText(parent, -1, 'Filtered ID: ')\n self.filtedID = wx.TextCtrl(parent, -1, '', style=wx.TE_RIGHT)\n self.filtedID.SetToolTip(wx.ToolTip('Enter the CAN IDs you want to monitor, as shown in the example ex) 0x104, 0x7df, 0x107'))\n\n \n filteredCANID = wx.StaticText(parent, -1, 'CAN ID(Filter): ')\n \n gbsizer = wx.GridBagSizer(hgap=10, vgap=2)\n Cfg = wx.StaticBox(parent, -1, '1.CONFIGURE')\n sizer_cfg = wx.StaticBoxSizer(Cfg, wx.VERTICAL)\n\n y_pos = 0\n gbsizer.Add(UdsBtn['VectorHwConfig'], pos=(y_pos,0))\n \n \n y_pos +=1\n gbsizer.Add(speclabel, pos=(y_pos,0))\n gbsizer.Add(self.spec, pos=(y_pos,1))\n gbsizer.Add(canchannel, pos=(y_pos,2))\n gbsizer.Add(self.canchannel, pos=(y_pos,3))\n gbsizer.Add(speedlabel, pos=(y_pos,4))\n gbsizer.Add(self.speed, pos=(y_pos,5))\n\n y_pos +=1\n gbsizer.Add(fnctxidlabel, pos=(y_pos,0))\n gbsizer.Add(self.fnctxid, pos=(y_pos,1))\n 
gbsizer.Add(phytxidlabel, pos=(y_pos,2))\n gbsizer.Add(self.phytxid, pos=(y_pos,3))\n gbsizer.Add(phyrxidlabel, pos=(y_pos,4))\n gbsizer.Add(self.phyrxid, pos=(y_pos,5))\n\n \n \n \n\n \n y_pos +=1\n gbsizer.Add(UdsBtn['Start'], pos=(y_pos,0)) \n gbsizer.Add(UdsBtn['Stop'], pos=(y_pos,1))\n \n gbsizer.Add(traceCheckBox, pos=(y_pos,4))\n gbsizer.Add(filterCheckBox, pos=(y_pos,5))\n y_pos +=1\n gbsizer.Add(filteredCANID, pos=(y_pos,0))\n #gbsizer.Add(filterIdlabel, pos=(y_pos,1))\n gbsizer.Add(self.filtedID, pos=(y_pos,1), span=(1,5), flag=wx.EXPAND)\n \n sizer_cfg.Add(gbsizer, 0, wx.ALL|wx.EXPAND, 2)\n \n return sizer_cfg \n\n\n def LoadUserConfig(self, config_file):\n \"\"\" Load User Configuration Parameter \"\"\"\n\n # read user's configuration parameters\n self.usercfg = user_cfg.UdsUserConfig( config_file )\n\n userparam = self.usercfg.getParams()\n\n self.cantype.SetValue(userparam['cantype'])\n self.fnctxid.SetValue(userparam['fnctxid'])\n self.phytxid.SetValue(userparam['phytxid'])\n self.phyrxid.SetValue(userparam['phyrxid'])\n self.phytxidcert.SetValue(userparam['phytxidcert'])\n self.phyrxidcert.SetValue(userparam['phyrxidcert'])\n self.secaddr.SetValue(userparam['secaddr'])\n self.micom.SetValue(userparam['micom'])\n self.speed.SetValue(userparam['speed'])\n self.spec.SetValue(userparam['spec'])\n self.imgtype.SetValue(userparam['imgtype'])\n self.automode.SetValue(userparam['mode'])\n self.comctrl.SetValue(userparam['comctrl'])\n\n if userparam.has_key('securi'):\n defparam = userparam['securi']\n else:\n defparam = 'NOT_SEC'\n self.secmode.SetValue(defparam)\n\n self.crcloc.SetValue(userparam['crcloc'])\n self.beginaddr.SetValue(userparam['b_addr'])\n self.endaddr.SetValue(userparam['e_addr'])\n self.canchannel.SetValue(userparam['channel'])\n self.imagePath.SetValue(userparam['imgpath'])\n\n\n def SaveUserConfig(self, config_file=None):\n \"\"\"\n Save user configuration setting to the configuration file\n Input: Gui Widget Value (user configuration settings)\n Output: user configuration file or param_udscan.cfg\n \"\"\"\n\n # read user's configuration parameters\n if config_file:\n self.usercfg = user_cfg.UdsUserConfig( config_file )\n \n if self.usercfg:\n # save user configurations\n for name,obj in [('cantype', self.cantype), \n ('fnctxid', self.fnctxid), \n ('phytxid', self.phytxid), \n ('phyrxid', self.phyrxid), \n ('phytxidcert', self.phytxidcert), \n ('phyrxidcert', self.phyrxidcert), \n ('secaddr', self.secaddr), \n ('micom', self.micom), \n ('speed', self.speed), \n ('spec', self.spec), \n ('imgtype', self.imgtype),\n ('mode', self.automode), \n ('crcloc', self.crcloc), \n ('securi', self.secmode), \n ('b_addr', self.beginaddr), \n ('e_addr', self.endaddr), \n ('comctrl', self.comctrl),\n ('channel', self.canchannel),\n ('imgpath', self.imagePath)]:\n self.usercfg.setParam(name, obj.GetValue())\n\n\n\n def MakeCommandBoxSizer(self, frame, parent):\n \"\"\" Application Map table GUI layout \"\"\"\n\n self.exec_btn = {}\n self.exec_result= {}\n\n btn_size=(200,23)\n txt_size=(90,23)\n text_size = (200,-1)\n \n \"\"\"\n UdsServices=['DiagSessionCtrlExtend',\n 'ControlDTCSetting',\n 'CommunicationCtrl',\n 'DiagSessionCtrlProg',\n 'ReadCertification', # RSA Certification\n 'SecurityAccessSC', # RSA Security Access\n 'SecurityAccess',\n 'RoutineCtrl_Erase',\n 'RequestDownload',\n 'RoutineCtrl_Check',\n 'ECUReset',\n 'EnableNormTx'\n ]\n \"\"\"\n \n \n DiagService = wx.StaticText(parent, -1, 'DiagService: ')\n Subfunction = wx.StaticText(parent, -1, 'SubFunction: ')\n 
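# Column headers for the diagnostic-service table laid out below\n # (request button / sub-function combo / response status box).\n 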
Response = wx.StaticText(parent, -1, 'DiagResponse: ')\n #dataIdentifier_read = wx.StaticText(parent, -1, 'dataId: ')\n #dataIdentifier_write= wx.StaticText(parent, -1, 'dataId: ')\n #dataRecord=wx.StaticText(parent, -1, 'dataRecord: ')\n \n #TextCtrl\n #self.read_dataId = wx.TextCtrl(parent, -1, '', size=text_size)\n #self.write_dataId = wx.TextCtrl(parent, -1, '', size=text_size)\n #self.datarecord = wx.TextCtrl(parent, -1, '', size=text_size)\n \n #button \n btn = wx.Button(parent, -1, 'SessionControlReq(10hex)',size=btn_size)\n btn.SetDefault()\n self.exec_btn['SessionControlReq(10hex)']=btn\n self.session = wx.ComboBox(parent,-1, value = 'defaultSession(01h)',\n size = text_size, choices = self.session_list,\n style = wx.CB_DROPDOWN|wx.CB_READONLY)\n \n \n \n \n btn = wx.Button(parent, -1, 'ReadDTCInformation(19hex)',size=btn_size)\n btn.SetDefault()\n self.exec_btn['ReadDTCInformation(19hex)']=btn\n self.readDTCinfo = wx.ComboBox(parent,-1, value = 'reportNumberOfDTCByStatusMask(01h)',\n size = text_size, choices = ['reportNumberOfDTCByStatusMask(01h)','reportDTCByStatusMask(02h)','reportDTCSnapshotIdentification(03h)', 'reportDTCSnapshotRecordByDTCNumber(04h)'],\n style = wx.CB_DROPDOWN|wx.CB_READONLY)\n \n \n \n btn = wx.Button(parent, -1, 'ReadDataByIdentifier(22hex)',size=btn_size)\n btn.SetDefault()\n self.exec_btn['ReadDataByIdentifier(22hex)']=btn\n \n btn = wx.Button(parent, -1, 'WriteDataByIdentifier(2Ehex)',size=btn_size)\n btn.SetDefault()\n self.exec_btn['WriteDataByIdentifier(2Ehex)']=btn\n \n btn = wx.Button(parent, -1, 'CommunicationControl(28hex)',size=btn_size)\n btn.SetDefault()\n self.exec_btn['CommunicationControl(28hex)']=btn\n \n self.comctrl = wx.ComboBox(parent,-1, value = 'enableRxAndTx(00h)',\n size = text_size, choices = ['enableRxAndTx(00h)','disableNormalMsgTx(01h)','disableNormalMsgTx(02h)', 'disableRxAndTx(03h)'],\n style = wx.CB_DROPDOWN|wx.CB_READONLY)\n \n #result \n btn = wx.Button(parent, -1, 'ECUReset(11hex)',size=btn_size)\n btn.SetDefault()\n self.exec_btn['ECUReset(11hex)']=btn\n self.ecureset = wx.ComboBox(parent,-1, value = 'hardReset(01h)',\n size = text_size, choices = ['hardReset(01h)'],\n style = wx.CB_DROPDOWN|wx.CB_READONLY)\n \n \n \n btn = wx.Button(parent, -1, 'EnableNormalMsgTx(29hex)',size=btn_size)\n btn.SetDefault()\n self.exec_btn['EnableNormalMsgTx(29hex)']=btn\n \n btn = wx.Button(parent, -1, 'ControlDTCSetting(85hex)',size=btn_size)\n btn.SetDefault()\n self.exec_btn['ControlDTCSetting(85hex)']=btn\n self.ctrldtc = wx.ComboBox(parent,-1, value = 'On(01h)',\n size = text_size, choices = ['On(01h)','Off(02h)'],\n style = wx.CB_DROPDOWN|wx.CB_READONLY)\n \n btn = wx.Button(parent, -1, 'StopDiagnosticSession(20hex)',size=btn_size)\n btn.SetDefault()\n self.exec_btn['StopDiagnosticSession(20hex)']=btn\n \n \n btn = wx.Button(parent, -1, 'ClearDiagnosticInformation(14hex)',size=btn_size)\n btn.SetDefault()\n self.exec_btn['ClearDiagnosticInformation(14hex)']=btn\n \n btn = wx.Button(parent, -1, 'SecurityAccess(27hex)',size=btn_size)\n btn.SetDefault()\n self.exec_btn['SecurityAccess(27hex)']=btn\n self.sec_access = wx.ComboBox(parent,-1, value = 'GetSeed(01h)',\n size = text_size, choices = ['GetSeed(01h)','CompareKey(02h)'],\n style = wx.CB_DROPDOWN|wx.CB_READONLY)\n \n \n \n \n #add Diag Services\n \n \n \n \n for service in DiagServices:\n result = wx.TextCtrl(parent, -1,'-', size=text_size, style=wx.TE_CENTER)\n result.SetForegroundColour('red')\n #result.SetBackgroundColour('light blue')\n self.exec_result[service]=result\n \n 
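# exec_result maps each label in DiagServices (presumably the module-level\n # list of service names) to the status box that UpdateStage() writes to.\n 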
result.SetToolTip( wx.ToolTip( 'OK: in case of valid response, -: no response, NOK: invalid response' ) ) \n \n \n #print self.exec_result.keys()\n \n frame.Bind(wx.EVT_BUTTON, self.OnSessChange, self.exec_btn['SessionControlReq(10hex)'])\n parent.Bind(wx.EVT_COMBOBOX, self.OnSessChange, self.session)\n frame.Bind(wx.EVT_BUTTON, self.OnReadDTC, self.exec_btn['ReadDTCInformation(19hex)'])\n parent.Bind(wx.EVT_COMBOBOX, self.OnReadDTC, self.readDTCinfo)\n frame.Bind(wx.EVT_BUTTON, self.OnReadData, self.exec_btn['ReadDataByIdentifier(22hex)'])\n frame.Bind(wx.EVT_BUTTON, self.OnWriteData, self.exec_btn['WriteDataByIdentifier(2Ehex)'])\n frame.Bind(wx.EVT_BUTTON, self.OnComControl, self.exec_btn['CommunicationControl(28hex)'])\n parent.Bind(wx.EVT_COMBOBOX, self.OnComControl, self.comctrl)\n \n frame.Bind(wx.EVT_BUTTON, self.OnEcuReset, self.exec_btn['ECUReset(11hex)'])\n parent.Bind(wx.EVT_COMBOBOX, self.OnEcuReset, self.ecureset)\n \n frame.Bind(wx.EVT_BUTTON, self.OnEnableNormTx, self.exec_btn['EnableNormalMsgTx(29hex)'])\n \n frame.Bind(wx.EVT_BUTTON, self.OnCtrlDtc, self.exec_btn['ControlDTCSetting(85hex)'])\n parent.Bind(wx.EVT_COMBOBOX, self.OnCtrlDtc, self.ctrldtc)\n \n frame.Bind(wx.EVT_BUTTON, self.OnStopDiag, self.exec_btn['StopDiagnosticSession(20hex)'])\n \n frame.Bind(wx.EVT_BUTTON, self.OnClearDtc, self.exec_btn['ClearDiagnosticInformation(14hex)'])\n \n frame.Bind(wx.EVT_BUTTON, self.OnSec_Access, self.exec_btn['SecurityAccess(27hex)'])\n parent.Bind(wx.EVT_COMBOBOX, self.OnSec_Access, self.sec_access)\n \n #self.sessionchg = wx.TextCtrl(parent, -1,'-', size=txt_size, style=wx.TE_CENTER)\n \n \n \n gbsizer = wx.GridBagSizer(hgap=12, vgap=2)\n Cfg = wx.StaticBox(parent, -1, '2.Diagnostic Service')\n sizer_cfg = wx.StaticBoxSizer(Cfg, wx.VERTICAL)\n \n y_layer = 0\n gbsizer.Add(DiagService, pos=(y_layer,0))\n gbsizer.Add(Subfunction, pos=(y_layer,1))\n gbsizer.Add(Response, pos=(y_layer,2))\n \n y_layer += 1\n gbsizer.Add(self.exec_btn['SessionControlReq(10hex)'], pos=(y_layer,0))\n gbsizer.Add(self.session, pos=(y_layer,1))\n gbsizer.Add(self.exec_result['SessionControlReq(10hex)'], pos=(y_layer,2))\n \n y_layer += 1\n gbsizer.Add(self.exec_btn['ECUReset(11hex)'], pos=(y_layer,0))\n gbsizer.Add(self.ecureset, pos=(y_layer,1))\n gbsizer.Add(self.exec_result['ECUReset(11hex)'], pos=(y_layer,2))\n \n y_layer += 1\n gbsizer.Add(self.exec_btn['CommunicationControl(28hex)'], pos=(y_layer,0))\n gbsizer.Add(self.comctrl, pos=(y_layer,1))\n gbsizer.Add(self.exec_result['CommunicationControl(28hex)'], pos=(y_layer,2))\n \n y_layer += 1\n gbsizer.Add(self.exec_btn['EnableNormalMsgTx(29hex)'], pos=(y_layer,0))\n gbsizer.Add(self.exec_result['EnableNormalMsgTx(29hex)'], pos=(y_layer,2))\n \n y_layer += 1\n gbsizer.Add(self.exec_btn['ReadDTCInformation(19hex)'], pos=(y_layer,0))\n gbsizer.Add(self.readDTCinfo, pos=(y_layer,1))\n gbsizer.Add(self.exec_result['ReadDTCInformation(19hex)'], pos=(y_layer,2))\n \n y_layer += 1\n gbsizer.Add(self.exec_btn['ReadDataByIdentifier(22hex)'], pos=(y_layer,0))\n #gbsizer.Add(dataIdentifier_read, pos=(1,1))\n #gbsizer.Add(self.read_dataId, pos=(1,2))\n gbsizer.Add(self.exec_result['ReadDataByIdentifier(22hex)'], pos=(y_layer,2))\n \n \n y_layer += 1\n gbsizer.Add(self.exec_btn['WriteDataByIdentifier(2Ehex)'], pos=(y_layer,0))\n #gbsizer.Add(dataIdentifier_write, pos=(2,1))\n #gbsizer.Add(self.write_dataId, pos=(2,2))\n #gbsizer.Add(dataRecord, pos=(2,3))\n #gbsizer.Add(self.datarecord, pos=(2,4))\n 
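# Every service row follows the same layout pattern: button in column 0,\n # optional sub-function combo in column 1, result box in column 2.\n 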
gbsizer.Add(self.exec_result['WriteDataByIdentifier(2Ehex)'], pos=(y_layer,2))\n \n y_layer += 1\n gbsizer.Add(self.exec_btn['ControlDTCSetting(85hex)'], pos=(y_layer,0))\n gbsizer.Add(self.ctrldtc, pos=(y_layer,1))\n gbsizer.Add(self.exec_result['ControlDTCSetting(85hex)'], pos=(y_layer,2))\n \n \n \n y_layer += 1\n gbsizer.Add(self.exec_btn['StopDiagnosticSession(20hex)'], pos=(y_layer,0))\n gbsizer.Add(self.exec_result['StopDiagnosticSession(20hex)'], pos=(y_layer,2))\n \n \n y_layer += 1\n gbsizer.Add(self.exec_btn['ClearDiagnosticInformation(14hex)'], pos=(y_layer,0))\n gbsizer.Add(self.exec_result['ClearDiagnosticInformation(14hex)'], pos=(y_layer,2))\n \n y_layer += 1\n gbsizer.Add(self.exec_btn['SecurityAccess(27hex)'], pos=(y_layer,0))\n gbsizer.Add(self.sec_access, pos=(y_layer,1))\n gbsizer.Add(self.exec_result['SecurityAccess(27hex)'], pos=(y_layer,2))\n \n \n \n \n sizer_cfg.Add(gbsizer, 0, wx.ALL|wx.EXPAND, 2)\n return sizer_cfg\n\n \n \n \n \n\n def OnReadData (self, event):\n \n self.UpdateStage('ReadDataByIdentifier(22hex)', '-')\n \n phytxid = self.phytxid.GetValue()\n self.canTxID.SetValue(phytxid)\n \n self.frametype.SetValue('TP_Message')\n for low in range(5):\n self.SetGridActive(self.myGrid1,low, True)\n \n for col in [1,2,3,4,5,6,7]:\n self.myGrid1.SetCellValue(0, col, '-')\n \n for low in [1,2,3,4]:\n for col in range(5):\n self.myGrid1.SetCellValue(low, col, '-')\n \n self.myGrid1.SetCellValue(0, 0, '0x22')\n \n def OnComControl (self, event):\n \n self.UpdateStage('CommunicationControl(28hex)', '-')\n \n phytxid = self.phytxid.GetValue()\n self.canTxID.SetValue(phytxid)\n \n self.frametype.SetValue('TP_Message')\n for low in range(5):\n self.SetGridActive(self.myGrid1,low, True)\n \n comctrl = self.comctrl.GetValue()\n \n if comctrl == 'enableRxAndTx(00h)':\n self.myGrid1.SetCellValue(0, 0, '0x28')\n self.myGrid1.SetCellValue(0, 1, '0x00')\n self.myGrid1.SetCellValue(0, 2, '0x01')\n \n for col in [3,4,5,6,7]:\n self.myGrid1.SetCellValue(0, col, '-')\n \n for low in [1,2,3,4]:\n for col in range(5):\n self.myGrid1.SetCellValue(low, col, '-')\n \n elif comctrl == 'disableNormalMsgTx(01h)':\n self.myGrid1.SetCellValue(0, 0, '0x28')\n self.myGrid1.SetCellValue(0, 1, '0x01')\n \n for col in [2,3,4,5,6,7]:\n self.myGrid1.SetCellValue(0, col, '-')\n \n for low in [1,2,3,4]:\n for col in range(5):\n self.myGrid1.SetCellValue(low, col, '-')\n \n elif comctrl == 'disableNormalMsgTx(02h)':\n self.myGrid1.SetCellValue(0, 0, '0x28')\n self.myGrid1.SetCellValue(0, 1, '0x02')\n \n for col in [2,3,4,5,6,7]:\n self.myGrid1.SetCellValue(0, col, '-')\n \n for low in [1,2,3,4]:\n for col in range(5):\n self.myGrid1.SetCellValue(low, col, '-')\n \n \n elif comctrl == 'disableRxAndTx(03h)':\n self.myGrid1.SetCellValue(0, 0, '0x28')\n self.myGrid1.SetCellValue(0, 1, '0x03')\n self.myGrid1.SetCellValue(0, 2, '0x01')\n \n for col in [3,4,5,6,7]:\n self.myGrid1.SetCellValue(0, col, '-')\n \n for low in [1,2,3,4]:\n for col in range(5):\n self.myGrid1.SetCellValue(low, col, '-')\n \n \n \n \n def OnWriteData (self, event):\n \n self.UpdateStage('WriteDataByIdentifier(2Ehex)', '-')\n \n phytxid = self.phytxid.GetValue()\n self.canTxID.SetValue(phytxid)\n \n self.frametype.SetValue('TP_Message')\n for low in range(5):\n self.SetGridActive(self.myGrid1,low, True)\n \n for col in [1,2,3,4,5,6,7]:\n self.myGrid1.SetCellValue(0, col, '-')\n \n for low in [1,2,3,4]:\n for col in range(5):\n self.myGrid1.SetCellValue(low, col, '-')\n \n \n self.myGrid1.SetCellValue(0, 0, '0x2E')\n \n def 
OnSessChange (self, event):\n \n self.UpdateStage('SessionControlReq(10hex)', '-')\n \n phytxid = self.phytxid.GetValue()\n self.canTxID.SetValue(phytxid)\n \n self.frametype.SetValue('TP_Message')\n for low in range(5):\n self.SetGridActive(self.myGrid1,low, True)\n \n session = self.session.GetValue()\n \n \n self.myGrid1.SetCellValue(0, 0, '0x10')\n self.myGrid1.SetCellValue(0, 1, self.dic_sessioninfo[session])\n \n for col in [2,3,4,5,6,7]:\n self.myGrid1.SetCellValue(0, col, '-')\n \n for low in [1,2,3,4]:\n for col in range(5):\n self.myGrid1.SetCellValue(low, col, '-')\n \n \"\"\"\n \n if session == 'defaultSession(01h)':\n self.myGrid1.SetCellValue(0, 0, '0x10')\n self.myGrid1.SetCellValue(0, 1, '0x01')\n \n for col in [2,3,4,5,6,7]:\n self.myGrid1.SetCellValue(0, col, '-')\n \n for low in [1,2,3,4]:\n for col in range(5):\n self.myGrid1.SetCellValue(low, col, '-')\n \n elif session == 'programmingSession(02h)':\n self.myGrid1.SetCellValue(0, 0, '0x10')\n self.myGrid1.SetCellValue(0, 1, '0x02')\n \n for col in [2,3,4,5,6,7]:\n self.myGrid1.SetCellValue(0, col, '-')\n \n for low in [1,2,3,4]:\n for col in range(5):\n self.myGrid1.SetCellValue(low, col, '-')\n \n elif session == 'extendedDiagnosticSession(03h)':\n self.myGrid1.SetCellValue(0, 0, '0x10')\n self.myGrid1.SetCellValue(0, 1, '0x03')\n \n for col in [2,3,4,5,6,7]:\n self.myGrid1.SetCellValue(0, col, '-')\n \n for low in [1,2,3,4]:\n for col in range(5):\n self.myGrid1.SetCellValue(low, col, '-')\n \n elif session == 'safetySystemDiagnosticSession(04h)':\n self.myGrid1.SetCellValue(0, 0, '0x10')\n self.myGrid1.SetCellValue(0, 1, '0x04')\n \n for col in [2,3,4,5,6,7]:\n self.myGrid1.SetCellValue(0, col, '-')\n \n for low in [1,2,3,4]:\n for col in range(5):\n self.myGrid1.SetCellValue(low, col, '-')\n \"\"\"\n \n def OnReadDTC (self, event):\n \n self.UpdateStage('ReadDTCInformation(19hex)', '-')\n \n phytxid = self.phytxid.GetValue()\n \n self.canTxID.SetValue(phytxid)\n \n subfunc = self.readDTCinfo.GetValue()\n \n self.frametype.SetValue('TP_Message')\n for low in range(5):\n self.SetGridActive(self.myGrid1,low, True)\n \n if subfunc == \"reportNumberOfDTCByStatusMask(01h)\":\n self.myGrid1.SetCellValue(0, 0, '0x19')\n self.myGrid1.SetCellValue(0, 1, '0x01')\n \n for col in [2,3,4,5,6,7]:\n self.myGrid1.SetCellValue(0, col, '-')\n \n for low in [1,2,3,4]:\n for col in range(5):\n self.myGrid1.SetCellValue(low, col, '-')\n \n elif subfunc == \"reportDTCByStatusMask(02h)\":\n \n self.myGrid1.SetCellValue(0, 0, '0x19')\n self.myGrid1.SetCellValue(0, 1, '0x02')\n \n for col in [2,3,4,5,6,7]:\n self.myGrid1.SetCellValue(0, col, '-')\n \n for low in [1,2,3,4]:\n for col in range(5):\n self.myGrid1.SetCellValue(low, col, '-')\n \n elif subfunc == \"reportDTCSnapshotIdentification(03h)\":\n \n self.myGrid1.SetCellValue(0, 0, '0x19')\n self.myGrid1.SetCellValue(0, 1, '0x03')\n \n for col in [2,3,4,5,6,7]:\n self.myGrid1.SetCellValue(0, col, '-')\n \n for low in [1,2,3,4]:\n for col in range(5):\n self.myGrid1.SetCellValue(low, col, '-')\n \n elif subfunc == \"reportDTCSnapshotRecordByDTCNumber(04h)\":\n \n self.myGrid1.SetCellValue(0, 0, '0x19')\n self.myGrid1.SetCellValue(0, 1, '0x04')\n \n for col in [2,3,4,5,6,7]:\n self.myGrid1.SetCellValue(0, col, '-')\n \n for low in [1,2,3,4]:\n for col in range(5):\n self.myGrid1.SetCellValue(low, col, '-')\n \n \n \n def OnEcuReset (self, event):\n pass\n \n def OnEnableNormTx (self, event):\n pass\n \n def OnCtrlDtc (self, event):\n pass\n \n def OnStopDiag (self, event):\n pass\n\n def 
OnClearDtc (self, event):\n pass\n \n def OnSec_Access (self, event):\n pass\n\n def MakeProgressSizer(self, frame, parent):\n \"\"\" Progress Report GUI layout \"\"\"\n \n statusbox = wx.StaticBox(parent, -1, '3.PROGRESS') \n sizer = wx.StaticBoxSizer(statusbox, wx.VERTICAL)\n\n addrLabel = wx.StaticText(parent, -1, 'Address')\n txcntLabel = wx.StaticText(parent, -1, 'Tx Count')\n totalLabel = wx.StaticText(parent, -1, 'Total Count')\n errorLabel = wx.StaticText(parent, -1, 'Error Count')\n retransLabel = wx.StaticText(parent, -1, 'Retransmission')\n\n txt_size = (100,20)\n\n self.addrTxt = wx.TextCtrl(parent, -1, '0x00000000', size=txt_size)\n self.addrTxt.SetBackgroundColour('black') \n self.addrTxt.SetForegroundColour('red') \n self.txcntTxt = wx.TextCtrl(parent, -1, '0', size=txt_size)\n self.txcntTxt.SetBackgroundColour('black') \n self.txcntTxt.SetForegroundColour('red') \n self.totalTxt = wx.TextCtrl(parent, -1, '0', size=txt_size)\n self.totalTxt.SetBackgroundColour('black') \n self.totalTxt.SetForegroundColour('red') \n self.errorTxt = wx.TextCtrl(parent, -1, '0', size=txt_size)\n self.errorTxt.SetBackgroundColour('black') \n self.errorTxt.SetForegroundColour('red') \n self.retransTxt = wx.TextCtrl(parent, -1, '0', size=txt_size)\n self.retransTxt.SetBackgroundColour('black') \n self.retransTxt.SetForegroundColour('red') \n\n self.gauge = wx.Gauge(parent, -1, 100, size=(100,26))\n self.gauge.SetBezelFace(3)\n self.gauge.SetShadowWidth(3)\n \n gbsizer = wx.GridBagSizer(hgap=10, vgap=2)\n gbsizer.Add(addrLabel, pos=(0,0))\n gbsizer.Add(txcntLabel, pos=(0,1))\n gbsizer.Add(totalLabel, pos=(0,2))\n gbsizer.Add(errorLabel, pos=(0,3)) \n\n gbsizer.Add(retransLabel, pos=(0,4)) \n gbsizer.Add(self.addrTxt, pos=(1,0))\n gbsizer.Add(self.txcntTxt, pos=(1,1))\n gbsizer.Add(self.totalTxt, pos=(1,2))\n gbsizer.Add(self.errorTxt, pos=(1,3))\n gbsizer.Add(self.retransTxt, pos=(1,4)) \n gbsizer.Add(self.gauge, pos=(2,0), span=(1,5), flag=wx.EXPAND)\n\n sizer.Add(gbsizer, 0, wx.ALL|wx.EXPAND, 2)\n return sizer\n\n\n #----------------------------------------\n # GUI updates\n #----------------------------------------\n\n def UpdateProgress(self, start, txcnt, total, error, retx):\n \"\"\"update progress gauge \"\"\"\n \n wx.CallAfter(self.UpdateGUI, start, txcnt, total, error, retx)\n \n\n def UpdateGUI(self, start, txcnt, total, error, retx):\n \n self.addrTxt.SetValue(hex(start))\n self.txcntTxt.SetValue(str(txcnt))\n self.totalTxt.SetValue(str(total))\n self.errorTxt.SetValue(str(error))\n self.retransTxt.SetValue(str(retx)) \n if total:\n self.gauge.SetValue((txcnt * 100)/total)\n else:\n self.gauge.SetValue(0) \n\n\n def UpdateStage(self, stage, status):\n\n for service in DiagServices:\n if stage == service:\n self.exec_result[service].SetValue( status )\n \n\n \n #-------------------------------------------------------\n # Event Handlers\n #-------------------------------------------------------\n def OnChangeSpec(self, event):\n\n if self.protocol:\n self.protocol.SetSpecification(self.spec.GetValue())\n self.Print('\\nSpecification -> %s'%(self.spec.GetValue()))\n \n \n def OnOpenImageFile(self, event):\n \"\"\"Open a s19 file\"\"\"\n \n wildcard = \"S19 source (*.s19)|*.s19|\" \\\n \"MHX source (*.mhx)|*.mhx|\" \\\n \"All files (*.*)|*.*\"\n dialog = wx.FileDialog(None,\n \"Choose a target image file\",\n '.',\n \"\",\n wildcard, wx.OPEN)\n\n if dialog.ShowModal() == wx.ID_OK:\n self.imagePath.SetValue(dialog.GetPath())\n self.Print('\\nOpened file %s\\n' % self.imagePath.GetValue())\n 
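# Destroy the dialog whether or not the user pressed OK; modal wx dialogs\n # must be destroyed explicitly, or the native window handle leaks.\n 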
dialog.Destroy()\n\n\n def OnSecuritySelection(self, evt):\n \"\"\"\n Input: event id\n Output: NA\n\n Show GUI object color depending on the selected security level\n 'NOT_SEC' - disable CRC address input box, disable GW physical connection CAN ID input box\n 'SEC_ACC' - disable CRC address input box\n 'SEC_FLA' - enable CRC address input box, enable GW physical connection CAN ID input box\n \"\"\"\n smode = self.secmode.GetValue()\n crclabel = 'NA'\n crcfgcolor = 'red'\n crcbgcolor = 'grey'\n rdcertfgcolor = 'red'\n rdcertbgcolor = 'light blue'\n certidlabel = ''\n certidfgcolor = 'red'\n certidbgcolor = 'light blue'\n if smode == 'SEC_FLA':\n crclabel = ''\n crcbgcolor = 'light blue'\n rdcertlabel = 'ReadCertification'\n salabel = 'SecurityAccessSC'\n elif smode == 'SEC_ACC':\n rdcertlabel = 'ReadCertification'\n salabel = 'SecurityAccessSC'\n else: # smode == 'NOT_SEC'\n rdcertlabel = 'Disabled'\n salabel = 'Disabled'\n rdcertbgcolor = 'grey'\n certidlabel = '0x7FF'\n certidbgcolor = 'grey'\n\n #self.crcloc.SetValue(crclabel) <- overwrite restored value error\n self.crcloc.SetForegroundColour(crcfgcolor)\n self.crcloc.SetBackgroundColour(crcbgcolor)\n self.crcloc.Refresh()\n\n #self.exec_btn['ReadCertification'].SetLabel(rdcertlabel) <- overwrite restored value error\n self.exec_result['ReadCertification'].SetForegroundColour(rdcertfgcolor)\n self.exec_result['ReadCertification'].SetBackgroundColour(rdcertbgcolor)\n self.exec_btn['ReadCertification'].Refresh()\n self.exec_result['ReadCertification'].Refresh()\n\n #self.exec_btn['SecurityAccessSC'].SetLabel(salabel) <- overwrite restored value error\n self.exec_result['SecurityAccessSC'].SetForegroundColour(rdcertfgcolor)\n self.exec_result['SecurityAccessSC'].SetBackgroundColour(rdcertbgcolor)\n self.exec_btn['SecurityAccessSC'].Refresh()\n self.exec_result['SecurityAccessSC'].Refresh()\n\n if smode == 'NOT_SEC':\n self.phytxidcert.SetValue(certidlabel)\n self.phytxidcert.SetForegroundColour(certidfgcolor)\n self.phytxidcert.SetBackgroundColour(certidbgcolor)\n self.phytxidcert.Refresh()\n if smode == 'NOT_SEC':\n self.phyrxidcert.SetValue(certidlabel)\n self.phyrxidcert.SetForegroundColour(certidfgcolor)\n self.phyrxidcert.SetBackgroundColour(certidbgcolor)\n self.phyrxidcert.Refresh()\n\n\n def OnSaveUserConfig(self, evt):\n \n wildcard = \"User Config file (*.cfg)|*.cfg|\" \\\n \"All files (*.*)|*.*\"\n dialog = wx.FileDialog(None, \"Choose User Configuration file\", os.getcwd(), \"\", wildcard, wx.SAVE)\n if dialog.ShowModal() == wx.ID_OK:\n # save user configurations\n self.SaveUserConfig(dialog.GetPath())\n self.Print('\\nUDS: %s is Saved.'%dialog.GetPath())\n dialog.Destroy()\n\n\n def OnLoadUserConfig(self, evt):\n\n wildcard = \"User Config file (*.cfg)|*.cfg|\" \\\n \"All files (*.*)|*.*\"\n dialog = wx.FileDialog(None, \"Choose User Configuration file\", os.getcwd(), \"\", wildcard, wx.OPEN)\n if dialog.ShowModal() == wx.ID_OK:\n # load user configuration\n self.LoadUserConfig(dialog.GetPath())\n # Activate/Deactivate CRC location\n #self.OnSecuritySelection(None)\n\n self.Print('\\nUDS: %s is Loaded'%dialog.GetPath())\n dialog.Destroy()\n\n \n def OnStop(self, evt):\n\n self.startok = False\n \n if self.txtimer_started:\n self.txperiodic_Stop()\n \n if self.trigger:\n \n self.trigger.close()\n self.trigger = None\n\n if self.protocol:\n self.protocol.Close()\n self.protocol = None\n\n self.Print('\\n<<<<<<<<<<<<<<<<<<<<<<<<<<<<<') \n self.Print('\\nStop CAN communication')\n self.Print('\\n<<<<<<<<<<<<<<<<<<<<<<<<<<<<<')\n 
else:\n self.Print('\\nAlready CAN communication was stopped')\n\n\n \n def OnStart(self, evt):\n \"\"\"\n Input: evend id\n Output: NA\n\n 1) clear GUI\n 2) convert Hex Image\n 3) activate Communication Channel\n 4) get RSA signature if it is configured\n \"\"\"\n\n \n\n # set CAN type\n self.stdcan =True\n \n # clear status\n \"\"\"\n for service in UdsServices: \n self.exec_result[service].SetValue( '-' )\n \"\"\"\n # save user configurations\n #self.SaveUserConfig()\n\n\n # now, start session\n if self.trigger:\n self.Print('\\nAlready CAN communication is started')\n return\n \n \n \n \"\"\"\n # get user configurations\n self.imgtypeval = self.imgtype.GetValue()\n secaddr = self.secaddr.GetValue()\n begin = self.beginaddr.GetValue()\n end = self.endaddr.GetValue()\n comctrl = self.comctrl.GetValue()\n if (not begin) or (not end) or (not secaddr) or (not self.imgtypeval) or (not comctrl):\n self.Print('\\nOne of input [begin,end,secaddr,imgtype, comctrl] is empty')\n return \n else:\n begin = int(self.beginaddr.GetValue(), 16)\n end = int(self.endaddr.GetValue(), 16)\n secaddr = int(self.secaddr.GetValue(), 16)\n comctrl = (comctrl == 'NORM_NM') and 3 or 1\n \"\"\"\n # ---------------------------------------------\n # Test point without CAN connection\n # return\n # ---------------------------------------------\n\n # image conversion\n #micro = self.micom.GetValue()\n \"\"\"\n if micro =='':\n self.Print('\\nError: No Target MCU is selected in the GUI!!')\n return\n if not self.GetHexImage(micro, begin, end, secaddr, self.imgtypeval):\n self.Print('\\nImage conversion failed')\n return\n else:\n # RSA\n if smode == 'SEC_FLA':\n img_sign, self.eolimage = self.ExtractSignature(self.eolimage)\n \"\"\"\n self.protocol = self.GetCommChannelDesc()\n \n self.savecfg()\n \n def savecfg(self):\n \n f = open(\"default.cfg\", 'w')\n \n spec = str(self.spec.GetValue())\n canch = self.canchannel.GetValue()\n canch = str(canch)\n speed = self.speed.GetValue()\n speed = str(speed)\n ftxid = self.fnctxid.GetValue()\n ftxid = str(ftxid)\n ptxid = self.phytxid.GetValue()\n ptxid = str(ptxid)\n prxid = self.phyrxid.GetValue()\n prxid = str(prxid)\n \n data = spec +', ' + canch+', ' + speed+', ' + ftxid+', ' +ptxid+', ' + prxid\n \n f.write(data)\n \n f.close()\n\n def OnVectorHardwareConfig(self, event):\n \"\"\"\n Input: event id\n Output: NA\n\n Vector HW configuration\n \"\"\"\n vcan.PopupHwConfig()\n \n\n def OnSaveTrace(self, event):\n\n f = open('savetrace_uds.txt','w')\n f.write( self.frame._trace['Uds'].GetValue() )\n f.close()\n\n def OnLoadConfig(self, event):\n\n pass\n\n def OnSaveConfig(self, event):\n\n pass\n\n def ExtractSignature(self, image):\n \"\"\"\n Separate signature from data.\n \"\"\"\n img_sign = None\n if image[-1][4:8] == [0xAA, 0xBB, 0xCC, 0xDD]:\n self.Print('\\nUDS: Found an Image Signature!')\n img_sign = image[-1][4:-1]\n return (img_sign, image[:-1])\n elif image[-2][4:8] == [0xAA, 0xBB, 0xCC, 0xDD]:\n self.Print('\\nUDS: Found an Image Signature!')\n img_sign = image[-2][4:-1] + image[-1][4:-1] \n return (img_sign, image[:-2])\n else:\n self.Print('\\nUDS: No Image Signature Found')\n return (img_sign, image[:])\n\n\n def GetCommChannelDesc(self):\n \"\"\"\n Input: NA\n Output: NA\n\n Get Communication Channel Information\n 1) Communication ID(CAN ID)\n 2) Communication Speed\n 3) Communication Channel\n \"\"\"\n self.fnctxidval = 0\n self.phytxidval = 0\n self.phyrxidval = 0\n self.phytxidvalcert = 0\n self.phyrxidvalcert = 0\n\n fnctxid = self.fnctxid.GetValue()\n 
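# The ID fields hold hex strings parsed with int(x, 16) below. isalnum()\n # does not guarantee valid hex (e.g. 'zz' passes), so as a sketch a\n # stricter guard would be:\n # try:\n # self.fnctxidval = int(fnctxid, 16)\n # except ValueError:\n # self.Print('\\nCAN: invalid hex ID %s' % fnctxid)\n 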
phytxid = self.phytxid.GetValue()\n phyrxid = self.phyrxid.GetValue()\n phytxidcert = '0x7ff'\n phyrxidcert = '0x7ff'\n if fnctxid and fnctxid.isalnum():\n self.fnctxidval = int( fnctxid, 16)\n if phytxid and phytxid.isalnum():\n self.phytxidval = int( phytxid, 16)\n if phyrxid and phyrxid.isalnum():\n self.phyrxidval = int( phyrxid, 16) \n \n if phytxidcert and phytxidcert.isalnum():\n if phytxidcert == '0x7FF':\n self.phytxidvalcert = 0\n else:\n self.phytxidvalcert = int( phytxidcert, 16)\n if phyrxidcert and phyrxidcert.isalnum():\n if phyrxidcert == '0x7FF':\n self.phyrxidvalcert = 0\n else:\n self.phyrxidvalcert = int( phyrxidcert, 16) \n \n # Check CAN ID\n \"\"\"\n smode = self.secmode.GetValue()\n if smode in ['SEC_ACC','SEC_FLA']:\n if (self.phytxidvalcert == 0) or (self.phyrxidvalcert == 0):\n self.Print('\\nCAN: Invalid PhyTxIdCert and PhyRxIdCert ID ')\n return None\n else: # 'NOT_SEC'\n if self.phytxidvalcert or self.phyrxidvalcert:\n self.Print('\\nCAN: Invalide PhyTxIdCert and PhyRxIdCert ID -> 0x7FF')\n return None\n \"\"\"\n # check CAN type\n \"\"\"\n if not self.stdcan:\n self.fnctxidval |= 0x80000000\n self.phytxidval |= 0x80000000\n self.phyrxidval |= 0x80000000 \n self.phytxidvalcert |= 0x80000000\n self.phyrxidvalcert |= 0x80000000 \n \"\"\"\n self.speedval = int( self.speed.GetValue() ) * 1000\n self.can_ch = int( self.canchannel.GetValue() )\n self.Print('\\nCOM: CAN_%d FncTx(%X) PhyTx(%X) PhyRx(%X) PhyTxCert(%X) PhyRxCert(%X) Speed(%d)'\n %(self.can_ch, self.fnctxidval, self.phytxidval, self.phyrxidval, self.phytxidvalcert, self.phyrxidvalcert, self.speedval))\n\n self.trigger = monitorjob.TriggerJobCan(self.frame,\n self.can_ch,\n self.phytxidval,\n self.phyrxidval,\n self.speedval,\n self.Print, True)\n if self.trigger:\n self.trigger.setMsgHandler( None, self.CanRxEventCallback)\n\n # initialize uds protocol\n return protocol.UdsProtocol(self.frame,\n self.trigger, self.spec.GetValue(), 'MANUAL',\n self.fnctxidval, self.phytxidval, self.phyrxidval, self.phytxidvalcert, self.phyrxidvalcert,\n self.Print, self.UpdateProgress, self.UpdateStage )\n else:\n return None\n\n\n def GetHexImage(self, micro, begin, end, secaddr, imgtype):\n \"\"\"\n Get hex code from S19 image file.\n Input : micro - micom\n begin, end - image range\n secaddr\n imgtype\n Output: self.eolimg\n self.groupstart\n self.groupsize\n self.addrlist\n self.imgsign(if RSA is used)\n \"\"\"\n line = []\n utpfile = []\n signiture = []\n \n if imgtype == 'DUMP':\n self.eolimage = []\n self.groupstart = []\n self.groupsize = []\n self.addrlist = []\n return True\n\n # get group information\n imgpath = self.imagePath.GetValue()\n if (not imgpath) or (not os.path.exists( imgpath )):\n self.Print('\\nRTSWCONV: Invalid Image path!!!')\n return False\n \n line = rtswconv.hexutil.GetFileLines(imgpath)\n utpfile, signiture = self.IMGFile_Seperation(line)\n \n self.img_sign = self.GetIMGsign(signiture)\n \n \n\n # RSA\n smode = self.secmode.GetValue() \n if smode == 'SEC_FLA':\n rsa_crcloc = self.crcloc.GetValue()\n if rsa_crcloc.isalnum() == True:\n rsa_crcloc = int(self.crcloc.GetValue(), 16)\n else:\n self.Print('\\nCRC: Invalid CRC Address format -> hexadecimal format (0x...) 
is required!!!')\n return False\n else:\n rsa_crcloc = 0\n\n try:\n self.eolimage, self.rsa_imgarray = rtswconv.GetHexCode( micro, utpfile, \n begin, end,\n imgtype, secaddr, rsa_crcloc, self.Print)\n except IOError:\n self.Print('\\nRTSWCONV: Permission Denied(%s) -> Abort!!!'%(imgpath) )\n return False\n\n if (not self.eolimage) or ((smode == 'SEC_FLA') and (self.rsa_imgarray == [])):\n self.Print('\\nRTSWCONV: Hex Conversion Failed!!!')\n return False\n\n self.GetHexBlockGroup( rtswconv.DNLD_BLKLEN[micro] )\n self.Print('\\nRTSWCONV: Hex Conversion Success') \n return True\n\n \n def GetIMGsign (self, img_sign):\n img_sig=[]\n \n for line in img_sign:\n code = line[12:-3]\n for bidx in xrange(0,len(code),2):\n v = int( '0x' + code[bidx : bidx + 2], 16) \n img_sig.append(v)\n \n \n return img_sig\n\n\n def IMGFile_Seperation (self, data):\n step = False\n data1 = []\n data2 = []\n for line in data:\n if step== False: \n if line[2:4] != '09':\n data1.append(line)\n \n else:\n step = True\n data2.append(line)\n \n else :\n data2.append(line)\n \n return data1, data2\n\n\n def GetHexBlockGroup(self, block_size):\n \"\"\"\n Input : block_size - 128\n Output: self.addrlist self.groupsize self.groupstart\n \"\"\"\n\n # get address list\n self.addrlist= [0] * len(self.eolimage)\n for idx,line in enumerate(self.eolimage):\n self.addrlist[idx] = (line[1]<<16)|(line[2]<<8)|line[3]\n\n # calculate group start/size\n self.groupstart = [0]\n self.groupsize = []\n for idx in range(1, len(self.addrlist)):\n if ((self.addrlist[idx] - self.addrlist[idx - 1]) != block_size):\n self.groupsize.append( (idx - self.groupstart[-1]) * block_size)\n self.groupstart.append( idx )\n\n blockidx = self.groupstart[-1]\n self.groupsize.append( self.addrlist[-1] - self.addrlist[ blockidx ] + block_size )\n\n for start,size in zip(self.groupstart, self.groupsize):\n self.Print('\\nRTSWCONV: Group Start - 0x%X Size - 0x%X'%(self.addrlist[start], size))\n\n\n def OnTrace(self, event):\n \n ckbox = event.GetEventObject()\n self.traceFrame = ckbox.GetValue()\n \n def OnFilter(self, event):\n ckbox = event.GetEventObject()\n self.filter = ckbox.GetValue()\n \n if self.filteredcanid ==False:\n self.filteredcanid =[]\n else:\n self.filteredcanid = []\n filteredid = self.filtedID.GetValue()\n \n id_list = filteredid.split(', ')\n \n for canid in id_list:\n int_id = int(canid, 16)\n self.filteredcanid.append(int_id)\n\n\n\n #-------------------------------------------------------\n # Scheduling Service\n #-------------------------------------------------------\n def ScheduleService(self, rxmsg):\n \n if self.rxdispatch.has_key( self.todo ):\n self.rxdispatch[ self.todo ]['rx'](rxmsg)\n else:\n if rxmsg[0] == SID_NEG_RSP:\n self.Print('\\nUDS: Negative Response')\n \n \n def RcvDiagExt(self, rxmsg):\n if rxmsg[0] == SID_NEG_RSP:\n \n self.UpdateStage('SessionControlReq(10hex)', 'NEG_RSP')\n self.Print('\\nUDS: Negative Response')\n return\n if rxmsg[0] == SID_DIAG_RSP:\n self.p2can_server_max = (rxmsg[2]<<8) | rxmsg[3]\n self.p2can_ext_server_max = ((rxmsg[4]<<8) | rxmsg[5]) * 10\n \n self.UpdateStage('SessionControlReq(10hex)', 'EXT_SESS_OK')\n self.Print('\\nUDS: RcvDiagExt %d %d'%(self.p2can_server_max, self.p2can_ext_server_max))\n self.todo = self.DIAG\n def RcvDiagProg(self, rxmsg):\n if rxmsg[0] == SID_NEG_RSP:\n self.todo = self.DIAG\n self.UpdateStage('SessionControlReq(10hex)', 'NEG_RSP')\n self.Print('\\nUDS: Negative Response')\n return\n if rxmsg[0] == SID_DIAG_RSP:\n self.p2can_server_max = (rxmsg[2]<<8) | 
rxmsg[3]\n self.p2can_ext_server_max = ((rxmsg[4]<<8) | rxmsg[5]) * 10\n \n self.UpdateStage('SessionControlReq(10hex)', 'PROG_SESS_OK')\n self.Print('\\nUDS: RcvDiagProg %d %d'%(self.p2can_server_max, self.p2can_ext_server_max))\n self.todo = self.DIAG\n def RcvDiagDef(self, rxmsg):\n if rxmsg[0] == SID_NEG_RSP:\n self.todo = self.DIAG\n self.UpdateStage('SessionControlReq(10hex)', 'NEG_RSP')\n self.Print('\\nUDS: Negative Response')\n return\n if rxmsg[0] == SID_DIAG_RSP:\n self.p2can_server_max = (rxmsg[2]<<8) | rxmsg[3]\n self.p2can_ext_server_max = ((rxmsg[4]<<8) | rxmsg[5]) * 10\n \n self.UpdateStage('SessionControlReq(10hex)', 'DEF_SESS_OK')\n self.Print('\\nUDS: RcvDiagDef %d %d'%(self.p2can_server_max, self.p2can_ext_server_max))\n self.todo = self.DIAG \n \n def RcvReadDTC (self, rxmsg):\n if rxmsg[0] == SID_NEG_RSP:\n self.todo = self.DIAG\n self.UpdateStage('ReadDTCInformation(19hex)', 'NEG_RSP')\n self.Print('\\nUDS: Negative Response')\n return\n \n if rxmsg[0] == SID_READ_DTC_RSP:\n self.UpdateStage('ReadDTCInformation(19hex)', 'READ_DTC_OK')\n self.Print('\\nUDS: RcvReadDTC')\n \n def RcvReadData (self, rxmsg):\n if rxmsg[0] == SID_NEG_RSP:\n self.todo = self.DIAG\n self.UpdateStage('ReadDataByIdentifier(22hex)', 'NEG_RSP')\n self.Print('\\nUDS: Negative Response')\n return\n \n if rxmsg[0] == SID_READ_DATA_RSP:\n self.UpdateStage('ReadDataByIdentifier(22hex)', 'READ_DATA_OK')\n self.Print('\\nUDS: RcvReadData')\n \n \n def RcvWriteData (self, rxmsg):\n if rxmsg[0] == SID_NEG_RSP:\n self.todo = self.DIAG\n self.UpdateStage('WriteDataByIdentifier(2Ehex)', 'NEG_RSP')\n self.Print('\\nUDS: Negative Response')\n return\n \n if rxmsg[0] == SID_WRITE_DATA_RSP:\n self.UpdateStage('WriteDataByIdentifier(2Ehex)', 'WRITE_DATA_OK')\n self.Print('\\nUDS: RcvWriteData')\n \n def RcvComCtrl (self, rxmsg):\n if rxmsg[0] == SID_NEG_RSP:\n self.todo = self.DIAG\n self.UpdateStage('CommunicationControl(28hex)', 'NEG_RSP')\n self.Print('\\nUDS: Negative Response')\n return\n \n if rxmsg[0] == SID_COM_RSP:\n self.UpdateStage('CommunicationControl(28hex)', 'COMCTRL_OK')\n self.Print('\\nUDS: RcvComCtrl')\n \n \n \n \n#-----------------------------------------------------------\n# Unit Test\n#-----------------------------------------------------------\n# Test Ends\n# 1. ES95485\n# 1.1. Download failure case\n# - No response from the ECU -> 'Stop' -> ECU Reset -> 'Start' -> 'Diag..'\n# 1.2. Operation mode - AUTO, MANUAL\n# 1.3. CAN channel selection - Channel1, Channel2\n# 1.4. Retransmission mode\n# - Time out of Response -> ?\n# 2. ISO14229\n# 2.1. Download failure case\n# - No response from the ECU -> 'Stop' -> ECU Reset -> 'Start' -> 'Diag..'\n# 2.2. Operation mode - AUTO, MANUAL\n# 2.3. CAN channel selection - Channel1, Channel2\n# 2.4. 
Retransmission mode\n# - Time out of Response -> ?\n#-----------------------------------------------------------\n","sub_path":"udscan.py","file_name":"udscan.py","file_ext":"py","file_size_in_byte":85834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"559895466","text":"__author__ = 'chenzhongpu'\n\n\nimport feedparser\nfrom util import HTMLStripper, TerminalColor\n\n\ndef getWeibo():\n # Hanhan's weibo account ID\n weiBoId = \"1191258123\"\n rssUrl = \"http://pipes.yahoo.com/pipes/pipe.run?_id=c056f83e95a119e8466745db03b0e9e0&_render=rss&weibo=\" + weiBoId\n d = feedparser.parse(rssUrl)\n for i, post in enumerate(d.entries):\n s = HTMLStripper.HTMLStripper()\n s.feed(post.summary)\n if len(s.get_data()) == 0:\n print('[多媒体资源]')\n else:\n print(s.get_data().rstrip())\n print(TerminalColor.colors.OKGREEN + '---------' + TerminalColor.colors.ENDC)\n","sub_path":"weibo/GetWeibo.py","file_name":"GetWeibo.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"347728740","text":"#变量\ncolor = 'green'\n\n#函数\ndef myfunc():\n print('心如上慈善晚会一分钱没有~')\n\n#类\nclass Human:\n sex = 'man'\n age = 18\n name = '心如'\n\n def say():\n print('周杰当年强吻我~')\n\n def cry():\n print('喷壶哭晕在厕所~')\n\n\n#用于测试的代码\n#表示直接运行当前文件\nif __name__ == '__main__':\n print('直接运行当前文件')\n print(color)\n Human.say()\n\n\n","sub_path":"3/08错误异常和包/myfile.py","file_name":"myfile.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"350487283","text":"def min(a: int, b: int, c: int, d: int):\n min1 = a if a < b else b\n min2 = c if c < d else d\n return min1 if min1 < min2 else min2\n\narr=list(map(int,input().split()))\na = arr[0]\nb = arr[1]\nc = arr[2]\nd = arr[3]\n\nprint(min(a, b, c, d))\n\n","sub_path":"week8/5/306.py","file_name":"306.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"84667544","text":"import time\n# 1. check all valid moves\n# 2. create all possible children (check all ancestors)\n# 3. append children to list\n# 4. save range of children where are in the list\n# 5. check if children in range are solution\n# 6. 
again\n\n\nclass Puzzle:\n def __init__(self, values, ancestors=[], moves=[]):\n # takes in array of values\n self.values = values\n self.ancestors = ancestors # array of indeces of all ancestors\n self.moves = moves\n self.possible_moves = {\n \"shifts\": [1, -1, 3, -3],\n \"words\": [\"Right\", \"Left\", \"Down\", \"Up\"]\n }\n\n def __str__(self):\n string = \"layer : \" + str(len(self.ancestors)) + \"\\n\"\n string += \"ancestors : \" + str(self.ancestors) + \"\\n\"\n string += \"moves : \"\n for move in self.moves:\n string += self.possible_moves[\"words\"][self.possible_moves[\"shifts\"].index(\n move)] + \" \"\n string += \"\\n\"\n for value in self.values:\n string = string + \\\n str(value) + \"\\n\" if self.values.index(value) % 3 == 2 else string + \\\n str(value) + \" \"\n return string\n\n def get_values(self):\n return self.values\n\n def get_moves(self):\n return self.moves\n\n def get_indeces_of_ancestors(self):\n return self.ancestors\n\n def get_valid_states(self):\n position_of_zero = self.values.index(0)\n shifts = [1, -1, 3, -3] # right, left, down, up\n valid_states = []\n for shift in shifts:\n new_index = shift + position_of_zero\n if 0 <= new_index < len(self.values):\n if not ((position_of_zero in [3, 6] and shift == -1) or (position_of_zero in [2, 5] and shift == 1)):\n # deep copy to create copy and not link\n temp_values = self.values[:]\n temp_values[new_index], temp_values[position_of_zero] = temp_values[position_of_zero], temp_values[new_index]\n valid_states.append({\n \"array\": temp_values,\n \"direction\": shift})\n return valid_states\n\n\nclass Solver:\n def __init__(self, root, goal, max_layers=25, time_elapsed=0):\n self.tree = [root]\n self.goal = goal\n self.max_layers = max_layers\n\n def get_ancestors(self, node_indeces_of_ancesters):\n return [self.tree[index] for index in node_indeces_of_ancesters]\n\n def create_children_of_node(self, node, parent_node_index):\n states = node.get_valid_states()\n indeces_of_ancestors = node.get_indeces_of_ancestors()\n if indeces_of_ancestors is not None:\n ancestors = self.get_ancestors(indeces_of_ancestors)\n ancestors_values = [ancestor.get_values()\n for ancestor in ancestors]\n for state in states:\n if state[\"array\"] not in ancestors_values:\n new_child = Puzzle(state[\"array\"], indeces_of_ancestors + [\n parent_node_index], node.get_moves() + [state[\"direction\"]])\n self.tree.append(new_child)\n\n def show_solution(self, node):\n for index in node.ancestors:\n print(self.tree[index])\n print(\"\\n\")\n print(node)\n print(\"tree length : \" + str(len(self.tree)))\n\n def solve(self):\n left = 0 # index to keep the children in the tree seperate\n start = time.time()\n while True:\n temp = len(self.tree)\n # checks the solution and adds a new layer of nodes\n for index in range(left, len(self.tree)):\n node = self.tree[index]\n if node.get_indeces_of_ancestors() is not None:\n if len(node.get_indeces_of_ancestors()) > self.max_layers:\n print(\"Stopping at layer {}\".format(self.max_layers))\n print(\"self.tree: \" + str(len(self.tree)))\n self.time_elapsed = time.time() - start\n return False\n if node.get_values() == self.goal.get_values():\n # FOUND IT!\n self.time_elapsed = time.time() - start\n return node\n else:\n self.create_children_of_node(node, index)\n left = temp\n\n\nif __name__ == '__main__':\n root1 = Puzzle([0, 1, 3, 4, 2, 5, 7, 8, 6])\n root2 = Puzzle([1, 2, 3, 0, 5, 6, 4, 7, 8])\n root3 = Puzzle([0, 8, 3, 4, 6, 5, 7, 1, 2])\n goal = Puzzle([1, 2, 3, 4, 5, 6, 7, 8, 0])\n solver = 
Solver(root3, goal)\n if solver.solve():\n solver.show_solution(solver.solve())\n","sub_path":"math/8puzzle.py","file_name":"8puzzle.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"489080675","text":"from django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom page.models import Page\n\n\ndef create_user():\n u = User.objects.first()\n if not u:\n u = User.objects.create_user('F', 'hi2@blocl.com', 'test*pw')\n u.save()\n return u\n\n\nclass PageTestCase(TestCase):\n def setUp(self):\n \"\"\"Add a page\"\"\"\n create_user()\n\n p = Page(body='Lorem ipsum...', title='My title')\n p.save()\n\n def test_model(self):\n \"\"\"Test a page exists\"\"\"\n p = Page.objects.first()\n self.assertEqual(p.pk, 1)\n self.assertEqual(p.slug, 'my-title')\n self.assertEqual(p.title, 'My title')\n","sub_path":"page/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"491109249","text":"# coding: utf-8\nfrom __future__ import absolute_import\n\n\n#/\ndef tuple_offset(tup, offset):\n #/\n lst = list(tup)\n\n for idx, val in enumerate(offset):\n lst[idx] += val\n\n res = tuple(lst)\n\n return res\n\n#/\ndef tuple_incr_last(tup):\n lst = list(tup)\n\n lst[-1] += 1\n\n return tuple(lst)\n","sub_path":"src/aoikdyndocdsl/dep/aoiktupleutil.py","file_name":"aoiktupleutil.py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"555174373","text":"from tkinter import *\nimport csv\n\nroot = Tk()\nroot.wm_title(\"RP\")\n\nlistFrame = Frame(root, bd=5)\nlistFrame.grid(row=1, column=2)\n\nlistbox1 = Listbox(listFrame)\nlistbox1.insert(1, \"Option1\")\nlistbox1.insert(2, \"Option2\")\nlistbox1.insert(3, \"Option3\")\nlistbox1.pack()\n\ninfoFrame = Frame(root, bd=5)\ninfoFrame.grid(row=1, column=3)\n\ninfo_message = Message(infoFrame, width=300)\ninfo_message.pack()\n\n# Read stats from file\nstat_file = open('DiceTest.csv', 'rU')\nall_stats = list(csv.reader(stat_file, delimiter=';'))\n\n\ndef list_selection(event):\n # gets selection and info/stats for info_message\n index = int(listbox1.curselection()[0])\n stats = all_stats[index]\n\n infotext = str(stats[0]) # just the name\n for n in range(int((len(stats)-2)/2)): # rest of the stats\n infotext += ('\\n' + str(stats[n*2 + 2]) + '\\t' + str(stats[n*2 + 3]))\n\n info_message.config(text=infotext)\n\nlistbox1.bind('', list_selection)\n\ndef load():\n top = Toplevel()\n top.geometry('300x100')\n\n index = int(listbox1.curselection()[0])\n stats = all_stats[index]\n\n # some way to create arbitrary buttons/labels here (?)\n\nload_button = Button(root, text='Load', command=load)\nload_button.grid(row=2, column=2)\n\nroot.mainloop()","sub_path":"Training/testinglabelrefresh.py","file_name":"testinglabelrefresh.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"344406889","text":"import os, time, sys\nfrom glob import glob\nimport pathlib\nimport argparse\nimport shutil\nimport random\nimport numpy as np\n\n# import monai\n# from monai.data import PersistentDataset\n# from monai import transforms as mt\n# from monai.utils import set_determinism\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import 
DataLoader\nfrom torchvision import transforms, datasets, models\n\nfrom PersistentDataset import PersistentDataset\n\nfrom model.unet import ResNetUNet\n# from model.TernausNet import UNet11, LinkNet34, UNet, UNet16, AlbuNet\n# from model.UNet_3Plus import UNet_3Plus\n# from model.network import R2U_Net, AttU_Net, R2AttU_Net\n\nfrom collections import defaultdict\nfrom loss import dice_loss\n\nimport matplotlib \nmatplotlib.use('Agg') \nimport matplotlib.pyplot as plt\n\n\n######################################################\n\n# moddel_list = {'UNet11': UNet11,\n# 'UNet16': UNet16,\n# 'UNet': UNet,\n# 'AlbuNet': AlbuNet,\n# 'LinkNet34': LinkNet34}\n\npjoin = os.path.join\n\ndef reverse_transform(inp):\n inp = inp.numpy().transpose((1, 2, 0))\n inp = np.clip(inp, 0, 1)\n inp = (inp * 255).astype(np.uint8)\n return inp\n\ndef calc_loss(pred, target, metrics, bce_weight=0.5):\n bce = F.binary_cross_entropy_with_logits(pred, target)\n pred = torch.sigmoid(pred)\n iou, dice = dice_loss(pred, target)\n loss = bce * bce_weight + dice * (1 - bce_weight)\n\n metrics['iou'] += iou.data.cpu().numpy() * target.size(0)\n metrics['bce'] += bce.data.cpu().numpy() * target.size(0)\n metrics['dice'] += dice.data.cpu().numpy() * target.size(0)\n metrics['loss'] += loss.data.cpu().numpy() * target.size(0)\n\n return loss, iou\n\n\ndef print_metrics(metrics, epoch_samples, phase):\n outputs = []\n for k in metrics.keys():\n outputs.append(\"{}: {:4f}\".format(k, metrics[k] / epoch_samples))\n\n print(\"{}: {}\".format(phase, \", \".join(outputs)))\n\n\ndef get_transforms(args):\n\n val_trans = transforms.Compose([\n transforms.ToTensor(),\n ])\n \n return val_trans\n\n######################################################\n\ndef validation(args, model, device, val_loader):\n metrics = defaultdict(float)\n epoch_samples = 0\n\n model.eval()\n \n with torch.no_grad():\n metric_count = 0\n metric_sum = 0.0\n\n for img_idx, val_data in enumerate(val_loader):\n \n s_val_load1 = time.time()\n val_images = val_data['img'].to(device, dtype=torch.float32)\n s_val_load2 = time.time()\n\n val_labels = val_data['seg'].to(device, dtype=torch.float32)\n print(\"================= Load img %i: %f ms\" % (img_idx+1, (s_val_load2-s_val_load1)*1000))\n\n s1_val = time.time()\n val_outputs = model(val_images)\n s2_val = time.time()\n print(\"================= process img %i: %f ms\" % (img_idx+1, (s2_val-s1_val)*1000))\n print(\">>>>>>> Total %i: %f ms <<<<<<\" % (img_idx+1, (s2_val-s_val_load1)*1000))\n\n loss, iou = calc_loss(val_outputs, val_labels, metrics)\n epoch_samples += val_images.size(0)\n\n # ---------------------------------\n ## Plot\n val_outputs = torch.sigmoid(val_outputs)\n for i in range(val_labels.shape[0]):\n for j in range(1, val_labels.shape[1]):\n val_labels[i,j,:,:] += val_labels[i,0,:,:]*0.5\n val_outputs[i,j,:,:] += val_outputs[i,0,:,:]*0.5\n\n fldr = \"plot_output\"\n try:\n os.makedirs(fldr, exist_ok=True)\n except TypeError:\n raise Exception(\"Direction not create!\")\n f, axarr = plt.subplots(3)\n axarr[0].imshow(reverse_transform(val_images.squeeze_().cpu()))\n axarr[1].imshow(reverse_transform(val_labels.squeeze_()[1:4, :, :].cpu()))\n axarr[2].imshow(reverse_transform(val_outputs.squeeze_()[1:4, :, :].cpu()))\n plt.savefig(fldr+\"/val_\"+str(img_idx)+'.png')\n # ---------------------------------\n\n print_metrics(metrics, epoch_samples, 'val')\n epoch_loss = metrics['loss'] / epoch_samples\n epoch_iou = metrics['iou'] / epoch_samples\n\n return epoch_loss, 
epoch_iou\n\n######################################################\n\ndef main(args):\n\n ###################################\n # Path\n ###################################\n\n # config = wandb.config\n \n data_folder = './data'\n tmp_path = './tmp'\n\n shutil.rmtree(tmp_path, ignore_errors=True)\n persistent_cache = pathlib.Path(tmp_path, \"persistent_cache\")\n persistent_cache.mkdir(parents=True, exist_ok=True)\n # set_determinism(seed=0)\n\n ###################################\n # Dataset\n ###################################\n\n images = sorted(glob(pjoin(data_folder, 'images', '*.npz')))\n segs = sorted(glob(pjoin(data_folder, 'masks', '*.npz')))\n\n data_dicts = [\n {\"img\": image_name, \"seg\": label_name}\n for image_name, label_name in zip(images, segs)\n ]\n\n print(data_dicts)\n\n # random.shuffle(data_dicts)\n\n # val_idx = int(0.2*len(images))\n val_idx = int(len(images))\n val_files = data_dicts[-val_idx:]\n\n val_trans = get_transforms(args)\n\n # # train_ds = PersistentDataset(data=train_files, transform=train_trans, cache_dir=persistent_cache)\n val_ds = PersistentDataset(data=val_files, transform=None, cache_dir=persistent_cache)\n\n # train_loader = DataLoader(train_ds, batch_size=args.batch_size, shuffle=True, num_workers=8, pin_memory=torch.cuda.is_available())\n val_loader = DataLoader(val_ds, batch_size=args.test_batch_size, num_workers=4)#, pin_memory=torch.cuda.is_available())\n\n\n\n ###################################\n # Test\n ###################################\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model = ResNetUNet(n_class=args.num_classes)\n\n # if args.model == 'ResNetUNet':\n # model = ResNetUNet(n_class=args.num_classes)\n # elif args.model == 'UNet3+':\n # model = UNet_3Plus(n_classes=args.num_classes)\n # elif args.model =='R2U_Net':\n # model = R2U_Net(img_ch=3, output_ch=args.num_classes, t=3)\n # elif args.model =='AttU_Net':\n # model = AttU_Net(img_ch=3, output_ch=args.num_classes)\n # elif args.model == 'R2AttU_Net':\n # model = R2AttU_Net(img_ch=3, output_ch=args.num_classes, t=3)\n # elif args.model == 'UNet':\n # model = UNet(num_classes=args.num_classes)\n # else:\n # model_name = moddel_list[args.model]\n # model = model_name(num_classes=args.num_classes, pretrained=False)\n\n model = model.to(device) \n\n # # ----------------------------\n # from torchsummary import summary\n # from prettytable import PrettyTable\n\n # summary(model, input_size=(3, 480, 640))\n\n # def count_parameters(model):\n # table = PrettyTable([\"Modules\", \"Parameters\"])\n # total_params = 0\n # for name, parameter in model.named_parameters():\n # param = parameter.numel()\n # table.add_row([name, param])\n # total_params+=param\n # print(table)\n # print(f\"Total Trainable Params: {total_params}\")\n # return total_params\n \n # count_parameters(model)\n # # --------------------------------\n\n\n model_path = os.path.join(args.save_dir, args.load_model_name)\n \n model.load_state_dict(torch.load(model_path)) # admm train need basline model\n print(\"Load Model!!\", args.load_model_name) \n model.cuda()\n\n epoch_loss, epoch_iou = validation(args, model, device, val_loader)\n\n # # --------------------------------\n # print(\"+++++++++++++++++++++++++++++\")\n # from ptflops import get_model_complexity_info\n # ### modified flops_counter.py for zero weight [conv_flops_counter_hook() & linear_flops_counter_hook()]\n # with torch.cuda.device(0):\n # macs, params = get_model_complexity_info(model, (3, 480, 640), 
as_strings=True,\n # print_per_layer_stat=False, verbose=False)\n # print('{:<30} {:<8}'.format('Computational complexity: ', macs))\n # print('{:<30} {:<8}'.format('Number of parameters: ', params))\n # print(\"+++++++++++++++++++++++++++++\")\n # # --------------------------------\n\n# ---------------------------------------------------------\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n\n # parser.add_argument(\"--dims\", default=(224, 224), type=list)\n parser.add_argument(\"--model\", default='ResNetUNet', type=str,\n help='[ResNetUNet, UNet, UNet11, UNet16, AlbuNet, LinkNet34]')\n parser.add_argument(\"--num_classes\", default=4, type=int)\n parser.add_argument(\"--val_inter\", default=1, type=int)\n parser.add_argument(\"--test_batch_size\", default=1, type=int)\n parser.add_argument('--save_dir', type=str, default=\"./ckpt\", metavar='N', help='Directory to save checkpoints')\n parser.add_argument('--load_model_name', type=str, default=\"ResNetUNet_47.pt\", metavar='N', help='Model name')\n\n args = parser.parse_args()\n # print(args)\n \n ###################################\n\n args, unknown = parser.parse_known_args()\n # wandb.init(config=args)\n # wandb.config.update(args)\n \n main(args)\n","sub_path":"test_multi.py","file_name":"test_multi.py","file_ext":"py","file_size_in_byte":9415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"598994210","text":"#!/usr/bin/env python\nimport click\nimport numpy as np\nimport rasterio\nimport csv\n\n\n@click.command()\n@click.argument('raster1', metavar='', nargs=1, type=click.Path(exists=True, resolve_path=True))\n@click.argument('raster2', metavar='', nargs=1, type=click.Path(exists=True, resolve_path=True))\n@click.argument('csvfile', metavar='', nargs=1, type=click.Path(exists=True, resolve_path=True))\n@click.argument('output', metavar='', nargs=1, type=click.Path(resolve_path=True))\n@click.option('--header', metavar='', type=bool, default=True, show_default=True, help='Does CSV have header?')\n@click.option('--delim', metavar='', type=str, default=',', show_default=True, help='CSV Text delimiter')\n@click.option('--quote', metavar='', type=str, default='\"', show_default=True, help='CSV quote character')\n@click.option('--format', metavar='', type=str, default='GTiff', show_default=True, help='Output file format')\ndef create_strata(raster1, raster2, csvfile, header, output, delim, quote, format):\n \"\"\" Script to create a stratification raster from two maps\n using a lookup table\"\"\"\n\n ds1 = rasterio.open(raster1)\n ds2 = rasterio.open(raster2)\n\n with open(csvfile) as f:\n csvreader = csv.reader(f, delimiter=str(delim), quotechar=str(quote))\n if header:\n next(csvreader)\n lut_file = {(int(row[0]), int(row[1])): int(row[2]) for row in csvreader}\n\n # Calculate output dtype based on LUT values\n out_dt = 'byte'\n if min(lut_file.values()) < 0:\n # Must be signed int\n if max(np.abs(lut_file.values())) < 2 ** 15:\n out_dt = 'int8'\n elif max(np.abs(lut_file.values())) < 2 ** 31:\n out_dt = 'int32'\n elif max(np.abs(lut_file.values())) < 2 ** 63:\n out_dt = 'int64'\n else:\n click.echo('Required output data type is unknown')\n click.Abort()\n\n else:\n # Can be unsigned\n if max(lut_file.values()) < 2 ** 8:\n out_dt = 'uint8'\n elif max(lut_file.values()) < 2 ** 16:\n out_dt = 'uint16'\n elif max(lut_file.values()) < 2 ** 32:\n out_dt = 'uint32'\n elif max(lut_file.values()) < 2 ** 64:\n out_dt = 'uint64'\n else:\n click.echo('Required output 
data type is unknown')\n click.Abort()\n\n # Init output\n y1 = ds1.read(1)\n y2 = ds2.read(1)\n profile = ds1.profile\n profile.update(driver=format, dtype=out_dt)\n map = np.zeros_like(y1, dtype=out_dt)\n\n # Match codes and create map.\n for k, v in lut_file.items():\n m1 = np.in1d(y1, k[0]).reshape(y1.shape)\n m2 = np.in1d(y2, k[1]).reshape(y2.shape)\n m_1_2 = np.logical_and(m1, m2)\n\n # Add to map\n map[m_1_2] = v\n\n # Save using the profile taken from the input file directly. If needed, options\n # could be specified manually or by updating the profile itself. Map.astype\n # required to match the types and avoid writing larger files than needed\n\n with rasterio.open(output, mode='w', **profile) as dst:\n dst.write(map.astype(out_dt), 1)\n\n\nif __name__ == '__main__':\n create_strata()\n","sub_path":"multi_scene/6_stratification/helper_scripts/create_strata.py","file_name":"create_strata.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"602149069","text":"'''\n@author: Andrew Abi-Mansour (andrew.gaam [at] gmail [dot] com)\n@date: March 23, 2019\n'''\n\n\n__version__ = \"2018.9.2.10\"\n\nimport subprocess\nimport setuptools\nimport os, sys, shutil\nimport glob, re\nfrom distutils.command.build import build\nfrom setuptools.command.build_py import build_py\nfrom distutils.command.clean import clean\nfrom collections import defaultdict\n\nclass RDKitBuild(build):\n\n def find(self, fname, path):\n\n for root, dirs, files in os.walk(path):\n if fname in files:\n return os.path.join(root, fname)\n\n return None\n\n def run(self):\n\n python_version = str(sys.version_info[0]) + str(sys.version_info[1])\n\n python_lib = self.find('libpython{}.so'.format(python_version).format(python_version), '/') \n python_exec = sys.executable\n\n if not python_lib:\n print('Could not find any installed python-dev (libpython.so) library.')\n print('Proceeding ...')\n cm_args = ['-DPYTHON_EXECUTABLE=' + python_exec]\n else:\n cm_args = ['-DPYTHON_LIBRARY=' + python_lib, '-DPYTHON_EXECUTABLE=' + python_exec]\n\n os.chdir('src/build')\n try:\n import ninja\n except:\n self.spawn(cmd=['cmake', '..'] + cm_args)\n self.spawn(cmd=['make', '-j2', 'install'])\n else:\n self.spawn(cmd=['cmake', '-GNinja', '..'] + cm_args)\n self.spawn(cmd=['ninja'])\n\n os.chdir('../..')\n super().run()\n\nclass RDKitBuild_py(build_py):\n\n def find(self, fname, path):\n\n for root, dirs, files in os.walk(path):\n if fname in files:\n return os.path.join(root, fname)\n\n return None\n\n def run(self):\n\n python_version = str(sys.version_info[0]) + str(sys.version_info[1])\n\n python_lib = self.find('libpython{}.so'.format(python_version).format(python_version), '/') \n python_exec = sys.executable\n \n if not python_lib:\n print('Could not find any installed python-dev (libpython.so) library.')\n print('Proceeding ...')\n cm_args = ['-DPYTHON_EXECUTABLE=' + python_exec]\n else:\n cm_args = ['-DPYTHON_LIBRARY=' + python_lib, '-DPYTHON_EXECUTABLE=' + python_exec]\n\n os.chdir('src/build')\n try:\n import ninja\n except:\n self.spawn(cmd=['cmake', '..'] + cm_args)\n self.spawn(cmd=['make', 'install'])\n else:\n self.spawn(cmd=['cmake', '-GNinja', '..'] + cm_args)\n self.spawn(cmd=['ninja'])\n\n os.chdir('../..')\n super().run()\n\nclass RDKClean(clean):\n\n def delObjFiles(self, fdir):\n\n dfile_ext = ['tar.gz', '.so.1', '.so', '.log', '.a']\n\n for path in glob.glob(fdir):\n if os.path.isdir(path):\n self.delObjFiles(path + '/*')\n 
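A tiny worked example of the lookup-table logic in create_strata.py above: each `(code1, code2)` pair read from the two rasters maps to one stratum value. Plain equality is enough here because every LUT key holds single scalar codes, which is also what the record's `np.in1d(...).reshape(...)` calls reduce to:

import numpy as np

y1 = np.array([[1, 1], [2, 2]])
y2 = np.array([[10, 20], [10, 20]])
lut = {(1, 10): 1, (1, 20): 2, (2, 10): 3, (2, 20): 4}

strata = np.zeros_like(y1, dtype='uint8')
for (c1, c2), v in lut.items():
    strata[np.logical_and(y1 == c1, y2 == c2)] = v
print(strata)  # [[1 2]
               #  [3 4]]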
elif os.path.isfile(path):\n for ext in dfile_ext:\n if path.endswith(ext):\n os.remove(path)\n print('Deleting ' + os.path.abspath(path))\n\n def delDownloadedFiles(self, fdir):\n\n dlist = ['build', 'External/rapidjson-1.1.0', 'External/catch/catch/',\n 'External/YAeHMOP/tmp/', 'External/YAeHMOP/src/',\n 'External/CoordGen/maeparser/', 'External/CoordGen/coordgen/',\n 'External/YAeHMOP/yaehmop/', 'lib',\n 'rdkit/Chem/inchi.py', 'Code/GraphMol/SLNParse/lex.yysln.cpp',\n 'Code/GraphMol/SLNParse/sln.tab.cpp', 'Code/GraphMol/SLNParse/sln.tab.hpp',\n 'Code/GraphMol/SmilesParse/lex.yysmarts.cpp', 'Code/GraphMol/SmilesParse/lex.yysmiles.cpp',\n 'Code/GraphMol/SmilesParse/smarts.tab.cpp', 'Code/GraphMol/SmilesParse/smarts.tab.hpp',\n 'Code/GraphMol/SmilesParse/smiles.tab.cpp', 'Code/GraphMol/SmilesParse/smiles.tab.hpp',\n 'Code/RDGeneral/RDConfig.h', 'Code/RDGeneral/export.h', 'Code/RDGeneral/test.h', \n 'Code/RDGeneral/versions.cpp', 'Code/RDGeneral/versions.h', 'rdkit/RDPaths.py',\n 'Data/eht_parms.dat', 'Data/templates.mae']\n\n for dobj in dlist:\n path = os.path.join(fdir, dobj)\n\n if os.path.isdir(path):\n shutil.rmtree(path)\n elif os.path.isfile(path):\n os.remove(path)\n else:\n continue\n print('Deleting ' + os.path.abspath(path))\n\n def run(self):\n self.delObjFiles('src/*')\n self.delDownloadedFiles('src')\n super().run()\n\ndef pre_build():\n\n if not os.path.isdir(\"src/build\"):\n os.mkdir('src/build')\n\n if not os.path.isdir(\"src/build/lib\"):\n os.mkdir('src/build/lib')\n\ndef data_include(src_dir, data_dirs):\n \"\"\" returns a list of tuples: [... (dir, [file1, file2, ...]) ...] \"\"\"\n\n output = defaultdict(list)\n\n for data_dir in data_dirs:\n # python >= v3.5, the glob module supports the \"**\" for recursive option\n dir_files = glob.iglob(os.path.join(src_dir, data_dir, '**/*'), recursive=True)\n\n for file in dir_files:\n if os.path.isfile(file):\n dir_name = file.strip(os.path.basename(file)).strip(src_dir)\n dir_name = dir_name[1:] if dir_name.startswith('/') else dir_name\n dir_name = dir_name[:-1] if dir_name.endswith('/') else dir_name\n\n if dir_name != 'lib':\n dir_name = os.path.join('rdkit', dir_name)\n\n output[dir_name].append(file)\n\n return [(key, value) for key, value in output.items()]\n\nif __name__ == '__main__':\n\n pre_build() # must be always called to ensure sdist is built correctly\n\n setuptools.setup(\n name = \"rdkit\",\n version = __version__,\n description = (\"A collection of cheminformatics and machine-learning software written in C++ and Python\"),\n license = \"BSD\",\n keywords = \"cheminformatics\",\n url = \"https://github.com/rdkit/rdkit\",\n packages=setuptools.find_packages('src'),\n package_dir={'rdkit': 'src/rdkit'},\n data_files=data_include('src', ['lib', 'Data', 'Projects', 'Docs', 'Scripts']),\n include_package_data=True,\n classifiers=[\n \"Topic :: Utilities\",\n \"License :: OSI Approved :: BSD License\",\n \t \"Programming Language :: Python :: 3\"\n ],\n zip_safe=False,\n cmdclass={'build_py': RDKitBuild_py, 'build': RDKitBuild, 'clean': RDKClean},\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":5957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"14052338","text":"\n# coding: utf-8\n\n# In[1]:\n\n# Do the import\nimport numpy as np\nimport csv\nimport cv2\nimport tensorflow as tf\nimport keras\nimport os\nimport sklearn\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom PIL 
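The `data_include()` helper near the end of the setup.py record above groups files into setuptools-style `(directory, [files])` tuples, prefixing every directory except `lib` with `rdkit`. A toy re-creation of that grouping on made-up paths, just to show the shape it returns:

import os
from collections import defaultdict

files = ['src/Data/a.dat', 'src/Data/sub/b.dat', 'src/lib/x.so']
output = defaultdict(list)
for f in files:
    rel_dir = os.path.dirname(f)[len('src/'):]
    key = rel_dir if rel_dir == 'lib' else os.path.join('rdkit', rel_dir)
    output[key].append(f)

print(dict(output))
# {'rdkit/Data': ['src/Data/a.dat'],
#  'rdkit/Data/sub': ['src/Data/sub/b.dat'],
#  'lib': ['src/lib/x.so']}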
import Image\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Lambda\nfrom keras.layers import Cropping2D\nfrom keras.layers.convolutional import Convolution2D\nfrom sklearn.utils import shuffle\n\n\n# In[5]:\n\n#Read the CSV file\nlines_of_csv = []\nwith open('../data/driving_log.csv') as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n lines_of_csv.append(line)\n\n#Removing the 0th line as it has names of columns\ndel lines_of_csv[0]\n \nprint (lines_of_csv[0]) \nprint (lines_of_csv[28])\nprint (len(lines_of_csv))\n\ntrain_data, validation_data = train_test_split(lines_of_csv, test_size=0.2)\nprint (\"CSV file read and the data is split\")\nprint (len(train_data))\nprint (len(validation_data))\n\n\n# In[3]:\n\n# Do the data generation\n\ndef generate_data (samples, training, batch_size = 32):\n num_samples = len(samples)\n \n while 1: # Loop forever so the generator never terminates\n shuffle(samples)\n for offset in range(0, num_samples, batch_size):\n batch_samples = samples[offset:offset+batch_size]\n \n #create the empty list of images and angles\n images = []\n angles = []\n\n for batch_sample in batch_samples:\n name = '../data/IMG/'+batch_sample[0].split('/')[-1]\n center_image = cv2.imread(name)\n center_angle = float(batch_sample[3])\n #center_image = image_process(center_image) \n images.append(center_image)\n angles.append(center_angle)\n #Flip the data for training only\n if training == True:\n center_image_flipped = cv2.flip(center_image, 1)\n #center_image_flipped = np.fliplr(center_image)\n center_angle_flipped = center_angle * -1.0\n images.append(center_image_flipped)\n angles.append(center_angle_flipped)\n \n X_train = np.array(images)\n y_train = np.array(angles)\n yield sklearn.utils.shuffle(X_train, y_train)\n\n\n# In[4]:\n\n#Do the image processing\n\ndef image_process (image):\n #Normalize the image\n \n #Crop the image\n \n #display the images\n return image\n \n\n\n# In[5]:\n\n#Build the model\n\ntraining = True\ntrain_generator = generate_data(train_data, training, batch_size=32)\ntraining = False\nvalidation_generator = generate_data(validation_data, training, batch_size=32)\n\n\nmodel = Sequential()\n\n# Do the image normalization\nmodel.add(Lambda(lambda x: x/127.5 - 1., input_shape=(160, 320, 3)))\n# Crop the bonet and thse sky/trees\nmodel.add(Cropping2D(cropping=((70,25), (0,0))))\n#Implement the Nvidia E2E Learning Network\nmodel.add(Convolution2D(24,5,5, subsample=(2,2), activation=\"relu\"))\nmodel.add(Convolution2D(36,5,5, subsample=(2,2), activation=\"relu\"))\nmodel.add(Convolution2D(48,5,5, subsample=(2,2), activation=\"relu\"))\nmodel.add(Convolution2D(64,3,3, activation=\"relu\"))\nmodel.add(Convolution2D(64,3,3, activation=\"relu\"))\nmodel.add(Flatten())\nmodel.add(Dense(100))\nmodel.add(Dense(50))\nmodel.add(Dense(10))\n\n#9. 
Output\nmodel.add(Dense(1))\n\nmodel.compile(loss='mse', optimizer = 'adam')\n#model.fit (X_train, y_train, validation_split=0.2, shuffle=True, nb_epoch = 7)\n\nmodel.fit_generator(train_generator, samples_per_epoch= len(train_data)*2, validation_data=validation_generator, nb_val_samples=len(validation_data), nb_epoch=5)\n\nmodel.save('model.h5')\n\n\n# In[ ]:\n\n#Show the histogram of the steering angle\ngroups_of_sa = 100\nsa_per_group = int (len(measurements)/groups_of_sa)\n#sa_per_group += 1\nprint (sa_per_group)\n\nhist, bins = np.histogram (measurements, sa_per_group)\nbins = bins[:-1]\n\nprint (hist)\nprint (bins)\n\nplt.bar ( bins, hist, align = 'center', width = 0.1)\nplt.ylabel('Steering angle per group')\nplt.xlabel('Total groups ')\nplt.show()\n\n","sub_path":"CarND-Behavioral-Cloning-P3/Attempts/Model_5.py","file_name":"Model_5.py","file_ext":"py","file_size_in_byte":4140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"139691516","text":"from nltk import sent_tokenize, word_tokenize\r\nfrom nltk.tokenize import PunktSentenceTokenizer, RegexpTokenizer, TreebankWordTokenizer, WordPunctTokenizer, \\\r\n WhitespaceTokenizer\r\nfrom features.process_text.patterns import get_sentence_token_pattern, get_word_token_pattern\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\n\r\n\r\n_sentence_tokenizer_default = sent_tokenize\r\n\r\n_sentence_tokenizer_punkt = PunktSentenceTokenizer.tokenize\r\n\r\n_sentence_tokenizer_regex = RegexpTokenizer(pattern=get_sentence_token_pattern(), gaps=True).tokenize\r\n\r\n_SENTENCE_TOKENIZER_DICT = {\r\n 'default': _sentence_tokenizer_default,\r\n 'punkt': _sentence_tokenizer_punkt,\r\n 'regex': _sentence_tokenizer_regex\r\n}\r\n\r\n\r\ndef sentence_tokenize(text, sentence_tokenizer_id='default'):\r\n \"\"\"\r\n Tokenizes a sentence, based on a given tokenizer.\r\n Args:\r\n text: A string, describing a sentence.\r\n sentence_tokenizer_id: A tokenizer key, taken from SENTENCE_TOKENIZER_DICT.\r\n Returns:\r\n A tokenized sentence.\r\n \"\"\"\r\n # If text is empty, return None.\r\n if not text: return None\r\n sentence_tokenizer = _SENTENCE_TOKENIZER_DICT.get(sentence_tokenizer_id)\r\n return sentence_tokenizer(text)\r\n\r\n\r\n_word_tokenizer_default = word_tokenize\r\n\r\n_word_tokenizer_treebank = TreebankWordTokenizer().tokenize\r\n\r\n_word_tokenizer_regex = RegexpTokenizer(pattern=get_word_token_pattern(), gaps=False).tokenize\r\n\r\n_word_tokenizer_punkt = WordPunctTokenizer().tokenize\r\n\r\n_word_tokenizer_whitespace = WhitespaceTokenizer().tokenize\r\n\r\n_WORD_TOKENIZER_DICT = {\r\n 'default': _word_tokenizer_default,\r\n 'treebank': _word_tokenizer_treebank,\r\n 'regex': _word_tokenizer_regex,\r\n 'punkt': _word_tokenizer_punkt,\r\n 'whitespace': _word_tokenizer_whitespace\r\n}\r\n\r\n\r\ndef word_tokenize(text, word_tokenizer_id='default'):\r\n \"\"\"\r\n Word-tokenizes a given sentence, based on a defined tokenizer.\r\n Args:\r\n sentence: A string, corresponding to a sentence.\r\n word_tokenizer_id: A key from WORD_TOKENIZER_DICT\r\n Returns:\r\n A list of words, corresponding to the tokenized sentence.\r\n \"\"\"\r\n # If text is empty, return None.\r\n if not text: return None\r\n word_tokenizer = _WORD_TOKENIZER_DICT.get(word_tokenizer_id)\r\n tokens = None\r\n try:\r\n tokens = word_tokenizer(text)\r\n except TypeError:\r\n print(\"ERROR:\")\r\n print(text)\r\n return None\r\n tokens = [token.strip() for token in tokens]\r\n return tokens\r\n\r\ndef 
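The histogram cell at the bottom of the Model_5.py record references a `measurements` list that this version of the script never builds. Since column 3 of each CSV row is read as the steering angle elsewhere in the file, the list it expects can be recovered with (an inference from context, not original code):

# steering angle is the 4th CSV column, as used in generate_data()
measurements = [float(line[3]) for line in lines_of_csv]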
is_tokenized(text):\r\n return type(text) == list\r\n\r\ndef merge_tokens(tokens):\r\n return ' '.join(tokens)\r\n\r\n\"\"\"adicionar categoria ao dicionário de cada lista\"\"\"\r\ndef add_category_IV(list):\r\n list2 = []\r\n for dict in list:\r\n dict['Category'] = 'Instant_Video'\r\n list2.append(dict)\r\n return list2\r\n\r\ndef add_category_B(list):\r\n list2 = []\r\n for dict in list:\r\n dict['Category'] = 'Baby'\r\n list2.append(dict)\r\n return list2\r\n\r\ndef add_category_DM(list):\r\n list2 = []\r\n for dict in list:\r\n dict['Category'] = 'Digital_Music'\r\n list2.append(dict)\r\n return list2\r\n\r\ndef add_category_MI(list):\r\n list2 = []\r\n for dict in list:\r\n dict['Category'] = 'Musical_Instruments'\r\n list2.append(dict)\r\n return list2\r\n\r\ndef word_tokenize_scikit(dataset):\r\n \"\"\"\r\n Previous class tokenization was done using ntlk. In this class we learn how to do it with scikit-learn\r\n Args:\r\n dataset: a collection of documents stored in a vector\r\n Returns:\r\n A list of words, corresponding to the indexed vocabulary of the dataset\r\n \"\"\"\r\n vectorizer = CountVectorizer()\r\n x = vectorizer.fit_transform(dataset)\r\n\r\n return x, vectorizer\r\n\r\n\r\n","sub_path":"features/process_text/tokenize.py","file_name":"tokenize.py","file_ext":"py","file_size_in_byte":3755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"338787880","text":"# -*- coding: utf-8 -*-\n\n\nfrom __future__ import unicode_literals\n\nimport re\nimport os\n\nfrom datetime import datetime\n\nfrom first import first\n\nfrom annoying.fields import AutoOneToOneField\n\nfrom django.core import validators\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.contrib.auth.hashers import (\n check_password,\n make_password,\n is_password_usable)\n\nfrom django.contrib.auth.models import (\n AbstractUser,\n BaseUserManager,\n PermissionsMixin)\n\nfrom django.dispatch import receiver\nfrom django.db.models.signals import (\n pre_save,\n post_save)\n\nfrom django.forms.models import model_to_dict\nfrom django.utils import timezone\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import pgettext_lazy\nfrom django.utils.translation import ugettext_lazy as _\nfrom unidecode import unidecode\nfrom django.core.urlresolvers import reverse\n\nfrom django_countries.fields import CountryField\nfrom phonenumber_field.modelfields import PhoneNumberField\n\nfrom django.db import models\n\nfrom timezone_field import TimeZoneField\nfrom django_countries.fields import CountryField\n\nfrom django_images.models import Image\n\nfrom django_messages.models import Message\n\nfrom django.db.models import Q\n\nfrom availableworks.settings import AUTH_USER_MODEL\n\nfrom availableworks.core.models.mixins import AWModel\nfrom availableworks.core.models.subscription import (\n Subscription,\n SubscriptionCharge)\n\nfrom availableworks.core.external.payment import (\n create_stripe_managed_account,\n retrieve_stripe_managed_account,\n create_stripe_bank_account,\n retrieve_stripe_bank_account,\n create_stripe_customer,\n retrieve_stripe_customer)\n\ndef _createHash():\n return os.urandom(40).encode('hex')\n\n\ndef _content_file_name(instance, filename):\n name = _createHash() + os.path.splitext(filename)[1]\n return '/'.join(['avatars', name])\n\nclass AWUser(AWModel, AbstractUser):\n id = models.AutoField(primary_key=True)\n\n USERNAME_FIELD = 'id'\n REQUIRED_FIELDS = 
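A short usage example for the tokenizer registry in the tokenize.py record above (assumes the relevant NLTK data, e.g. `punkt`, is installed):

text = "Dr. Smith arrived. He was late."
print(sentence_tokenize(text))            # e.g. ['Dr. Smith arrived.', 'He was late.']
print(word_tokenize(text, 'whitespace'))  # ['Dr.', 'Smith', 'arrived.', 'He', 'was', 'late.']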
['email']\n\nAWUser._meta.get_field('username').blank = True\nAWUser._meta.get_field('username').null = True\nAWUser._meta.get_field('first_name').blank = True\nAWUser._meta.get_field('first_name').null = True\nAWUser._meta.get_field('last_name').blank = True\nAWUser._meta.get_field('last_name').null = True\nAWUser._meta.get_field('email').null = False\nAWUser._meta.get_field('email').blank = False\nAWUser._meta.get_field('email')._unique = True\n\n\nclass UserProfile(AWModel):\n\n user = models.OneToOneField(\n AUTH_USER_MODEL,\n primary_key=True,\n related_name = 'profile')\n\n subscription = models.ForeignKey(\n Subscription,\n related_name = '+')\n\n addresses = models.ManyToManyField(\n 'Address',\n related_name='addresses')\n\n default_shipping_address = models.ForeignKey(\n 'Address',\n related_name='+',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n verbose_name=pgettext_lazy('User field', 'default shipping address'))\n\n default_billing_address = models.ForeignKey(\n 'Address',\n related_name='+',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n verbose_name=pgettext_lazy('User field', 'default billing address'))\n\n rating = models.IntegerField(\n null=True)\n\n occupation = models.CharField(\n max_length=200,\n default='Artist')\n\n website = models.URLField(\n max_length=200,\n blank=True)\n\n description = models.TextField(\n blank=True,\n help_text=pgettext_lazy(\n 'Public Profile Account Description',\n 'Please write as many details as you can about yourself or operation.'))\n\n EMAIL_YES_TYPE = 1\n EMAIL_NO_TYPE = 2\n EMAIL_CHOICES = (\n (EMAIL_YES_TYPE, 'Yes'),\n (EMAIL_NO_TYPE, 'No'))\n\n email_notification = models.IntegerField(\n choices = EMAIL_CHOICES,\n default = EMAIL_YES_TYPE,\n blank = False)\n\n ARTIST_TYPE = 1\n ORGANIZATION_TYPE = 2\n ACCOUNT_CHOICES = (\n (ARTIST_TYPE, 'Artist'),\n (ORGANIZATION_TYPE, 'Organization'))\n\n account_type = models.IntegerField(\n choices=ACCOUNT_CHOICES,\n default=ARTIST_TYPE)\n\n stripe_secret_key = models.CharField(\n max_length=200,\n blank=True,\n null=True)\n\n stripe_publishable_key = models.CharField(\n max_length=200,\n blank=True,\n null=True)\n\n stripe_account_id = models.CharField(\n max_length=200,\n blank=True,\n null=True)\n\n stripe_customer_id = models.CharField(\n max_length=200,\n blank=True,\n null=True)\n\n terms_agreed = models.BooleanField(\n default = False)\n\n terms_agreed_at = models.DateTimeField(\n null=True)\n\n def __repr__(self):\n return (\n 'UserProfile('\n 'user_id={user_id}, '\n 'occupation={occupation}, '\n 'website={website}, '\n 'account_type={account_type})'\n ).format(\n user_id = self.user.pk,\n occupation = self.occupation,\n website = self.website,\n account_type = self.account_type)\n\n def __unicode__(self):\n return unicode(str(self))\n\n def __str__(self):\n return repr(self)\n\n @property\n def display_id(self):\n return first([\n self.name,\n self.username,\n self.email])\n\n @property\n def name(self):\n names = [\n self.user.first_name,\n self.user.last_name]\n if any(names):\n return '{0} {1}'.format(\n self.user.first_name,\n self.user.last_name)\n\n @name.setter\n def name(self, new_name):\n if not new_name:\n self.user.first_name = None\n self.user.last_name = None\n else:\n parts = new_name.split()\n self.user.first_name = parts[0]\n self.user.last_name = ' '.join(parts[1:]) or None\n\n @property\n def username(self):\n return self.user.username\n\n @username.setter\n def username(self, new_username):\n self.user.username = new_username\n 
self.user.save()\n\n @property\n def email(self):\n return self.user.email\n\n @email.setter\n def email(self, new_email):\n self.user.email = new_email\n self.user.save()\n\n @property\n def location(self):\n location = None\n try:\n location = self.addresses[0].city\n except IndexError:\n location = 'Planet Earth'\n\n return location\n\n @property\n def default_credit_card(self):\n if not self.credit_cards:\n return None\n\n return self.credit_cards.order_by('-created_at').first()\n\n @property\n def default_bank_account(self):\n if not self.bank_accounts:\n return None\n\n return self.bank_accounts.order_by('-created_at').first()\n\n @property\n def all_works(self):\n return self.undeleted_works.all()\n\n @property\n def available_works(self):\n return self.undeleted_works.filter(available_stock__gte = 1).all()\n\n @property\n def sold_works(self):\n return self.works.filter(sold_stock__gte = 1).all()\n\n @property\n def undeleted_works(self):\n return self.works.filter(deleted=False)\n\n @property\n def can_sell_works(self):\n return self.subscription.selling_enabled\n\n def get_profile_url(self):\n if self.username:\n return reverse(\n 'uprofile:public-profile-by-username',\n args = (self.username,))\n return reverse(\n 'uprofile:public-profile-by-pk',\n args=(self.pk,))\n\n def get_owner_profile_url(self):\n return reverse(\n 'uprofile:owner-profile-by-pk',\n args=(self.pk,))\n\n def get_settings_url(self):\n return reverse(\n 'account:settings')\n\n def set_password(self, new_password):\n self.user.set_password(new_password)\n\n def get_or_create_stripe_account(self):\n if not self.stripe_account_id:\n stripe_account = create_stripe_managed_account(\n self.user.email)\n self.stripe_account_id = stripe_account['id']\n self.stripe_publishable_key = stripe_account['keys']['publishable']\n self.stripe_secret_key = stripe_account['keys']['secret']\n else:\n stripe_account = retrieve_stripe_managed_account(\n self.stripe_account_id)\n return stripe_account\n\n def get_or_create_stripe_customer_entry(self):\n if not self.stripe_customer_id:\n customer_entry = create_stripe_customer(self.user.email)\n self.stripe_customer_id = customer_entry['id']\n else:\n customer_entry = retrieve_stripe_customer(\n self.stripe_customer_id)\n return customer_entry\n\n def get_all_messages(self):\n return Message.objects.filter(\n (Q(sender = self.user) & ~Q(recipient = self.user)) |\n (~Q(sender = self.user) & Q(recipient = self.user)),\n recipient_deleted_at__isnull=True).order_by('sent_at').all()\n\n def get_all_threads(self):\n all_messages = self.get_all_messages()\n user_id = self.user.pk\n threads = {}\n\n for message in all_messages:\n contact_id = message.sender.pk\n if contact_id == user_id:\n contact_id = message.recipient.pk\n\n contact = AWUser.objects.get(pk=contact_id).profile\n try:\n thread = threads[contact]\n except KeyError:\n thread = threads[contact] = []\n\n thread.append(message)\n\n return threads\n\n def get_messages_for_contact(self, contact):\n contact_user = contact.user\n messages = Message.objects.filter(\n (Q(sender = self.user) & Q(recipient = contact_user)) |\n (Q(sender = contact_user) & Q(recipient = self.user))).order_by(\n 'sent_at').all()\n\n return messages\n\n def has_purchased_work(self, work):\n from availableworks.core.models.order import OrderLineItem\n\n line_item = OrderLineItem.objects.filter(\n work_id = work.pk\n ).filter(\n order__buyer = self)\n\n return bool(line_item)\n\n def owns_work(self, work):\n return work.owner == self\n\n def 
has_reviewed_work(self, work):\n from availableworks.core.models.order import OrderFeedback\n\n feedback = OrderFeedback.objects.filter(\n reviewer = self\n ).filter(\n work = work\n ).all()\n\n return bool(feedback)\n\n def get_earliest_unreviewed_order_for_work(self, work):\n from availableworks.core.models.order import Order\n\n orders = Order.objects.filter(\n buyer = self,\n feedback = None,\n line_items__work_id = work.pk\n ).order_by('created_at').first()\n\n return orders\n\n def mark_all_messages_read(self):\n unread = Message.objects.filter(\n recipient = self.user,\n read_at__isnull=True,\n recipient_deleted_at__isnull=True).all()\n\n for u in unread:\n u.read_at = datetime.now()\n u.save()\n\n def can_make_purchases(self):\n return all([\n self.default_billing_address,\n self.default_shipping_address,\n self.default_credit_card])\n\nclass ProfileCover(Image):\n profile = models.OneToOneField(\n 'uprofile.UserProfile',\n related_name = 'cover_image',\n primary_key=True,\n on_delete=models.CASCADE)\n\nclass ProfileAvatar(Image):\n profile = models.OneToOneField(\n 'uprofile.UserProfile',\n related_name = 'avatar_image',\n primary_key=True,\n on_delete=models.CASCADE)\n\nclass Address(models.Model):\n userprofile = models.ForeignKey(\n 'UserProfile',\n related_name='+')\n\n alias = models.CharField(\n max_length = 30,\n null=True)\n\n first_name = models.CharField(\n pgettext_lazy('Address field', 'first name'),\n max_length=256)\n\n last_name = models.CharField(\n pgettext_lazy('Address field', 'last name'),\n max_length=256)\n\n company_name = models.CharField(\n pgettext_lazy('Address field', 'company name'),\n max_length=256,\n blank=True)\n\n street_address_1 = models.CharField(\n pgettext_lazy('Address field', 'street address 1'),\n max_length=256)\n\n street_address_2 = models.CharField(\n pgettext_lazy('Address field', 'street address 2'),\n max_length=256,\n blank=True)\n\n city = models.CharField(\n pgettext_lazy('Address field', 'city'),\n max_length=256)\n\n country_area = models.CharField(\n pgettext_lazy('Address field', 'state'),\n max_length=128,\n blank=True)\n\n postal_code = models.CharField(\n pgettext_lazy('Address field', 'postal code'),\n max_length=20)\n\n USA = 'US'\n COUNTRY_CHOICES = (\n (USA, 'United States'),)\n\n country = models.CharField(\n pgettext_lazy('Address field', 'country'),\n choices=COUNTRY_CHOICES,\n default = USA,\n max_length=2)\n\n phone = PhoneNumberField(blank=True)\n\n class Meta:\n unique_together = (\n 'userprofile',\n 'alias')\n\n @models.permalink\n def get_absolute_url(self):\n return ('profile:address-edit',\n (),\n {'slug': self.get_slug(), 'pk': self.id})\n\n def get_slug(self):\n value = unidecode(self.alias)\n value = re.sub(r'[^\\w\\s-]', '', value).strip().lower()\n return mark_safe(re.sub(r'[-\\s]+', '-', value))\n\n def __repr__(self):\n\n return (\n 'Address('\n 'first_name={first_name}'\n 'last_name={last_name}'\n 'company_name={company_name}'\n 'street_address_1={street_address_1}'\n 'street_address_2={street_address_2}'\n 'city={city}'\n 'postal_code={postal_code}'\n 'country={country}'\n 'country_area={country_area}'\n 'phone={phone}'\n ).format(\n first_name = repr(self.first_name),\n last_name = repr(self.last_name),\n company_name = repr(self.company_name),\n street_address_1 = repr(self.street_address_1),\n street_address_2 = repr(self.street_address_2),\n city = repr(self.city),\n postal_code = repr(self.postal_code),\n country = repr(self.country),\n country_area = repr(self.country_area),\n phone = 
repr(self.phone))\n\n\ndef _create_profile_for_user(sender, instance, **kwargs):\n if not hasattr(instance, 'profile'):\n new_profile = UserProfile(user=instance)\n instance.profile = new_profile\n instance.save()\n new_profile.save()\n\ndef _set_default_subscription(sender, instance, **kwargs):\n try:\n instance.subscription\n except ObjectDoesNotExist:\n instance.subscription = Subscription.get_default()\n instance.save()\n\npost_save.connect(\n _create_profile_for_user,\n sender=AWUser,\n dispatch_uid='create_user_profile')\n\npre_save.connect(\n _set_default_subscription,\n sender=UserProfile,\n dispatch_uid='set_default_subscription')\n","sub_path":"availableworks/uprofile/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":15443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"574910329","text":"import logging\nimport random\nfrom datetime import date, datetime\n\nfrom flask import json\nfrom flask_script import Manager\n\nfrom airline.utair import app, mongo\n\n\nfrom airline.models import Client, Transaction # isort:skip\nfrom airline.libs import helpers as h # isort:skip\n\n\nlogger = logging.getLogger(__name__)\nmanager = Manager(app)\n\n\n@manager.command\ndef run():\n app.run()\n\n\n@manager.command\ndef clear_db():\n list(map(lambda name: not name.startswith('system.') and mongo.db.drop_collection(name),\n mongo.db.collection_names()))\n\n\n@manager.command\ndef prepare_db():\n clear_db()\n\n Client(name='Иванов И.И.', email='ivan@example.com', card_number='111-111').save()\n Client(name='Петров П.П.', email='petya@example.com', card_number='222-222').save()\n\n Transaction(card_number='111-111', bonus=12, flight_from='Барселона', flight_to='Рим',\n flight_date=datetime(2012, 12, 12)).save()\n Transaction(card_number='111-111', bonus=12, flight_from='Москва', flight_to='Харьков',\n flight_date=datetime(2014, 10, 21)).save()\n Transaction(card_number='111-111', bonus=12, flight_from='Салихард', flight_to='Орел',\n flight_date=datetime(2015, 9, 14)).save()\n Transaction(card_number='444-444', bonus=12, flight_from='Берлин', flight_to='Киев',\n flight_date=datetime(2015, 12, 10)).save()\n\n\n@manager.command\ndef send_code(email):\n client = Client.collection.get(email=email)\n if not client:\n logger.error('Клиент не найден')\n return\n\n code = h.generate_secret()()\n h.send_email(\"Ваш код для аутентификации: %s\" % code, email)\n\n client.auth_code = code\n client.access_token = None\n client.save()\n\n\n@manager.command\ndef generate_transactions(count=1000, filename='transactions.json'):\n card_numbers = ['111-111', '222-222', '333-333', '444-444', '555-555']\n bonuses = list(range(15))\n cities = ['Москва', 'Нижний Новгород', 'Сочи', 'Краснодар', 'Пенза', 'Самара', 'Саратов',\n 'Воронеж', 'Тамбов', 'Липецк', 'Рязань', 'Белогород', 'Екатеренбург', 'Грозный']\n with open(filename, 'w') as outfile:\n json.dump([Transaction(card_number=random.choice(card_numbers),\n bonus=random.choice(bonuses),\n flight_from=random.choice(cities),\n flight_to=random.choice(cities),\n flight_date=date(random.randint(2015, 2018),\n random.randint(1, 12),\n random.randint(1, 28)))\n for i in range(int(count))],\n outfile)\n\n\nif __name__ == '__main__':\n manager.run()\n","sub_path":"airline/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":2951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"91525389","text":"import warnings\nimport 
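The `post_save` / `pre_save` hooks wired up at the end of the uprofile models record mean a profile (with the default subscription) is created as a side effect of saving a user. A usage sketch of that guarantee, assuming a configured Django test environment:

user = AWUser.objects.create(email='artist@example.com')
assert user.profile is not None
assert user.profile.subscription == Subscription.get_default()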
logging\nfrom typing import Optional, Dict\n\nlogging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\nlogging.getLogger(\"numpy\").setLevel(logging.ERROR)\nwarnings.filterwarnings('ignore')\nimport os\nimport sys\nfrom pathlib import Path\n\nsys.path.append(Path(os.getcwd()).parent.as_posix())\nimport gym\nfrom ROAR_Sim.configurations.configuration import Configuration as CarlaConfig\nfrom ROAR.configurations.configuration import Configuration as AgentConfig\nfrom ROAR.agent_module.rl_occu_map_e2e_training_agent import RLOccuMapE2ETrainingAgent\nfrom stable_baselines.ddpg.policies import CnnPolicy\n# from stable_baselines.common.policies import CnnPolicy\nfrom stable_baselines import DDPG\nfrom datetime import datetime\nfrom stable_baselines.common.callbacks import CheckpointCallback, EveryNTimesteps, CallbackList\nfrom utilities import find_latest_model\nfrom ROAR_Gym.envs.roar_env import LoggingCallback\n\n\ndef main(output_folder_path: Path):\n # Set gym-carla environment\n agent_config = AgentConfig.parse_file(Path(\"configurations/agent_configuration.json\"))\n carla_config = CarlaConfig.parse_file(Path(\"configurations/carla_configuration.json\"))\n\n params = {\n \"agent_config\": agent_config,\n \"carla_config\": carla_config,\n \"ego_agent_class\": RLOccuMapE2ETrainingAgent,\n \"max_collision\": 5,\n }\n\n env = gym.make('roar-occu-map-e2e-v0', params=params)\n env.reset()\n model_params: dict = {\n \"verbose\": 1,\n \"env\": env,\n \"render\": True,\n \"tensorboard_log\": (output_folder_path / \"tensorboard\").as_posix(),\n \"buffer_size\": 10000,\n \"nb_rollout_steps\": 100,\n # \"batch_size\": 16,\n \"nb_eval_steps\": 50\n }\n model, callbacks = setup(model_params, output_folder_path)\n model = model.learn(total_timesteps=int(1e6), callback=callbacks, reset_num_timesteps=False)\n # model.save(f\"occu_map_e2e_ddpg_{datetime.now()}\")\n\n\ndef setup(model_params, output_folder_path):\n latest_model_path = find_latest_model(Path(output_folder_path))\n if latest_model_path is None:\n print(\"Creating model...\")\n model = DDPG(CnnPolicy, **model_params)\n else:\n print(\"Loading model...\")\n model = DDPG.load(latest_model_path, **model_params)\n tensorboard_dir = (output_folder_path / \"tensorboard\")\n ckpt_dir = (output_folder_path / \"checkpoints\")\n tensorboard_dir.mkdir(parents=True, exist_ok=True)\n ckpt_dir.mkdir(parents=True, exist_ok=True)\n checkpoint_callback = CheckpointCallback(save_freq=200, verbose=2, save_path=ckpt_dir.as_posix())\n # event_callback = EveryNTimesteps(n_steps=100, callback=checkpoint_callback)\n logging_callback = LoggingCallback(model=model, verbose=1)\n callbacks = CallbackList([checkpoint_callback, logging_callback])\n return model, callbacks\n\n\nif __name__ == '__main__':\n logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n datefmt=\"%H:%M:%S\", level=logging.INFO)\n logging.getLogger(\"Controller\").setLevel(logging.ERROR)\n logging.getLogger(\"SimplePathFollowingLocalPlanner\").setLevel(logging.ERROR)\n main(output_folder_path=Path(os.getcwd()) / \"output\" / \"occu_map_e2e\")\n","sub_path":"runner_occu_map_e2e_env.py","file_name":"runner_occu_map_e2e_env.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"156227453","text":"# coding: utf-8\n# Pré-vogais\n# Iann Carvalho, 2016 / Programação1\n\ndef pre_vogais(palavra):\n\tpalavra=palavra.lower()\n\tlista_vogais = ['a', 'e', 'i', 'o', 'u']\n\tl=[]\n\tfor i in 
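`find_latest_model` in the runner above is imported from a local `utilities` module that is not part of this record. A plausible minimal implementation — an assumption, based on the `CheckpointCallback` used here saving stable-baselines checkpoints as `.zip` archives:

from pathlib import Path

def find_latest_model(root: Path):
    # newest checkpoint by modification time, or None if nothing saved yet
    candidates = sorted(root.rglob('*.zip'), key=lambda p: p.stat().st_mtime)
    return candidates[-1] if candidates else None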
range(1, len(palavra)):\n\t\tfor vogais in lista_vogais:\n\t\t\tif palavra[i]==vogais:\n\t\t\t\tif len(l)==0:\n\t\t\t\t\tl.append(palavra[i-1])\n\t\t\t\telse:\n\t\t\t\t\tcontador=0\n\t\t\t\t\tfor letras in l:\n\t\t\t\t\t\tif letras != palavra[i-1]:\n\t\t\t\t\t\t\tcontador+=1\n\t\t\t\t\t\tif contador==len(l):\n\t\t\t\t\t\t\tl.append(palavra[i-1])\n\treturn l\n\t\n\n\t\t\t\t\n\t\nassert pre_vogais(\"Andrade\") == ['r', 'd']\nassert pre_vogais(\"exemplo\") == ['x', 'l']\nassert pre_vogais(\"hiaTO\") == ['h', 'i', 't']\nassert pre_vogais(\"Arara\") == ['r'] \n","sub_path":"Unidade 6/pre-vogais/prevogais.py","file_name":"prevogais.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"469259598","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import patterns, url\n\n\nclass Patterns(object):\n\tdef __init__(self):\n\t\tself.app_name = 'polls'\n\t\tself.name = 'polls'\n\n\t@property\n\tdef urls(self):\n\t\turlpatterns = patterns('polls.views',\n\t\t\turl(r'^$', 'poll_list', name = 'list'),\n\t\t\turl(r'^zoznam/(?P\\d+)/', 'poll_list', name = 'list-page'),\n\t\t\turl(r'^post/(?P\\d+)/$', 'post', name = 'post'),\n\t\t\turl(r'^vytvorit/$', 'create', name = 'create'),\n\t\t\turl(r'^detail/(?P[-\\w]+)/$', 'poll_detail_by_slug', name = \"detail-by-slug\"),\n\t\t)\n\t\treturn (urlpatterns, self.app_name, self.name)\n\nurlpatterns = Patterns().urls\n","sub_path":"polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"600590695","text":"#!/usr/bin/python\n#\n# Copyright 2015 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Adds sitelinks to a campaign using the CampaignExtensionSettingService.\n\nThe LoadFromStorage method is pulling credentials and properties from a\n\"googleads.yaml\" file. By default, it looks for this file in your home\ndirectory. 
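The duplicate check inside `pre_vogais()` above walks the whole result list with a counter for every match. The same first-occurrence behaviour falls out of a pairwise walk with an ordinary membership test (an equivalent reformulation, checked against the record's asserts):

def pre_vogais_set(palavra):
    palavra = palavra.lower()
    vogais = set('aeiou')
    out = []
    for prev, cur in zip(palavra, palavra[1:]):
        if cur in vogais and prev not in out:
            out.append(prev)
    return out

assert pre_vogais_set("Andrade") == ['r', 'd']
assert pre_vogais_set("hiaTO") == ['h', 'i', 't']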
For more information, see the \"Caching authentication information\"\nsection of our README.\n\nApi: AdWordsOnly\n\"\"\"\n\n\nfrom datetime import datetime\n\n\nfrom googleads import adwords\nfrom googleads import errors\nfrom pytz import timezone\n\nCAMPAIGN_ID = 'INSERT_CAMPAIGN_ID_HERE'\n\n\ndef main(client, campaign_id):\n # Initialize appropriate services.\n campaign_extension_setting_service = client.GetService(\n 'CampaignExtensionSettingService', version='v201506')\n customer_service = client.GetService('CustomerService', version='v201506')\n\n customer_tz = timezone(customer_service.get()['dateTimeZone'])\n time_fmt = '%s %s' % ('%Y%m%d %H%M%S', customer_tz)\n\n # Create the sitelinks\n sitelink1 = {\n 'xsi_type': 'SitelinkFeedItem',\n 'sitelinkText': 'Store Hours',\n 'sitelinkFinalUrls': {'urls': ['http://www.example.com/storehours']}\n }\n\n # Show the Thanksgiving specials link only from 20 - 27 Nov.\n sitelink2 = {\n 'xsi_type': 'SitelinkFeedItem',\n 'sitelinkText': 'Thanksgiving Specials',\n 'sitelinkFinalUrls': {'urls': ['http://www.example.com/thanksgiving']},\n # The time zone of the start and end date/times must match the time zone\n # of the customer.\n 'startTime': datetime(datetime.now().year, 11, 20, 0, 0, 0, 0,\n customer_tz).strftime(time_fmt),\n 'endTime': datetime(datetime.now().year, 11, 27, 23, 59, 59, 59,\n customer_tz).strftime(time_fmt)\n }\n\n # Show the wifi details primarily for high end mobile users.\n sitelink3 = {\n 'xsi_type': 'SitelinkFeedItem',\n 'sitelinkText': 'Wifi Available',\n 'sitelinkFinalUrls': {'urls': ['http://www.example.com/mobile/wifi']},\n # See https://developers.google.com/adwords/api/docs/appendix/platforms\n # for device criteria IDs.\n 'devicePreference': {'devicePreference': '30001'}\n }\n\n # Show the happy hours link only during Mon - Fri 6PM to 9PM.\n sitelink4 = {\n 'xsi_type': 'SitelinkFeedItem',\n 'sitelinkText': 'Happy hours',\n 'sitelinkFinalUrls': {'urls': ['http://www.example.com/happyhours']},\n 'scheduling': {\n 'feedItemSchedules': [\n {\n 'dayOfWeek': day,\n 'startHour': '18',\n 'startMinute': 'ZERO',\n 'endHour': '21',\n 'endMinute': 'ZERO'\n } for day in ['MONDAY', 'TUESDAY', 'WEDNESDAY', 'THURSDAY',\n 'FRIDAY']\n ]\n }\n }\n\n # Create your Campaign Extension Settings. This associates the sitelinks\n # to your campaign.\n campaign_extension_setting = {\n 'campaignId': campaign_id,\n 'extensionType': 'SITELINK',\n 'extensionSetting': {\n 'extensions': [sitelink1, sitelink2, sitelink3, sitelink4]\n }\n }\n\n operation = {\n 'operator': 'ADD',\n 'operand': campaign_extension_setting\n }\n\n # Add the extensions.\n response = campaign_extension_setting_service.mutate([operation])\n\n if 'value' in response:\n print ('Extension setting with type \"%s\" was added to campaignId \"%d\".' 
%\n (response['value'][0]['extensionType'],\n response['value'][0]['campaignId']))\n else:\n raise errors.GoogleAdsError('No extension settings were added.')\n\n\nif __name__ == '__main__':\n # Initialize client object.\n adwords_client = adwords.AdWordsClient.LoadFromStorage()\n\n main(adwords_client, CAMPAIGN_ID)\n","sub_path":"adwords api/googleads-python-lib-master/examples/adwords/v201506/extensions/add_site_links.py","file_name":"add_site_links.py","file_ext":"py","file_size_in_byte":4271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"381170489","text":"from django.forms import ModelForm\nfrom .models import ReviewRus, CommentRus\n\n#class to add a review\nclass AddReviewRus(ModelForm):\n\t\n\tclass Meta:\n\t\t#This references the model from which it should take the fields\n\t\tmodel = ReviewRus\n\n\t\t#Names of fields for html forms\n\t\tfields = ['book', 'book_author', 'review_text', 'review_author', 'review_pub_date']\t\n\n#class to add a comment\nclass AddCommentRus(ModelForm):\n\n\tclass Meta:\n\t\t#This references the model from which it should take the fields\n\t\tmodel = CommentRus\n\n\t\t#Names of fields for html forms\n\t\tfields = ['review', 'comment_text', 'comment_author', 'comment_pub_date']","sub_path":"site_rus/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"403230310","text":"import pygame\r\nfrom pygame.locals import *\r\nfrom sys import exit\r\nfrom random import randint\r\n\r\npygame.init()\r\nscreen = pygame.display.set_mode((640, 480), 0, 32)\r\n\r\nwhile True:\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n exit()\r\n\r\n rand_col = (randint(0, 255), randint(0, 255), randint(0, 255))\r\n # screen.lock() #很快你就会知道这两句lock和unlock的意思了\r\n\r\n rand_pos = pygame.mouse.get_pos()\r\n screen.set_at(rand_pos, rand_col)\r\n # screen.unlock()\r\n pygame.display.set_caption('pos {}'.format(rand_pos))\r\n\r\n pygame.display.update()","sub_path":"pygame_5.3.py","file_name":"pygame_5.3.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"158369014","text":"#!/usr/bin/env python\n\"\"\"\nSNU Integrated Module\n\n - ROS-embedded Code Version\n\n - [1] Outdoor Surveillance Robot Agents\n\n - Static (fixed)\n - Dynamic (moving)\n\n - [2] ROS Bag File\n\n\n\"\"\"\nimport os\nimport argparse\nimport time\nimport yaml\nimport rospy\nimport logging\nimport tf2_ros\nimport numpy as np\n\nimport utils.loader\nfrom utils.profiling import Timer\nfrom utils.ros.base import backbone\nfrom utils.ros.sensors import snu_SyncSubscriber\nimport snu_visualizer\nfrom module_bridge import snu_algorithms\n\n\n# Run Mode (choose btw ==>> bag / imseq / agent)\nRUN_MODE = \"bag\"\n\n\n# Define SNU Module Class\nclass snu_module(backbone):\n def __init__(self, logger, opts):\n super(snu_module, self).__init__(opts=opts)\n\n # Initialize Logger Variable\n self.logger = logger\n\n # Initialize Frame Index\n self.fidx = 0\n\n # Initialize Loop Timer\n self.loop_timer = Timer(convert=\"FPS\")\n\n # Synchronized Timestamp of Multimodal Sensors\n self.sync_stamp = None\n\n # Declare SNU Visualizer\n self.visualizer = snu_visualizer.visualizer(opts=opts)\n\n # Declare ROS Synchronization Switch Dictionary\n self.ros_sync_switch_dict = {\n \"color\": opts.sensors.color[\"is_valid\"],\n \"disparity\": False, 
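The pygame record above leaves its `screen.lock()` / `screen.unlock()` calls commented out, which is harmless for one `set_at()` per frame. When plotting many pixels per frame, locking once around the whole batch avoids pygame re-locking the surface for each call — a sketch using the same names from the record:

screen.lock()
for _ in range(1000):
    pos = (randint(0, 639), randint(0, 479))
    screen.set_at(pos, (randint(0, 255), randint(0, 255), randint(0, 255)))
screen.unlock()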
\"aligned_disparity\": opts.sensors.disparity[\"is_valid\"],\n \"thermal\": opts.sensors.thermal[\"is_valid\"],\n \"infrared\": opts.sensors.infrared[\"is_valid\"],\n \"nightvision\": opts.sensors.lidar[\"is_valid\"],\n }\n\n def gather_all_sensor_params_via_files(self):\n # Get Sensor Parameter File Path\n if self.opts.env_type in [\"static\", \"imseq\"]:\n if self.opts.env_type == \"imseq\":\n sensor_params_path = os.path.join(os.path.dirname(__file__), \"configs\", \"imseq\", \"sensor_params\")\n else:\n sensor_params_path = os.path.join(os.path.dirname(__file__), \"configs\", \"agents\", \"static\", \"sensor_params\")\n if os.path.isdir(sensor_params_path) is True:\n # Collect List of Sensor Parameter for Each Modality\n sensor_param_filenames = os.listdir(sensor_params_path)\n\n for sensor_param_filename in sensor_param_filenames:\n modal_type = sensor_param_filename.split(\".\")[0]\n\n # Get Sensor Parameters from YAML file\n sensor_param_filepath = os.path.join(sensor_params_path, sensor_param_filename)\n with open(sensor_param_filepath, \"r\") as stream:\n tmp = yaml.safe_load(stream=stream)\n\n if self.opts.env_type in [\"static\", \"dynamic\"]:\n sensor_param_array = np.asarray(tmp[\"STATIC_{:02d}\".format(self.opts.agent_id)][\"camera_param\"])\n else:\n raise NotImplementedError()\n\n # Update Sensor Parameter\n modal_obj = getattr(self, modal_type)\n modal_obj.update_sensor_params_file_array(sensor_param_array=sensor_param_array)\n else:\n rospy.loginfo(\"Sensor Parameter Directory Not Found...!\")\n\n # Call as Function\n def __call__(self, module_name):\n # Initialize SNU Algorithm Class\n snu_usr = snu_algorithms(opts=self.opts)\n self.logger.info(\"SNU Algorithm and Neural Network Models Loaded...!\")\n time.sleep(0.01)\n\n # ROS Node Initialization\n self.logger.info(\"ROS Node Initialization\")\n rospy.init_node(name=module_name, anonymous=True)\n\n # Check for Sensor Parameter Files\n rospy.loginfo(\"Checking Sensor Parameter Directory...!\")\n self.gather_all_sensor_params_via_files()\n\n # Check for TF_STATIC, Sensor Parameter Files (yaml)\n if self.opts.env_type in [\"dynamic\", \"bag\"]:\n\n # Subscribe for TF_STATIC\n tf_buffer = tf2_ros.Buffer()\n tf_listener = tf2_ros.TransformListener(buffer=tf_buffer)\n\n # Iterate Loop until \"tf_static is heard\"\n tf_static_check_flag = 0\n while self.tf_transform is None:\n try:\n self.tf_transform = tf_buffer.lookup_transform(\n \"rgb_frame\", 'velodyne_frame_from_rgb', rospy.Time(0)\n )\n except:\n if tf_static_check_flag == 0:\n rospy.loginfo(\"SNU-MODULE : TF_STATIC Transform Unreadable...! 
>> WAIT FOR A MOMENT...\")\n tf_static_check_flag += 1\n\n if tf_static_check_flag >= 30 and self.opts.env_type == \"bag\":\n rospy.loginfo(\"TF_STATIC: Custom TF Static Transform Loaded...!\")\n\n class TF_TRANSLATION(object):\n def __init__(self, x, y, z):\n self.x = x\n self.y = y\n self.z = z\n\n class TF_ROTATION(object):\n def __init__(self, x, y, z, w):\n self.x = x\n self.y = y\n self.z = z\n self.w = w\n\n class _TF_TRANSFORM(object):\n def __init__(self, translation, rotation):\n self.translation = translation\n self.rotation = rotation\n\n class TF_TRANSFORM(object):\n def __init__(self):\n translation = TF_TRANSLATION(\n x=0.44415, y=0.128996, z=0.238593\n )\n rotation = TF_ROTATION(\n x=0.482089, y=-0.501646, z=0.526684, w=0.488411\n )\n self.transform = _TF_TRANSFORM(\n translation=translation, rotation=rotation\n )\n\n self.tf_transform = TF_TRANSFORM()\n\n # Load ROS Synchronized Subscriber\n rospy.loginfo(\"Load ROS Synchronized Subscriber...!\")\n sync_ss = snu_SyncSubscriber(\n ros_sync_switch_dict=self.ros_sync_switch_dict, options=self.opts\n )\n\n # ROS Loop Starts\n rospy.loginfo(\"Starting SNU Integrated Module...!\")\n try:\n while not rospy.is_shutdown():\n self.loop_timer.reset()\n\n # Make Synchronized Data\n sync_ss.make_sync_data()\n\n # Get Synchronized Data, Loop Until Synchronized\n sync_data = sync_ss.get_sync_data()\n if sync_data is None:\n # print(\"LOOPING...!\")\n continue\n else:\n self.update_all_modal_data(sync_data=sync_data)\n self.sync_stamp = sync_data[0]\n sensor_fps = self.loop_timer.elapsed\n\n # Increase Frame Index\n self.fidx += 1\n\n # Update Sensor Image Frame Size\n if self.fidx == 1:\n self.opts.sensors.update_sensor_image_size(\n frame=self.color.get_data()\n )\n\n # Gather All Data and Process Disparity Frame\n sync_data_dict = self.gather_all_modal_data()\n if self.disparity is not None:\n sync_data_dict[\"disparity\"].process_data(self.opts.sensors.disparity)\n\n # SNU USR Integrated Algorithm Call\n trajectories, detections, heatmap, fps_dict = snu_usr(\n sync_data_dict=sync_data_dict, fidx=self.fidx\n )\n\n # Algorithm Total FPS\n total_fps = self.loop_timer.elapsed\n\n # Log Profile\n # rospy.loginfo(\n # \"FIDX: {} || # of Trajectories: <{}> || Total SNU Module Speed: {:.2f}fps\".format(\n # self.fidx, len(snu_usr), total_fps\n # )\n # )\n rospy.loginfo(\"FIDX: {} || # of Tracklets: <{}> || [SENSOR: {:.2f}fps | | SEG: {:.1f}fps | DET: {:.1f}fps | TRK: {:.1f}fps | ACL: {:.1f}fps]\".format(\n self.fidx, len(snu_usr), sensor_fps, fps_dict[\"seg\"], fps_dict[\"det\"], fps_dict[\"trk\"], fps_dict[\"acl\"]\n )\n )\n\n # Draw Results\n result_frame_dict = self.visualizer(\n sensor_data=self.color, trajectories=trajectories, detections=detections, fidx=self.fidx,\n segmentation=heatmap\n )\n\n # Publish Tracks\n self.publish_tracks(trajectories=trajectories, odometry_msg=self.odometry_msg)\n\n # Publish SNU Result Image Results\n self.publish_snu_result_image(result_frame_dict=result_frame_dict)\n\n # Draw / Show / Publish Top-view Result\n if self.opts.visualization.top_view[\"is_draw\"] is True:\n self.visualizer.visualize_top_view_trajectories(trajectories=trajectories)\n\n # # Publish Top-view Result\n # self.top_view_result_pub.publish(\n # self.pub_bridge.cv2_to_imgmsg(\n # self.visualizer.top_view_map, \"rgb8\"\n # )\n # )\n\n # Rospy Spin\n rospy.spin()\n\n except KeyboardInterrupt:\n rospy.logwarn(\"ShutDown SNU Module...!\")\n\n\ndef main():\n # Set Logger\n logger = utils.loader.set_logger(logging_level=logging.INFO)\n\n # 
Argument Parser\n args = utils.loader.argument_parser(\n logger=logger, script_name=os.path.basename(__file__),\n dev_version=4.5, mode_selection=RUN_MODE\n )\n\n # Load Configuration from File\n cfg = utils.loader.cfg_loader(logger=logger, args=args)\n\n # Load Options\n opts = utils.loader.load_options(logger=logger, args=args, cfg=cfg)\n opts.visualization.correct_flag_options()\n\n # Initialize SNU Module\n ros_snu_usr = snu_module(logger=logger, opts=opts)\n\n # Run SNU Module\n ros_snu_usr(module_name=\"snu_module\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/snu_module/scripts4/ros__run_snu_module.py","file_name":"ros__run_snu_module.py","file_ext":"py","file_size_in_byte":10439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"523341479","text":"from __future__ import absolute_import, division, print_function\n\nimport os\n\nimport matplotlib.pyplot as plt\n\nfrom vivarium.core.composition import set_axes\nfrom vivarium.library.dict_utils import get_value_from_path\n\n\ndef plot_diauxic_shift(timeseries, settings={}, out_dir='out'):\n external_path = settings.get('external_path', ('environment',))\n internal_path = settings.get('internal_path', ('cytoplasm',))\n internal_counts_path = settings.get('internal_counts_path', ('cytoplasm_counts',))\n reactions_path = settings.get('reactions_path', ('reactions',))\n global_path = settings.get('global_path', ('global',))\n\n time = [t/60 for t in timeseries['time']] # convert to minutes\n\n environment = get_value_from_path(timeseries, external_path)\n cell = get_value_from_path(timeseries, internal_path)\n cell_counts = get_value_from_path(timeseries, internal_counts_path)\n reactions = get_value_from_path(timeseries, reactions_path)\n globals = get_value_from_path(timeseries, global_path)\n\n # environment\n lactose = environment['lcts_e']\n glucose = environment['glc__D_e']\n\n # internal\n LacY = cell['LacY']\n lacy_RNA = cell['lacy_RNA']\n LacY_counts = cell_counts['LacY']\n lacy_RNA_counts = cell_counts['lacy_RNA']\n\n # reactions\n glc_exchange = reactions['EX_glc__D_e']\n lac_exchange = reactions['EX_lcts_e']\n\n # global\n mass = globals['mass']\n\n # settings\n environment_volume = settings.get('environment_volume')\n\n n_cols = 2\n n_rows = 4\n\n # make figure and plot\n fig = plt.figure(figsize=(n_cols * 6, n_rows * 1.5))\n grid = plt.GridSpec(n_rows, n_cols)\n\n ax1 = fig.add_subplot(grid[0, 0]) # grid is (row, column)\n ax1.plot(time, glucose, label='glucose')\n ax1.plot(time, lactose, label='lactose')\n set_axes(ax1)\n ax1.title.set_text('environment, volume = {} L'.format(environment_volume))\n ax1.set_ylabel('(mM)')\n ax1.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))\n\n ax2 = fig.add_subplot(grid[1, 0]) # grid is (row, column)\n ax2.plot(time, lacy_RNA, label='lacy_RNA')\n ax2.plot(time, LacY, label='LacY')\n set_axes(ax2)\n ax2.title.set_text('internal')\n ax2.set_ylabel('(mM)')\n ax2.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))\n\n ax3 = fig.add_subplot(grid[2, 0]) # grid is (row, column)\n ax3.plot(time, mass, label='mass')\n set_axes(ax3, True)\n ax3.title.set_text('global')\n ax3.set_ylabel('(fg)')\n ax3.set_xlabel('time (min)')\n ax3.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))\n\n ax4 = fig.add_subplot(grid[0, 1]) # grid is (row, column)\n ax4.plot(time, glc_exchange, label='glucose exchange')\n ax4.plot(time, lac_exchange, label='lactose exchange')\n set_axes(ax4, True)\n 
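The SNU runner above times its loop with `Timer(convert="FPS")` from a local `utils.profiling` module that is not included in this dump. A minimal stand-in with the same `reset()` / `elapsed` interface — an assumption about the real class:

import time

class Timer(object):
    def __init__(self, convert=None):
        self.convert = convert
        self.reset()

    def reset(self):
        self._t0 = time.time()

    @property
    def elapsed(self):
        # with convert="FPS", elapsed wall time is reported as a rate
        dt = time.time() - self._t0
        return 1.0 / dt if (self.convert == "FPS" and dt > 0) else dt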
ax4.title.set_text('flux'.format(environment_volume))\n ax4.set_xlabel('time (min)')\n ax4.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))\n\n # save figure\n fig_path = os.path.join(out_dir, 'diauxic_shift')\n plt.subplots_adjust(wspace=0.6, hspace=0.5)\n plt.savefig(fig_path, bbox_inches='tight')\n","sub_path":"vivarium/plots/transport_metabolism.py","file_name":"transport_metabolism.py","file_ext":"py","file_size_in_byte":3082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"191046746","text":"models = ['SQuAD1-1', 'NewsQA', 'SearchQA', 'TriviaQA_wiki', 'WikiHop', 'ComplexWebQuestions', 'DuoRC']\n\nimport subprocess\n\nfor i, train_m in enumerate(models): \n\tfor j, eval_m in enumerate(models): \n\t\t# print(\" * * * * * * * * * * * \")\n\t\t# command = f\"python multiqa.py evaluate --model model --datasets {eval_m} --cuda_device 0 --models_dir '/net/nfs.corp/aristo/danielk/MultiQA/models/{train_m}/'\"\n\t\t# outfile_name=f'eval_train:{train_m}_eval:{eval_m}.txt'\n\t\t# print(command)\n\t\t# process = subprocess.Popen(command, stdout = subprocess.PIPE, stderr=subprocess.STDOUT, shell = True)\n\n\t\t# file = open(outfile_name, \"w\")\n\t\t# for line in process.stdout:\n\t\t# \tfile.write(line.decode('utf-8'))\n\t\t# process.wait()\n\t\t# file.close()\n\n\n\t\tif i == 0: \n\t\t\tcontinue \n\t\tprint(\" * * * * * * * * * * * \")\n\t\textended_train_models = \"_\".join(models[:i+1])\n\t\tcommand = f\"python multiqa.py evaluate --model model --datasets {eval_m} --cuda_device 0 --models_dir '/net/nfs.corp/aristo/danielk/MultiQA/models/{extended_train_models}/' \"\n\t\toutfile_name=f'eval_train:{extended_train_models}_eval:{eval_m}.txt'\n\t\tprint(command)\n\t\tprint(outfile_name)\n\t\t# with open(outfile_name, \"w\") as outfile:\n\t\t# \tsubprocess.call(command, stdout=outfile, shell=True)\n\t\tprocess = subprocess.Popen(command, stdout = subprocess.PIPE, stderr=subprocess.STDOUT, shell = True)\n\t\t# (process_output, error) = process.communicate()\n\t\tfile = open(outfile_name, \"w\")\n\t\t# file.write(str(process_output))\n\t\tfor line in process.stdout:\n\t\t\tfile.write(line.decode('utf-8'))\n\t\tprocess.wait()\n\t\tfile.close()\n\n","sub_path":"daniel_eval.py","file_name":"daniel_eval.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"404056299","text":"import re\nimport inspect\nfrom collections import Mapping, OrderedDict, namedtuple\n\nfrom .error import ArgumentError, ParameterError\n\n# pylint: disable=bad-whitespace\n_POSITIONAL_ONLY = inspect.Parameter.POSITIONAL_ONLY\n_POSITIONAL_OR_KEYWORD = inspect.Parameter.POSITIONAL_OR_KEYWORD\n_VAR_POSITIONAL = inspect.Parameter.VAR_POSITIONAL\n_KEYWORD_ONLY = inspect.Parameter.KEYWORD_ONLY\n_VAR_KEYWORD = inspect.Parameter.VAR_KEYWORD\n\n_EMPTY = inspect.Parameter.empty\n\n\nclass Parameter:\n def __init__(self, name, kind, default=_EMPTY, *, cli_name=None):\n self.name = name\n self.kind = kind\n self.default = default\n self.cli_name = cli_name or name.strip('_')\n\n def __repr__(self):\n format_str = 'Parameter(%(name)s, %(kind)s, %(default)s, cli_name=%(cli_name))'\n return format_str % dir(self)\n\n\ndef param_normalize_name(param):\n \"\"\"\n Return (key, value) tuple. 
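daniel_eval.py above launches each evaluation with `Popen` and manually decodes and tees stdout into a log file; that streaming loop is useful for watching progress live. If live output is not needed, letting the subprocess write straight to the file is a simpler equivalent:

import subprocess

def run_and_log(command, outfile_name):
    # stderr is merged into stdout, matching the record's Popen call
    with open(outfile_name, 'w') as f:
        subprocess.run(command, stdout=f, stderr=subprocess.STDOUT,
                       shell=True, check=False)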
Both will be used in help message.\n\n Example:\n model_name -> (model-name, MODEL_NAME)\n \"\"\"\n return param.name.replace('_', '-'), param.cli_name.upper()\n\n\nnormalize_re = re.compile(r'([A-Za-z0-9_]+)')\ndef parameter_sanity(params):\n \"\"\"\n Check that the parameter list follows these rules:\n * If \"opts\" in parameters, its default value should be a string\n * If \"opts\" in parameters, the name of the other parameter cannot\n be just a single character.\n * Cannot have multiple parameters having the same CLI name. This\n is possible because underline will be stripped when parsing\n parameters.\n \"\"\"\n try:\n opts = params['opts'].default\n except KeyError:\n opts = None\n\n if opts is not None and not isinstance(opts, str):\n raise ParameterError(\n 'The default value of the parameter \"opts\" should be of type str')\n\n cli_names = set()\n for param in params.values():\n cli_name = param.cli_name\n\n m = normalize_re.search(cli_name)\n if m is None or m.group(0) != cli_name:\n raise ValueError('Invalid parameter name', cli_name)\n\n if cli_name in cli_names:\n raise ParameterError(\n 'Two or more parameters have the same command-line name %s' % cli_name)\n\n cli_names.add(cli_name)\n\n if opts is not None and len(param.name) == 1:\n raise ParameterError(\n 'If opts exists, there can\\'t be any letter-keyword arguments.')\n\n\nclass Func:\n def __init__(self, func):\n sig = inspect.signature(func)\n\n params = OrderedDict()\n for param in sig.parameters.values():\n params[param.name] = Parameter(param.name,\n param.kind,\n param.default)\n\n parameter_sanity(params)\n\n self.func = func\n self.parameters = params\n\n\n def apply(self, spec, strict=False):\n \"\"\"\n This function passively feeds command-line options into the target\n function. That is, too many positional arguments and keyword-only\n arguments won't cause error; all the remaining arguments not fed into\n the target function will be returned for successive calls.\n\n If strict is True, too many positional arguments or keyword-only\n arguments will cause Error.\n \"\"\"\n args = []\n kwargs = {}\n remaining_args = []\n remaining_kwargs = {}\n remaining_varargs = []\n\n params = tuple(self.parameters.values())\n\n n_pos_param = 0\n var_pos_param, var_kw_param = False, False\n for param in params:\n if param.kind in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD):\n n_pos_param += 1\n elif param.kind == _VAR_POSITIONAL:\n var_pos_param = True\n elif param.kind == _VAR_KEYWORD:\n var_kw_param = True\n\n # Feed args\n if len(spec.args) < n_pos_param:\n args.extend(spec.args)\n for param in params[len(spec.args):n_pos_param]:\n if param.cli_name in spec.kwargs:\n kwargs[param.name] = spec.kwargs[param.cli_name]\n continue\n elif param.default != _EMPTY:\n continue\n raise ArgumentError('Missing required argument', param.cli_name)\n\n elif len(spec.args) == n_pos_param:\n args.extend(spec.args)\n\n else:\n if var_pos_param and not spec.varargs:\n args.extend(spec.args)\n elif strict:\n raise ArgumentError(\n ('Too many positional arguments. 
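# [editor's example] A quick illustration of how the Func wrapper in this file
# is expected to render usage tokens, assuming the usage() method below
# behaves as read (required positionals and keyword-onlys first, bracketed
# optionals next, varargs last):
#     def f(a, b=1, *args, c, **kw): pass
#     Func(f).usage()
#     # -> ['A', '--c C', '[--b B]', '[OPTIONS...]', 'ARGS...']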
'\n 'Expect {0}/Provide {1}').format(n_pos_param, len(spec.args)))\n elif not var_pos_param:\n args.extend(spec.args[:n_pos_param])\n remaining_args.extend(spec.args[n_pos_param:])\n\n # Feed varargs\n if spec.varargs:\n if var_pos_param:\n args.extend(spec.varargs)\n elif strict:\n return ArgumentError('Unexpected var-arguments')\n else:\n remaining_varargs.extend(spec.varargs)\n\n # Feed kwargs\n for param in params[n_pos_param:]:\n if param.kind in (_VAR_POSITIONAL, _VAR_KEYWORD):\n continue\n\n if param.cli_name in spec.kwargs:\n kwargs[param.name] = spec.kwargs[param.cli_name]\n elif param.default == _EMPTY:\n raise ArgumentError('Missing required argument',\n param.cli_name)\n\n cli_names = {param.cli_name for param in params}\n if var_kw_param:\n for arg in spec.kwargs:\n kwargs[arg] = spec.kwargs[arg]\n elif strict:\n for arg in spec.kwargs:\n if arg not in cli_names:\n raise ArgumentError('Unexpected argument', param.cli_name)\n else:\n for arg in spec.kwargs:\n if arg not in kwargs:\n remaining_kwargs[arg] = spec.kwargs[arg]\n\n remaining = type(spec)(remaining_args,\n remaining_kwargs,\n remaining_varargs)\n\n return args, kwargs, remaining\n\n\n def usage(self, unbounded=False):\n \"\"\"\n Return usage token list. Each token is a string for one parameter.\n\n If unbounded is True, the first parameter \"self\" will be ignored.\n \"\"\"\n must = []\n optional = []\n varargs = []\n for param in self.parameters.values():\n if unbounded:\n unbounded = False\n continue\n\n pkey, pvalue = param_normalize_name(param)\n if param.kind in (_POSITIONAL_ONLY,\n _POSITIONAL_OR_KEYWORD):\n if param.default == _EMPTY:\n must.append(pvalue)\n elif isinstance(param.default, bool):\n format_str = '[-{}]' if len(pkey) == 1 else '[--{}]'\n optional.append(format_str.format(pkey))\n else:\n optional.append('[--{0} {1}]'.format(pkey, pvalue))\n\n elif param.kind == _VAR_POSITIONAL:\n varargs = ['{}...'.format(pvalue)]\n\n elif param.kind == _KEYWORD_ONLY:\n if param.default == _EMPTY:\n must.append('--{} {}'.format(pkey, pvalue))\n elif isinstance(param.default, bool):\n format_str = '[-{}]' if len(pkey) == 1 else '[--{}]'\n optional.append(format_str.format(pkey))\n else:\n optional.append('[--{0} {1}]'.format(pkey, pvalue))\n\n elif param.kind == _VAR_KEYWORD:\n optional.append('[OPTIONS...]')\n\n else:\n raise ValueError('Unexpected parameter kind %s' % param.kind)\n\n return must + optional + varargs\n\n def __call__(self, *args, **kwargs):\n return self.func(*args, **kwargs)\n","sub_path":"classarg/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":8101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"152575170","text":"#!/usr/bin/env python\n# coding=utf-8\n\n\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\n\ndef state(k):\n return -3 * k * k + 10 * k\n\n\nif __name__ == '__main__':\n\n kalman = cv2.KalmanFilter(1, 1, 0)\n kalman.transitionMatrix = np.array([1.0])\n kalman.measurementMatrix = np.array([1.0])\n kalman.processNoiseCov = np.array([5.0])\n kalman.measurementNoiseCov = np.array([2.0])\n kalman.statePost = np.array([0.0])\n kalman.errorCovPost = np.array([0.0])\n\n time = 0.0\n step = 0.1\n time_pos = []\n real_pos = []\n measured_pos = []\n predicted_pos = []\n\n s = state(time)\n prediction = kalman.predict()\n\n while time < 7:\n measurement = np.dot(kalman.measurementMatrix, s) + kalman.measurementNoiseCov * np.random.randn()\n kalman.correct(measurement)\n prediction = 
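# [editor's note] On the 1-D cv2.KalmanFilter setup above: OpenCV's Kalman
# filter generally expects 2-D float32 matrices, so if the plain float64 1-D
# arrays error out on predict()/correct(), this shape usually works (a hedged
# sketch of the same constant-state model, not a verified replacement):
#     import numpy as np, cv2
#     kalman = cv2.KalmanFilter(1, 1, 0)
#     kalman.transitionMatrix = np.array([[1.]], np.float32)
#     kalman.measurementMatrix = np.array([[1.]], np.float32)
#     kalman.processNoiseCov = np.array([[5.]], np.float32)
#     kalman.measurementNoiseCov = np.array([[2.]], np.float32)
#     kalman.statePost = np.array([[0.]], np.float32)
#     kalman.errorCovPost = np.array([[0.]], np.float32)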
kalman.predict()\n\n time_pos.append(time)\n real_pos.append(s)\n measured_pos.append(measurement[0])\n predicted_pos.append(prediction[0, 0])\n\n time += step\n s = state(time)\n\n \n plt.plot(time_pos, real_pos, '-g')\n plt.plot(time_pos, measured_pos, '-b')\n plt.plot(time_pos, predicted_pos, '-r')\n plt.show()\n","sub_path":"scripts/kalman.py","file_name":"kalman.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"25536302","text":"from unittest import TestCase, main\nfrom converse.markov import Token, Markov\n\nclass TestMarkov(TestCase):\n\n def setUp(self):\n self.corpus = [\n '@IronMan jumped over the fence and drank the milk.',\n 'I do not know what I would like to eat for #dinner #winning.',\n 'Data structures are an important subject in computer science.',\n 'He jumped across the titanium wall, breaking his ankle #fail.',\n 'Amazon has the biggest database of random @stuff.',\n 'MongoDB is the best NoSQL database\\n known to mankind.',\n 'Everyone should check out https://google.com/maps.'\n ]\n self.markov = Markov(corpus=self.corpus, depth=3, limit=10)\n\n def test_generate_seed(self):\n for seed in [self.markov.generate_seed(100) for i in range(100)]:\n self.assertGreaterEqual(seed, 0)\n self.assertLess(seed, 100)\n\n def test_create_ngrams(self):\n ngrams = self.markov.create_ngrams(3, self.markov.tokens)\n for gram in ngrams:\n for word in gram:\n self.assertTrue(isinstance(word, str))\n self.assertEqual(len(gram), 3)\n\n def test_diversity(self):\n diversity = self.markov.diversity\n self.assertLess(diversity, 1.0)\n self.assertGreaterEqual(diversity, 0.0)\n\n def test_tokens(self):\n tokens = self.markov.tokens\n for token in tokens:\n self.assertTrue(isinstance(token, str))\n self.assertNotIn('#', token)\n self.assertNotIn('@', token)\n self.assertNotIn('http', token)\n self.assertNotIn('\\n', token)\n\n def test_database(self):\n database = self.markov.database\n self.assertTrue(isinstance(database, dict))\n for key in database.keys():\n data = database[key]\n self.assertTrue(isinstance(key, tuple))\n self.assertEqual(len(key), 2)\n self.assertTrue(isinstance(data, list))\n self.assertGreater(len(data), 0)\n for token in data:\n self.assertTrue(isinstance(token, Token))\n\n def test_generate_text(self):\n try:\n text = self.markov.generate_text()\n token_cache = self.markov.tokens\n for token in text.split(' '):\n self.assertIn(token, token_cache)\n except KeyError:\n pass\n\nif __name__ == '__main__':\n main()\n","sub_path":"converse/test/test_markov.py","file_name":"test_markov.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"26069325","text":"a = 200\nb = 33\n\nif (b > a):\n print(\"b is greater than a\")\nelse:\n print(\"b is not greater than a\")\n\n# Write a conditional statement that evaluates if the user input is positive or negative\n\nuser_number = int(input(\"Input a positive or negative number: \"))\n\nif (user_number) > 0:\n print(\"Your number is a positive number.\")\nelif(user_number == 0):\n print(\"You entered 0.\")\nelse:\n print(\"Your number is a negative number.\")\n\n# Ask the user for their age \n# If they are younger than 13, tell them they can only watch PG/G Movies\n#If they are 13 and older but younger than 17, they can only watch PG-13/PG/G movies.\n# If they are older than 17 they can watch all movies.\n\nuser_age = int(input(\"What is your age? 
\"))\n\nif (user_age) < 13:\n print(\"You can only watch PG and G Movies.\")\nelif 13 < (user_age) < 17:\n print(\"You can watch PG-13/PG/G Movies.\")\nelse:\n print(\"You can watch all movies.\")\n\nis_Hungry = True\nis_Sleepy = False\nis_Bored = True\nif(is_Hungry == True):\n print(\"You should go eat.\")\nif(is_Sleepy == True):\n print(\"You should go sleep.\")\nif (is_Sleepy == False):\n print(\"Wow, you're well rested.\")\n\nif(is_Hungry == is_Bored or is_Sleepy == is_Hungry):\n print(\"You should do your homework.\")\nelse:\n print(\"You can play outside.\")\n\nif(is_Sleepy == is_Hungry and is_Hungry == is_Bored):\n if(is_Sleepy == is_Bored):\n print(\"It's nap time.\")\nelse:\n print(\"It's time for bed.\")\n\n# Ask the user for a number \n# Tell the user if the number is even or odd\n\neven_odd = int(input(\"Type in a number: \"))\n\nif (even_odd%2 != 0):\n print(\"Your number is odd.\")\nelif (even_odd%2 == 0):\n print(\"Your number is even.\")\nelse:\n print(\"N/A\")\n\n# Math Quadrants\n# Ask the user for an x and a y value\n\n# Using a nested conditional, output which quadrant they are in\n\nx = int(input(\"What's your X-axis? \"))\ny = int(input(\"what's your Y-axis? \"))\n\nif (x > 0 and y > 0):\n if (y > 0):\n print(\"It is in the first quadrant.\")\n else:\n print(\"Your number is in the fourth quadrant.\")\nelif (x < 0):\n if (y < 0):\n print(\"It is in the third quadrant.\")\n if(y > 0):\n print(\"Your number is in the second quadrant.\")\n\n# create an if statement using \"and\" or \"or\" for the third and second quadrant\n\nif (x < 0 and y > 0):\n print(\"It is in the second quadrant\")\nelif (x < 0 and y < 0):\n print(\"It is in the third quadrant.\")\n\n# let the user know when they are on the x-axis or y-axis\n# if we have +y or -y but x == 0\n# \"You are on the y-axis\"\n# if we have -x and +x but y == 0\n# \"You are on the x-axis\"\n\nif(x == 0 and y != 0):\n print(\"You are on the y-axis.\")\nif(x != 0 and y == 0):\n print(\"You are on the x-axis.\")\n\n# if x and y are 0, output the origin\nif (x == 0 and y == 0):\n print(\"You're on the origon.\")\n\n# and, or\n# and takes precedence over or\n# \"and\" both coniditions have to be correct\n# \"or\" only one of the conditions have to be correct\n\nx = 5\ny = 6\nz = 7\nif(x == 5 and y == 5 or z ==5):\n print(\"Yay\")\nelse:\n print(\"Nay\")\n\nif(x == 5 or y == 5 and z == 5):\n print(\"Yay\")\nelse:\n print(\"Nay\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"447907342","text":"import adv.adv_test\nimport adv\nfrom adv import *\n\ndef module():\n return Sinoa\n\nclass Sinoa(adv.Adv):\n comment = '5c+fs'\n\n a1 = ('a',0.13,'hp100')\n a3 = ('bt',0.2)\n conf = {}\n conf['slot.d'] = slot.d.Dreadking_Rathalos()\n conf['acl'] = '''\n `s3, not this.s3_buff_on\n `s1\n `s2\n `fs, seq=5\n '''\n\n def s1_proc(this, e):\n adv.Teambuff('s1_att',0.25/4,15,'att').on()\n adv.Teambuff('s1_crit',0.25/4,10,'crit').on()\n\n # def s1_proc(this, e):\n # r = random.random()\n # if r<0.25 :\n # adv.Teambuff('s1_att',0.25,15,'att').on()\n # elif r<0.5 :\n # adv.Teambuff('s1_crit',0.25,10,'crit').on()\n # else:\n # log('failed','s1')\n\nif __name__ == '__main__':\n conf = {}\n adv.adv_test.test(module(), conf, 
verbose=-2)\n\n\n","sub_path":"adv/sinoa.py","file_name":"sinoa.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
{"seq_id":"593430459","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun May 20 21:26:58 2018\r\n\r\n@author: Francis\r\n\"\"\"\r\nimport numpy as np\r\nfrom math import *\r\nimport matplotlib.pyplot as mp\r\n\r\n\r\n#from keras.optimizers import Adam, SGD,Adadelta,Adagrad\r\n#from keras.utils import to_categorical\r\n#from keras.models import Sequential\r\n#from keras.layers.core import Activation\r\n#from keras.layers.core import Flatten\r\n#from keras.layers.core import Dense\r\n#from keras import backend as K\r\n\r\n\r\n# functions and classes\r\n# action matrix - all possible combinations of actions of the three motors\r\nACTIONMAT = np.array([[0,0,-1],[0,0,0],[0,0,1],\r\n                      [0,-1,-1],[0,-1,0],[0,-1,1],\r\n                      [0,1,-1],[0,1,0],[0,1,1],\r\n                      [-1,0,-1],[-1,0,0],[-1,0,1],\r\n                      [-1,-1,-1],[-1,-1,0],[-1,-1,1],\r\n                      [-1,1,-1],[-1,1,0],[-1,1,1],\r\n                      [1,0,-1],[1,0,0],[1,0,1],\r\n                      [1,-1,-1],[1,-1,0],[1,-1,1],\r\n                      [1,1,-1],[1,1,0],[1,1,1]])\r\n\r\nCOLLISIONVOLUMES=[[0,0,95,180],[0,0,60,130]]\r\n\r\nclass RobotArm():\r\n    def __init__(self):\r\n        self.state = [0,0,0]\r\n        \r\n    def setState(self,st):\r\n        self.state = st\r\n        self.position = calHandPosition(st)\r\n        \r\n    def setGoal(self,newGoal):\r\n        self.goal = newGoal\r\n        \r\n    def calcReward(self):\r\n        dx = self.goal[0] - self.position[0]\r\n        dy = self.goal[1] - self.position[1]\r\n        dist2goal = sqrt(dx*dx + dy*dy)\r\n        self.dist2goal = dist2goal\r\n        # we want the reward to be 100 if the goal is met\r\n        # and proportional to the distance from goal otherwise\r\n        # the arm is 340mm long, so that is as far away as we can get\r\n        #\r\n        reward = (340.0-dist2goal)/340.0 * 100.0\r\n        # check for collisions with robot body or the floor\r\n        colide = isWithin(self.position)\r\n        if colide:\r\n            reward=0\r\n            print (\"COLLISION WARNING\")\r\n        self.reward = reward\r\n        return reward\r\n        \r\n\r\n    def step(self,act,learningRate):\r\n        newState = self.state + (act * learningRate)\r\n        # range check\r\n        for ii in range(3):\r\n            newState[ii]=max(newState[ii],0)\r\n            newState[ii]=min(newState[ii],255.0)\r\n            \r\n        self.setState(newState)\r\n        reward = self.calcReward()\r\n        \r\n        return self.state,reward\r\n# for a given action, return the new state \r\n\r\ndef isBetween(pnt,minpt,maxpt):\r\n    betw = False\r\n    if pnt > minpt and pnt < maxpt:\r\n        betw = True\r\n    return betw\r\n\r\ndef isWithin(point):\r\n    # check the hand position against each collision volume [x1,y1,x2,y2]\r\n    collision = False\r\n    for volume in COLLISIONVOLUMES:\r\n        x1,y1,x2,y2 = volume\r\n        xx = point[0]\r\n        yy = point[1]\r\n        if xx > x1 and xx < x2 and yy > y1 and yy < y2:\r\n            # yes, point is in volume\r\n            collision=True\r\n    return collision\r\n\r\ndef isequals(Qline1,Qline2):\r\n    st1 = Qline1[0]\r\n    st2 = Qline2[0]\r\n    return st1 == st2\r\n\r\ndef stateEqual(state1,state2):\r\n    s1,s2,s3 = state1\r\n    s4,s5,s6 = state2\r\n    return s1==s4 and s2==s5 and s3==s6 \r\n\r\n# just a utility to display the joint angle in degrees\r\ndef joint2deg(jointPos):\r\n    return jointPos * (180.0 / 255.0)\r\n\r\ndef calHandPosition(stat):\r\n    m1,m2,m3=stat\r\n    # calculate hand position based on the position of the servo motors\r\n    # m1, m2, m3 = motor command from 0 to 255\r\n    # forward kinematics\r\n    # we first convert each to an angle \r\n    d1 = 102.5 # length of first joint (shoulder to elbow) in mm\r\n    d2 = 97.26 # length of second joint arm (elbow to wrist) in mm\r\n    d3 = 141 # length of third joint arm (wrist to hand)\r\n    right = pi/2.0 # right angle, 90 degrees or pi/2 radians\r\n    m1Theta = pi - m1*(pi/255.0)\r\n    m2Theta = pi - m2*(pi/255.0)\r\n    m3Theta = pi - m3*(pi/255.0)\r\n    \r\n    m2Theta = m1Theta-right+m2Theta\r\n    m3Theta = 
m2Theta-right+m3Theta\r\n joint1 = np.array([d1*cos(m1Theta),d1*sin(m1Theta)])\r\n joint2 = np.array([d2*cos(m2Theta),d2*sin(m2Theta)])+joint1\r\n joint3 = np.array([d3*cos(m3Theta),d3*sin(m3Theta)])+joint2\r\n return joint3\r\n\r\ndef action_sample(mode,state,Qmatrix):\r\n if mode==\"random\":\r\n index = np.random.randint(0,ACTIONMAT.shape[0])\r\n action = ACTIONMAT[index]\r\n return index\r\n if mode==\"Q\":\r\n # Qmatrix contains a list of states [x,y], actions [1...27] and rewards\r\n # for example [34,14, 122],5,23 \r\n # where 34,14,122 is the state (motor posisions), 5 is the index to the ACTIONMAT action 5[0,-1,1]\r\n # we want to see if this state has an action, and if so, to take the max reward\r\n #\r\n # find the states\r\n myStatesQ =[]\r\n for datum in Qmatrix:\r\n if stateEqual(datum[0],state):\r\n myStatesQ.append(datum)\r\n if len(myStatesQ)==0:\r\n # no data found, do a random move\r\n # note recursion\r\n action=action_sample('random',state,Qmatrix)\r\n else:\r\n maxState=[[0,0,0],0,-9999.0]\r\n for thisStateQ in myStatesQ:\r\n # find the state with the highest value\r\n if thisStateQ[2]>maxState[2]:\r\n maxStat = thisStateQ\r\n if maxState[2]==-9999.0:\r\n # no max state was found?\r\n action=action_sample('random',state,Qmatrix)\r\n else:\r\n action = maxState[1]\r\n index=action\r\n return index\r\n \r\ndef maxQ(Q, state):\r\n #retrieve the maximum Q value for this state\r\n maxQvalue=0\r\n for thisQ in Q:\r\n thisState=thisQ[0]\r\n action=thisQ[1]\r\n qvalue = thisQ[2]\r\n if stateEqual(state,thisState):\r\n maxQvalue = max(qvalue,maxQvalue)\r\n return maxQvalue\r\n \r\n \r\ndef setQ(Q,state,action,value):\r\n index = 0\r\n found=False\r\n # print (\"setQ \",state,action,value),\r\n for datum in Q:\r\n try:\r\n if stateEqual(state,datum[0]) and action==datum[1]:\r\n Q[index]=[state,action,value]\r\n found=True\r\n break\r\n except:\r\n print (\"except setQ\",action, datum)\r\n index += 1\r\n if not found: \r\n Q.append([state,action,value])\r\n \r\n# begin main program\r\n\r\n# starting state\r\n# our arm has states from 0 to 255 which map to degrees from 0 to 180\r\n# here is our beginning state\r\n# initialize our \"Q\" matrix\r\n# @ Q is a matrix of the number of states by the number of actions\r\n# we will add states as we go in this version of this program\r\n#Q=[[[127,127,127],1,0.0]]\r\nQ=[]\r\nstate = [127,127,127]\r\noldState = state\r\n# initial learning rate for the arm - we start with 10 units\r\nlearningRate = 10.0\r\nrobotArm = RobotArm()\r\nrobotArm.setState(state)\r\ngoal=[14,251]\r\nrobotArm.setGoal(goal)\r\nknt = 0 # counter\r\nreward=0.0 # no reward yet...\r\nd2g=0.0\r\noldd2g = d2g\r\ncurve = []\r\ncurve2=[]\r\nposx = []\r\nposy=[]\r\noldReward=0.0\r\ngamma = 0.6 # discount for rewards that take more time\r\n\r\n\r\n# Q learning phase\r\nstateReset = [127,127,127]\r\nstate = stateReset\r\nrobotArm.setState(state)\r\nknt = 0\r\nreward = 0\r\ngamma = 0.99\r\nG=0\r\n# perform training on Q Learning\r\nfor epoch in range(1,100):\r\n done=False\r\n G,reward,knt = 0,0,0\r\n state = stateReset\r\n robotArm.setState(state)\r\n while not done:\r\n action = action_sample(\"Q\",state,Q)\r\n motorAction = ACTIONMAT[action]\r\n state2,reward = robotArm.step(motorAction,learningRate)\r\n newQ=reward + gamma * maxQ(Q, state2)\r\n setQ(Q,state,action,newQ)\r\n G += reward\r\n knt +=1\r\n if knt > 1000 or reward > 90:\r\n done=True\r\n state = state2\r\n robotArm.setState(state)\r\n if epoch % 2 == 0:\r\n print(\"Epoch \",epoch,\"TotalReward:\",G,\" 
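# [editor's note] The training loop above is tabular Q-learning with an
# implicit learning rate of 1: it overwrites Q(s, a) with r + gamma * max_a'
# Q(s', a'). The textbook update keeps a step size alpha so old estimates are
# blended rather than replaced; over this file's Q store that would read:
#     alpha = 0.1
#     old = getQ(Q, state, action)             # current estimate for (s, a)
#     target = reward + gamma * maxQ(Q, state2)
#     setQ(Q, state, action, (1 - alpha) * old + alpha * target)
# (getQ is a hypothetical accessor; the script only defines maxQ and setQ.)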
counter:\",knt,\"Q Len \",len(Q))\r\n \r\n \r\n\r\n\r\n \r\n ","sub_path":"Chapter05/armTrainingQlearn.py","file_name":"armTrainingQlearn.py","file_ext":"py","file_size_in_byte":7762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"616936455","text":"def lengthOfNum(x):\n l = 1\n while (int(x/10) != 0):\n l = l + 1\n x = int(x/10)\n return l\n\ndef armsum (x , l):\n sum = 0\n while (int(x) != 0):\n rem = x%10\n sum = sum + pow (rem, l) \n x = int(x/10)\n return sum\n \n\n \nIP = input(\"Enter the number\")\nSumIP = armsum (int(IP), lengthOfNum(int(IP)))\nprint (SumIP)\nif SumIP == int(IP):\n print (\"It is an Armstrong number\")\nelse:\n print (\"Not an Armstrong number\")\n\n#loops\n\n## while\n\n# while (expr), expr has a boolean result, eg: x> 5, x!=0, x==0\n# keeps running till expr is true.\n# expression to figure out how many times to run the loop\n# x = 5 \n# while(x > 2):\n# print(\"Reeza\")\n# x = x - 1 \n\n## for\n# set of values for which to run the loop.\n## for x in range(0,5): <- set to iterate. range(0,5)=> [0,1,2,3,4]\n## y = [1,2,3], for i in y:","sub_path":"BP5.py","file_name":"BP5.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"567039544","text":"import pickle\nfrom sklearn.svm import LinearSVC\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.cross_validation import train_test_split\nimport glob\nfrom functions import *\nimport time\nimport matplotlib.pyplot as plt\nimport os\n\nif __name__ == \"__main__\":\n # select parameters based on experiment results\n \"\"\"\n experiment accuracy results:(Use model_performance.py)\n Orient\t Color Space\n \t RGB\t HSV LUV HLS\t YUV\t YCrCb\n 3\t 0.9716 0.9828 0.9803\t 0.9831\t0.9885\t0.9845\n 6\t 0.9837\t0.9899\t 0.9842\t 0.9893\t0.9901\t0.993\n 9\t 0.9851\t0.9907\t 0.989\t 0.9924\t0.9924\t0.9938\n 12\t 0.9845\t0.9938\t 0.987\t 0.9904\t0.993\t0.9913\n \"\"\"\n color_space = 'YCrCb'\n orient = 9 # HOG orientations\n pix_per_cell = 8 # HOG pixels per cell\n cell_per_block = 2 # HOG cells per block\n hog_channel = \"ALL\" # Can be 0, 1, 2, or \"ALL\"\n spatial_size = (32, 32) # Spatial binning dimensions\n hist_bins = 32 # Number of histogram bins\n spatial_feat = True # Spatial features on or off\n hist_feat = True # Histogram features on or off\n hog_feat = True # HOG features on or off\n\n notcars = glob.glob(r\"../non-vehicles/**/**/*.png\")\n cars = glob.glob(r\"../vehicles/**/**/*.png\")\n\n # features.p is pickle files which stores feature vector\n if \"features.p\" in os.listdir():\n dist_pickle = pickle.load( open(\"features.p\", \"rb\" ) )\n car_features = dist_pickle[\"car_features\"]\n notcar_features = dist_pickle[\"notcar_features\"]\n else:\n car_features = extract_features(cars, color_space=color_space,\n spatial_size=spatial_size, hist_bins=hist_bins,\n orient=orient, pix_per_cell=pix_per_cell,\n cell_per_block=cell_per_block,\n hog_channel=hog_channel, spatial_feat=spatial_feat,\n hist_feat=hist_feat, hog_feat=hog_feat)\n notcar_features = extract_features(notcars, color_space=color_space,\n spatial_size=spatial_size, hist_bins=hist_bins,\n orient=orient, pix_per_cell=pix_per_cell,\n cell_per_block=cell_per_block,\n hog_channel=hog_channel, spatial_feat=spatial_feat,\n hist_feat=hist_feat, hog_feat=hog_feat)\n pickle.dump({\"car_features\":car_features,\"notcar_features\":notcar_features}, open(\"features.p\",\"wb\"))\n\n 
print('Using:',orient,'orientations',pix_per_cell,\n 'pixels per cell and', cell_per_block,'cells per block')\n\n X = np.vstack((car_features, notcar_features)).astype(np.float64)\n # Fit a per-column scalery_start_stop\n X_scaler = StandardScaler().fit(X)\n # Apply the scaler to X\n scaled_X = X_scaler.transform(X)\n\n # Define the labels vector\n y = np.hstack((np.ones(len(car_features)), np.zeros(len(notcar_features))))\n\n\n # Split up data into randomized training and test sets\n rand_state = np.random.randint(0, 100)\n X_train, X_test, y_train, y_test = train_test_split(\n scaled_X, y, test_size=0.2, random_state=rand_state)\n\n\n print('Feature vector length:', len(X_train[0]))\n # Use a linear SVC\n svc = LinearSVC()\n # Check the training time for the SVC\n t=time.time()\n svc.fit(X_train, y_train)\n t2 = time.time()\n print(round(t2-t, 2), 'Seconds to train SVC...')\n # Check the score of the SVC\n print('Test Accuracy of SVC = ', round(svc.score(X_test, y_test), 4))\n # Check the prediction time for a single sample\n t=time.time()\n pickle.dump({\"cars\":cars,\"notcars\":notcars,\"network\":svc,\"X_scaler\":X_scaler,\"color_space\":color_space,\"orient\":orient,\"pix_per_cell\":pix_per_cell,\"cell_per_block\":cell_per_block,\"hog_channel\":hog_channel,\n \"spatial_size\":spatial_size,\"hist_bins\":hist_bins,\"spatial_feat\":spatial_feat,\"hist_feat\":hist_feat,\"hog_feat\":hog_feat}, open(\"model.p\",\"wb\"))\n","sub_path":"model_save.py","file_name":"model_save.py","file_ext":"py","file_size_in_byte":3995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"330088765","text":"from django.shortcuts import render\n\nfrom DeBar.classes import message\nfrom DeBar.funcoes.function import getSessionVariavel\n\n\ndef renderizar (request, page, dados = None):\n\n mensagem = message.getMensagem(request)\n variavel = getSessionVariavel(request)\n nome = None\n\n try:\n del request.session['variavel']\n except:\n pass\n\n try:\n nome = dados.model.__name__\n tamanho = len(dados)\n except:\n tamanho = None\n\n if dados and tamanho:\n\n nome = dados.first()\n nome = nome.__class__.__name__\n\n return render(request, page, {'mensagem': mensagem,\n 'variavel': variavel,\n nome: dados})\n if nome:\n return render(request, page, {'mensagem': mensagem,\n 'variavel': variavel,\n nome: dados})\n\n else:\n\n return render(request, page, {'mensagem': mensagem, 'variavel': variavel})","sub_path":"DeBar/funcoes/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"454603720","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Sep 24 02:08:19 2020\r\n\r\n@author: DHRUV\r\n\"\"\"\r\n\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nimport math as math\r\nfrom sklearn.model_selection import train_test_split\r\n\r\ndf=pd.read_csv('Normalized_Dataset.csv', index_col=0)\r\nprint(df)\r\ndf.columns = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,'O']\r\nprint(df)\r\ndf.index = range(1,570)\r\nprint(df)\r\nfeatures = df.drop(columns = 'O')\r\nprint(features)\r\noutput = df['O']\r\nprint(output)\r\n\r\n\"\"\"\r\nlabels for features and output changed according to general \r\nmatrix norms\r\n\r\nIn the first part to solving c we'll run only a single trial\r\non the first 500 rows of the data set as training data set and 
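# [editor's note] In render.py above, the bare "except:" clauses also swallow
# KeyboardInterrupt/SystemExit and hide real bugs. Catching only what each
# block expects keeps the same behavior, e.g. for the session cleanup:
#     try:
#         del request.session['variavel']
#     except KeyError:
#         pass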
the\r\nremaining 69 rows of the data set as test data\r\n\r\nThis is done to get acquainted with the entire modelling process\r\nof gradient descent in Python\r\n\r\n\"\"\"\r\n\r\nx_train_i = features.iloc[range(0,500)]\r\nprint(x_train_i)\r\nx_test_i = features.iloc[range(500,569)]\r\nprint(x_test_i)\r\nx_test_i.index = range(1,70)\r\nprint(x_test_i)\r\ny_train_i = output.iloc[range(0,500)]\r\nprint(y_train_i)\r\ny_test_i = output.iloc[range(500,569)]\r\nprint(y_test_i)\r\ny_test_i.index = range(1,70)\r\nprint(y_test_i)\r\n\r\n\r\nx_train = x_train_i.to_numpy()\r\nx_test = x_test_i.to_numpy()\r\ny_train = y_train_i.to_numpy()\r\ny_test = y_test_i.to_numpy()\r\n\r\nprint(x_train)\r\nprint(x_test)\r\nprint(y_train)\r\nprint(y_test)\r\n\r\nw_b = np.zeros(31)\r\n\r\ngrad_w_b = np.zeros(31)\r\n\r\n\"\"\"\r\nThe first 30 entries represent w's and the last entry b\r\nThis initial w and b is taken to be 0 in entirety\r\n\"\"\"\r\n\r\nu = 0.2\r\n\r\nindex = np.arange(0,500)\r\ngrad_cal_index = np.arange(0,30)\r\nN_index = np.arange(0,500)\r\n\r\nlam = 0.01\r\n\r\n\"\"\"\r\n\r\ni iterates\r\nk is to evaluate entries of gradient, the entries corresponding\r\nto partial derivatives with respect to w's\r\n\r\nfor partial derivative with respect to b, the equation changes and so \r\nthe last entry in grad_w_b will be seperately calculated\r\n\r\nq moves over x_train to evaluate the gradient\r\n\r\nThe equation for gradient was obtained by partially differentiating the\r\ngiven base equation in HW1 by hand. The formulas thus obtained for the \r\ngradient are then represented here\r\n\r\n\r\n\"\"\"\r\n\r\ni=0\r\n\r\n\r\n\r\nfor i in index:\r\n w = w_b[0:30]\r\n b = w_b[30]\r\n k=0 \r\n for k in grad_cal_index:\r\n q=0\r\n sum_1 = 0\r\n sum_2 = 0\r\n for q in N_index:\r\n sum_1 = sum_1 + (-y_train[q])*(x_train[q][k])\r\n exponent_interim = np.dot(w,x_train[q,:])\r\n exponent = exponent_interim + b\r\n e_exponent = math.exp(exponent)\r\n ratio = (e_exponent)/(e_exponent + 1)\r\n sum_term = (ratio)*(x_train[q][k])\r\n sum_2 = sum_2 + sum_term\r\n \r\n grad_w_b[k] = sum_1 + sum_2 + (lam)*(w_b[k])\r\n \r\n \r\n t=0\r\n sum_1_b = 0\r\n sum_2_b = 0\r\n for t in N_index:\r\n sum_1_b = sum_1_b + (-y_train[t])\r\n exponent_interim_b = np.dot(w,x_train[t,:])\r\n exponent_b = exponent_interim_b + b\r\n ratio_b = (exponent_b)/(exponent_b + 1)\r\n sum_2_b = sum_2_b + ratio_b\r\n \r\n grad_w_b[30] = sum_1_b + sum_2_b\r\n \r\n norm_grad = np.linalg.norm(grad_w_b)\r\n \r\n if norm_grad <= 0.001:\r\n break\r\n else:\r\n w_b = w_b - u*grad_w_b\r\n \r\n\r\nprint(grad_w_b)\r\nprint(norm_grad)\r\nprint(w_b)\r\n\r\n\r\n\r\n\r\n\r\n\"\"\"\r\nA few comments\r\n\r\nThis is not the most efficient code, even though accurate. Reasons are thus -\r\n\r\nsum_1_b term is constant for each iteration and thus can be calculated and\r\nstored in advance\r\n\r\nsum_1 can be calculated in advance and stored as a vector. Not dependent on descent\r\niteration. 
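# [editor's note] The triple loop above can be collapsed: the gradient of the
# regularized logistic loss is X^T (sigma(Xw + b) - y) + lambda * w, and the
# b-partial is sum(sigma(Xw + b) - y). Note that ratio_b in the code above
# divides exponent_b rather than exp(exponent_b), so it is not the sigmoid it
# mirrors in the w-loop. A hedged vectorized sketch using this script's names:
import numpy as np
def logistic_gradient(w, b, x_train, y_train, lam):
    z = x_train.dot(w) + b                   # (N,) linear scores
    sigma = np.exp(z) / (1.0 + np.exp(z))    # predicted probabilities
    grad_w = x_train.T.dot(sigma - y_train) + lam * w
    grad_b = np.sum(sigma - y_train)
    return grad_w, grad_b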
When gradient for w are calculated, their corresponding \r\nindices can be called\r\n\r\nw'x + b is different for each iteration but they can be calculated\r\nin the beginning of each iteration for different rows and placed in \r\na vector and called on in the q loop rather than being calculated repeatedly\r\nfor each q loop in each k loop\r\n\r\nAgain we presume that the goal here isn't to write the most efficient\r\ncode, in so far as the code is correct, which this code is\r\n\r\n\"\"\"\r\n\r\n \r\n\r\n\r\n \r\n \r\n \r\n \r\n \r\n ","sub_path":"Gradient Descent Error.py","file_name":"Gradient Descent Error.py","file_ext":"py","file_size_in_byte":4245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"182173461","text":"import numpy\nfrom scipy.fftpack.realtransforms import dct\nfrom scipy.stats import kurtosis, skew\nfrom AudioLibrary.AudioSignal import *\n\n\nclass AudioFeatures:\n\n def __init__(self, audio_signal, win_size, win_step):\n\n # Audio Signal\n self._audio_signal = audio_signal\n\n # Short time features window size\n self._win_size = win_size\n\n # Short time features window step\n self._win_step = win_step\n\n '''\n Global statistics features extraction from an audio signals\n '''\n def global_feature_extraction(self, stats=['mean', 'std'], features_list=[], nb_mfcc=12, nb_filter=40, diff=0, hamming=True):\n\n # Extract short term audio features\n st_features, f_names = self.short_time_feature_extraction(features_list, nb_mfcc, nb_filter, hamming)\n\n # Number of short term features\n nb_feats = st_features.shape[1]\n\n # Number of statistics\n nb_stats = len(stats)\n\n # Global statistics feature names\n feature_names = [\"\" for x in range(nb_feats * nb_stats)]\n for i in range(nb_feats):\n for j in range(nb_stats):\n feature_names[i + j * nb_feats] = f_names[i] + \"_d\" + str(diff) + \"_\" + stats[j]\n\n # Calculate global statistics features\n features = numpy.zeros((nb_feats * nb_stats))\n for i in range(nb_feats):\n\n # Get features series\n feat = st_features[:, i]\n\n # Compute first or second order difference\n if diff > 0:\n feat = feat[diff:] - feat[:-diff]\n\n # Global statistics\n for j in range(nb_stats):\n features[i + j * nb_feats] = self.compute_statistic(feat, stats[j])\n\n return features, feature_names\n\n '''\n Short-time features extraction from an audio signals\n '''\n def short_time_feature_extraction(self, features=[], nb_mfcc=12, nb_filter=40, hamming=True):\n\n # Copy features list to compute\n features_list = list(features)\n\n # MFFCs features names\n mfcc_feature_names = []\n if 'mfcc' in features_list:\n mfcc_feature_names = [\"mfcc_{0:d}\".format(i) for i in range(1, nb_mfcc + 1)]\n features_list.remove('mfcc')\n\n # Filter banks features names\n fbank_features_names = []\n if 'filter_banks' in features_list:\n fbank_features_names = [\"fbank_{0:d}\".format(i) for i in range(1, nb_filter + 1)]\n features_list.remove('filter_banks')\n\n # All Features names\n feature_names = features_list + mfcc_feature_names + fbank_features_names\n\n # Number of features\n nb_features = len(feature_names)\n\n # Framming signal\n frames = self._audio_signal.framing(self._win_size, self._win_step, hamming=hamming)\n\n # Number of frame\n nb_frames = len(frames)\n\n # Compute features on each frame\n features = numpy.zeros((nb_frames, nb_features))\n cur_pos = 0\n for el in frames:\n\n # Get signal of the frame\n signal = el._signal\n\n # Compute the normalize magnitude of the spectrum (Discrete Fourier 
Transform)\n dft = el.dft(norm=True)\n\n # Return the first half of the spectrum\n dft = dft[:int((self._win_size * self._audio_signal._sample_rate) / 2)]\n if cur_pos == 0:\n dft_prev = dft\n\n # Compute features on frame\n for idx, f in enumerate(features_list):\n features[cur_pos, idx] = self.compute_st_features(f, signal, dft, dft_prev,\n self._audio_signal._sample_rate)\n\n # Compute MFCCs and Filter Banks\n if len(mfcc_feature_names) > 0:\n features[cur_pos, len(features_list):len(features_list) + len(mfcc_feature_names) + len(fbank_features_names)] = self.mfcc(signal, self._audio_signal._sample_rate,\n nb_coeff=nb_mfcc, nb_filt=nb_filter, return_fbank=len(fbank_features_names) > 0)\n # Compute Filter Banks\n elif len(fbank_features_names) > 0:\n features[cur_pos, len(features_list) + len(mfcc_feature_names):] = self.filter_banks_coeff(signal, self._audio_signal._sample_rate, nb_filt=nb_filter)\n\n # Keep previous Discrete Fourier Transform coefficients\n dft_prev = dft\n cur_pos = cur_pos + 1\n\n return features, feature_names\n\n '''\n Computes zero crossing rate of a signal\n '''\n @staticmethod\n def zcr(signal):\n zcr = numpy.sum(numpy.abs(numpy.diff(numpy.sign(signal))))\n zcr = zcr / (2 * numpy.float64(len(signal) - 1.0))\n return zcr\n\n '''\n Computes signal energy of frame\n '''\n @staticmethod\n def energy(signal):\n energy = numpy.sum(signal ** 2) / numpy.float64(len(signal))\n return energy\n\n '''\n Computes entropy of energy\n '''\n @staticmethod\n def energy_entropy(signal, n_short_blocks=10, eps=10e-8):\n\n # Total frame energy\n energy = numpy.sum(signal ** 2)\n sub_win_len = int(numpy.floor(len(signal) / n_short_blocks))\n\n # Length of sub-frame\n if len(signal) != sub_win_len * n_short_blocks:\n signal = signal[0:sub_win_len * n_short_blocks]\n\n # Get sub windows\n sub_wins = signal.reshape(sub_win_len, n_short_blocks, order='F').copy()\n\n # Compute normalized sub-frame energies:\n sub_energies = numpy.sum(sub_wins ** 2, axis=0) / (energy + eps)\n\n # Compute entropy of the normalized sub-frame energies:\n entropy = -numpy.sum(sub_energies * numpy.log2(sub_energies + eps))\n\n return entropy\n\n '''\n Computes spectral centroid of frame\n '''\n @staticmethod\n def spectral_centroid_spread(fft, fs, eps=10e-8):\n\n # Sample range\n sr = (numpy.arange(1, len(fft) + 1)) * (fs / (2.0 * len(fft)))\n\n # Normalize fft coefficients by the max value\n norm_fft = fft / (fft.max() + eps)\n\n # Centroid:\n C = numpy.sum(sr * norm_fft) / (numpy.sum(norm_fft) + eps)\n\n # Spread:\n S = numpy.sqrt(numpy.sum(((sr - C) ** 2) * norm_fft) / (numpy.sum(norm_fft) + eps))\n\n # Normalize:\n C = C / (fs / 2.0)\n S = S / (fs / 2.0)\n\n return C, S\n\n '''\n Computes the spectral flux feature\n '''\n @staticmethod\n def spectral_flux(fft, fft_prev, eps=10e-8):\n\n # Sum of fft coefficients\n sum_fft = numpy.sum(fft + eps)\n\n # Sum of previous fft coefficients\n sum_fft_prev = numpy.sum(fft_prev + eps)\n\n # Compute the spectral flux as the sum of square distances\n flux = numpy.sum((fft / sum_fft - fft_prev / sum_fft_prev) ** 2)\n\n return flux\n\n '''\n Computes the spectral roll off\n '''\n @staticmethod\n def spectral_rolloff(fft, c=0.90, eps=10e-8):\n\n # Total energy\n energy = numpy.sum(fft ** 2)\n\n # Roll off threshold\n threshold = c * energy\n\n # Compute cumulative energy\n cum_energy = numpy.cumsum(fft ** 2) + eps\n\n # Find the spectral roll off as the frequency position\n [roll_off, ] = numpy.nonzero(cum_energy > threshold)\n\n # Normalize\n if len(roll_off) > 0:\n 
roll_off = numpy.float64(roll_off[0]) / (float(len(fft)))\n else:\n roll_off = 0.0\n\n return roll_off\n\n '''\n Computes the Filter Bank coefficients\n '''\n @staticmethod\n def filter_banks_coeff(signal, sample_rate, nb_filt=40, nb_fft=512):\n\n # Magnitude of the FFT\n mag_frames = numpy.absolute(numpy.fft.rfft(signal, nb_fft))\n\n # Power Spectrum\n pow_frames = ((1.0 / nb_fft) * (mag_frames ** 2))\n low_freq_mel = 0\n\n # Convert Hz to Mel\n high_freq_mel = (2595 * numpy.log10(1 + (sample_rate / 2) / 700))\n\n # Equally spaced in Mel scale\n mel_points = numpy.linspace(low_freq_mel, high_freq_mel, nb_filt + 2)\n\n # Convert Mel to Hz\n hz_points = (700 * (10 ** (mel_points / 2595) - 1))\n bin = numpy.floor((nb_fft + 1) * hz_points / sample_rate)\n\n # Calculate filter banks\n fbank = numpy.zeros((nb_filt, int(numpy.floor(nb_fft / 2 + 1))))\n for m in range(1, nb_filt + 1):\n\n # left\n f_m_minus = int(bin[m - 1])\n\n # center\n f_m = int(bin[m])\n\n # right\n f_m_plus = int(bin[m + 1])\n\n for k in range(f_m_minus, f_m):\n fbank[m - 1, k] = (k - bin[m - 1]) / (bin[m] - bin[m - 1])\n for k in range(f_m, f_m_plus):\n fbank[m - 1, k] = (bin[m + 1] - k) / (bin[m + 1] - bin[m])\n filter_banks = numpy.dot(pow_frames, fbank.T)\n\n # Numerical Stability\n filter_banks = numpy.where(filter_banks == 0, numpy.finfo(float).eps, filter_banks)\n\n # dB\n filter_banks = 20 * numpy.log10(filter_banks)\n\n return filter_banks\n\n '''\n Computes the MFCCs\n '''\n def mfcc(self, signal, sample_rate, nb_coeff=12, nb_filt=40, nb_fft=512, return_fbank=False):\n\n # Apply filter bank on spectogram\n filter_banks = self.filter_banks_coeff(signal, sample_rate, nb_filt=nb_filt, nb_fft=nb_fft)\n\n # Compute MFCC coefficients\n mfcc = dct(filter_banks, type=2, axis=-1, norm='ortho')[1: (nb_coeff + 1)]\n\n # Return MFFCs and Filter banks coefficients\n if return_fbank is True:\n return numpy.concatenate((mfcc, filter_banks))\n else:\n return mfcc\n\n '''\n Compute statistics on short time features\n '''\n @staticmethod\n def compute_statistic(seq, statistic):\n if statistic == 'mean':\n S = numpy.mean(seq)\n elif statistic == 'med':\n S = numpy.median(seq)\n elif statistic == 'std':\n S = numpy.std(seq)\n elif statistic == 'kurt':\n S = kurtosis(seq)\n elif statistic == 'skew':\n S = skew(seq)\n elif statistic == 'min':\n S = numpy.min(seq)\n elif statistic == 'max':\n S = numpy.max(seq)\n elif statistic == 'q1':\n S = numpy.percentile(seq, 1)\n elif statistic == 'q99':\n S = numpy.percentile(seq, 99)\n elif statistic == 'range':\n S = numpy.abs(numpy.percentile(seq, 99) - numpy.percentile(seq, 1))\n return S\n\n '''\n Compute short time features on signal\n '''\n def compute_st_features(self, feature, signal, dft, dft_prev, sample_rate):\n if feature == 'zcr':\n F = self.zcr(signal)\n elif feature == 'energy':\n F = self.energy(signal)\n elif feature == 'energy_entropy':\n F = self.energy_entropy(signal)\n elif feature == 'spectral_centroid':\n [F, FF] = self.spectral_centroid_spread(dft, sample_rate)\n elif feature == 'spectral_spread':\n [FF, F] = self.spectral_centroid_spread(dft, sample_rate)\n elif feature == 'spectral_entropy':\n F = self.energy_entropy(dft)\n elif feature == 'spectral_flux':\n F = self.spectral_flux(dft, dft_prev)\n elif feature == 'sprectral_rolloff':\n F = self.spectral_rolloff(dft)\n return F\n","sub_path":"Voice 
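# [editor's example] For reference, the mel mapping used by filter_banks_coeff
# above is m = 2595 * log10(1 + f / 700), inverted as f = 700 * (10^(m/2595) - 1).
# A quick self-contained check of the forward form:
import numpy
mel = 2595.0 * numpy.log10(1.0 + numpy.array([0.0, 700.0, 8000.0]) / 700.0)
# -> approximately [0.0, 781.2, 2840.0]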
Analysis/Notebook/SVM/AudioLibrary/AudioFeatures.py","file_name":"AudioFeatures.py","file_ext":"py","file_size_in_byte":11344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"442520276","text":"import unittest\r\n# from queue import PriorityQueue\r\nfrom collections import deque\r\n\r\n\r\n# def bfs_get_path(graph, start_node, end_node):\r\n\r\n# # Find the shortest route in the network between the two users\r\n \r\n\r\n# return None\r\n\r\n\r\n\r\ndef bfs_get_path(graph, start_node, end_node):\r\n\r\n # Find the shortest route in the network between the two users\r\n #TimeComplexity(O(V+E))\r\n if graph.get(end_node, None) is None or graph.get(start_node,None) is None:\r\n raise Exception\r\n\r\n if start_node == end_node:\r\n return [start_node]\r\n\r\n visited=[]\r\n previous={}\r\n queue=[start_node]\r\n while queue:\r\n node=queue.pop(0)\r\n visited.append(node)\r\n for adj in graph[node]:\r\n if adj not in visited:\r\n previous[adj]=node\r\n queue.append(adj)\r\n if adj==end_node:\r\n spath=[]\r\n temp=adj\r\n while temp is not None:\r\n spath.append(temp)\r\n temp=previous.get(temp, None)\r\n spath.reverse()\r\n return spath\r\n\r\n return None\r\n\r\n\r\n# print bfs(graph, '1', '11')\r\n\r\n\r\n\r\n# Tests\r\n\r\nclass Test(unittest.TestCase):\r\n\r\n def setUp(self):\r\n self.graph = {\r\n 'a': ['b', 'c', 'd'],\r\n 'b': ['a', 'd'],\r\n 'c': ['a', 'e'],\r\n 'd': ['a', 'b'],\r\n 'e': ['c'],\r\n 'f': ['g'],\r\n 'g': ['f'],\r\n }\r\n\r\n def test_two_hop_path_1(self):\r\n actual = bfs_get_path(self.graph, 'a', 'e')\r\n expected = ['a', 'c', 'e']\r\n self.assertEqual(actual, expected)\r\n\r\n def test_two_hop_path_2(self):\r\n actual = bfs_get_path(self.graph, 'd', 'c')\r\n expected = ['d', 'a', 'c']\r\n self.assertEqual(actual, expected)\r\n\r\n def test_one_hop_path_1(self):\r\n actual = bfs_get_path(self.graph, 'a', 'c')\r\n expected = ['a', 'c']\r\n self.assertEqual(actual, expected)\r\n\r\n def test_one_hop_path_2(self):\r\n actual = bfs_get_path(self.graph, 'f', 'g')\r\n expected = ['f', 'g']\r\n self.assertEqual(actual, expected)\r\n\r\n def test_one_hop_path_3(self):\r\n actual = bfs_get_path(self.graph, 'g', 'f')\r\n expected = ['g', 'f']\r\n self.assertEqual(actual, expected)\r\n\r\n def test_zero_hop_path(self):\r\n actual = bfs_get_path(self.graph, 'a', 'a')\r\n expected = ['a']\r\n self.assertEqual(actual, expected)\r\n\r\n def test_no_path(self):\r\n actual = bfs_get_path(self.graph, 'a', 'f')\r\n expected = None\r\n self.assertEqual(actual, expected)\r\n\r\n def test_start_node_not_present(self):\r\n with self.assertRaises(Exception):\r\n bfs_get_path(self.graph, 'h', 'a')\r\n\r\n def test_end_node_not_present(self):\r\n with self.assertRaises(Exception):\r\n bfs_get_path(self.graph, 'a', 'h')\r\n\r\n\r\nunittest.main(verbosity=2)","sub_path":"Week3/Day4/bfs.py","file_name":"bfs.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"45474132","text":"# refrences\n# 1. 
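# [editor's note] bfs_get_path above imports deque but dequeues with
# queue.pop(0), which is O(n) per pop; collections.deque.popleft() is O(1).
# Marking nodes visited when they are enqueued (rather than when popped, as
# above) also avoids enqueuing the same node twice. A hedged sketch of the
# traversal skeleton only:
from collections import deque
def bfs_order(graph, start):
    seen, queue, order = {start}, deque([start]), []
    while queue:
        node = queue.popleft()        # O(1) dequeue
        order.append(node)
        for adj in graph[node]:
            if adj not in seen:       # mark on enqueue, not on pop
                seen.add(adj)
                queue.append(adj)
    return order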
https://sparkbyexamples.com/pyspark-tutorial/\nfrom pyspark.sql.functions import col, udf, broadcast, collect_list, \\\n rank, percent_rank, max as spark_max\n# pass a udf_function to a method\nfrom pyspark.sql.types import StructType, StringType, IntegerType, StructField\nfrom pyspark.sql.window import Window\n\nfrom pyspark import Row\n\n# from src.basics.useful_utils import *\n\n# start soark from python program\n\"\"\"SimpleApp.py\"\"\"\nfrom pyspark.sql import SparkSession\n\nspark = SparkSession.builder.appName(\"SimpleApp\").getOrCreate()\nlogFile = \"README.md\" # Should be some file on your system\nlogData = spark.read.text(logFile).cache()\nnumAs = logData.filter(logData.value.contains('a')).count()\nnumBs = logData.filter(logData.value.contains('b')).count()\nprint(\"Lines with a: %i, lines with b: %i\" % (numAs, numBs))\n\n\n# pyspark read and write textfile\n# rdd.coalesce(1).saveAsTextFile(...)\n# users = sc.textFile(\"/tmp/movielens/ml-1m/users.dat\") \\\n# .map(lambda l: l.split(\"::\")[0:4])\\\n# .map(lambda l: (int(l[0]), l[1], int(l[2]), int(l[3])))\\\n#\n\n# peopleDF = spark.read.json(\"examples/src/main/resources/people.json\")\n\n# DataFrames can be saved as Parquet files, maintaining the schema information.\n# peopleDF.write.parquet(\"people.parquet\")\n#\n# # Read in the Parquet file created above.\n# # Parquet files are self-describing so the schema is preserved.\n# # The result of loading a parquet file is also a DataFrame.\n# parquetFile = spark.read.parquet(\"people.parquet\")\n\n\ndef get_ml_data(spark):\n input = \"/Users/guoqiong/intelWork/data/movielens/ml-1m\"\n ratedf = spark.sparkContext.textFile(input + \"/ratings.dat\") \\\n .map(lambda x: x.split(\"::\")[0:4]) \\\n .map(lambda x: (int(x[0]), int(x[1]), int(x[2]), int(x[3]))) \\\n .toDF([\"user\", \"movie\", \"rate\", \"time\"])\n # gender = lambda x: 1 (if x = \"M\") else 0\n userdf = spark.sparkContext.textFile(input + \"/users.dat\") \\\n .map(lambda x: x.split(\"::\")[0:4]) \\\n .map(lambda x: (int(x[0]), x[1], int(x[2]), int(x[3]))) \\\n .toDF([\"user\", \"gender\", \"age\", \"income\"])\n\n n_uid = ratedf.select(\"user\").agg(spark_max(ratedf.user).alias(\"max\")).rdd.map(\n lambda row: row['max']).collect()[0] + 1\n n_mid = ratedf.select(\"movie\").agg(spark_max(ratedf.movie).alias(\"max\")).rdd.map(\n lambda row: row['max']).collect()[0] + 1\n\n return [ratedf, userdf]\n\n\ndef transform_python_udf(self, in_col, out_col, udf_func):\n df = self.df_scaled.withColumn(out_col, udf_func(col(in_col)))\n return df\n\n\ndef createDFfromRdd(spark):\n data = [(\"Java\", int(20000)), (\"Python\", 100000), (\"Scala\", int(3000))]\n columns = [\"language\", \"users_count\"]\n rdd = spark.sparkContext.parallelize(data)\n schema = StructType(\n [StructField(\"language\", StringType()), StructField(\"users_count\", IntegerType())])\n\n dfFromRDD1 = rdd.toDF(columns)\n dfFromRDD1.show()\n dfFromRDD2 = spark.createDataFrame(rdd).toDF(*columns)\n dfFromRDD2.show()\n rowrdd = rdd.map(lambda x: (x[0], int(x[1])))\n dfFromRDD3 = spark.createDataFrame(rowrdd, schema)\n dfFromRDD3.show()\n pass\n\n\ndef createDFfromList(spark):\n data = [(\"Java\", int(20000)), (\"Python\", 100000), (\"Scala\", int(3000))]\n columns = [\"language\", \"users_count\"]\n rdd = spark.sparkContext.parallelize(data)\n schema = StructType(\n [StructField(\"language\", StringType()), StructField(\"users_count\", IntegerType())])\n\n dfFromData1 = spark.createDataFrame(data).toDF(*columns)\n # dfFromData1.printSchema()\n rowData = map(lambda x: 
Row(*x), data)\n dfFromData2 = spark.createDataFrame(rowData, columns)\n # dfFromData2.printSchema()\n mappeddata = map(lambda x: (x[0], int(x[1])), data)\n dfFromData3 = spark.createDataFrame(mappeddata, schema)\n # dfFromData3.printSchema()\n return dfFromData3\n\n\n# groupby, aggregation functions and\n# approx_count_distinct, avg, collect_list, collect_set, countDistinct, count,\n# grouping, fist, last, kurtosis, max, min, skewness, stddev, stddev_samp, stddev_pop, sum, sumDistinct, variance, var_samp, var_pop\ndef agg_functions(spark):\n [ratedf, _] = get_ml_data(spark)\n\n df = ratedf.groupBy(\"user\").agg(collect_list(col(\"movie\")).alias(\"movie\"),\n collect_list(col(\"rate\")).alias(\"rate\"))\n\n print(df.show(10))\n df.printSchema()\n\n\n# window functions,\n# row_number(), rank(), percent_rank(), dense_rank(), ntile(), cume_dist(), lag(e:column offset:int), lead(e:column, offset:int)\ndef window_functions(spark):\n simpleData = ((\"James\", \"Sales\", 3000), \\\n (\"Michael\", \"Sales\", 4600), \\\n (\"Robert\", \"Sales\", 4100), \\\n (\"Maria\", \"Finance\", 3000), \\\n (\"James\", \"Sales\", 3000), \\\n (\"Scott\", \"Finance\", 3300), \\\n (\"Jen\", \"Finance\", 3900), \\\n (\"Jeff\", \"Marketing\", 3000), \\\n (\"Kumar\", \"Marketing\", 2000), \\\n (\"Saif\", \"Sales\", 4100) \\\n )\n\n columns = [\"employee_name\", \"department\", \"salary\"]\n df = spark.createDataFrame(data=simpleData, schema=columns)\n df.printSchema()\n df.show(truncate=False)\n windowSpec = Window.partitionBy(\"department\").orderBy(\"salary\")\n df.withColumn(\"rank\", rank().over(windowSpec)) \\\n .show()\n\n df.withColumn(\"percent_rank\", percent_rank().over(windowSpec)) \\\n .show()\n from pyspark.sql.functions import lag, lead\n\n df.withColumn(\"lag\", lag(\"salary\", 1).over(windowSpec)) \\\n .show()\n df.withColumn(\"lead\", lead(\"salary\", 1).over(windowSpec)) \\\n .show()\n from pyspark.sql.functions import col, avg, sum, min, row_number, max as spark_max\n windowSpecAgg = Window.partitionBy(\"department\").orderBy(\"salary\")\n df.withColumn(\"row\", row_number().over(windowSpec)) \\\n .withColumn(\"avg\", avg(col(\"salary\")).over(windowSpecAgg)) \\\n .withColumn(\"sum\", sum(col(\"salary\")).over(windowSpecAgg)) \\\n .withColumn(\"min\", min(col(\"salary\")).over(windowSpecAgg)) \\\n .withColumn(\"max\", spark_max(col(\"salary\")).over(windowSpecAgg)) \\\n .where(col(\"row\") == 1).select(\"department\", \"avg\", \"sum\", \"min\", \"max\") \\\n .show()\n\n\n# time functions\n# current_date(), to_date(), date_format(), add_months(), date_add(), date_sub(), datediff(),\n# months_between(), next_day(), year(), month, dayofmonth, dayofweek, dayofyear, weekofyear, from_unixtime, unix_timestamp\n\n# all kinds of join senarios\n# inner join, left, right, full\n# broadcast join\n# bucket join\ndef join_senarios(spark):\n df1 = spark.createDataFrame(\n [(1, \"a\", 2.0), (2, \"b\", 3.0), (3, \"c\", 3.0)],\n (\"x1\", \"x2\", \"x3\"))\n\n df2 = spark.createDataFrame(\n [(1, \"f\", -1.0), (2, \"b\", 0.0)], (\"x1\", \"x2\", \"x3\"))\n\n df = df1.join(broadcast(df2), (df1.x1 == df2.x1) & (df1.x2 == df2.x2))\n df.show()\n df = df1.join(df2, [\"x1\", \"x2\"])\n df.show()\n\n\ndef join_skew(spark):\n # if hive,setup, hive.map.aggr = true; hive.groupb.skewindata = true\n [ratedf, userdf] = get_ml_data(spark)\n distribution = ratedf.select(\"user\").groupBy(\"user\").count().orderBy(\n col(\"count\").desc()).persist() # should sample(0.01)\n total_number = 
distribution.select(\"count\").groupBy().sum().collect()[0][0]\n distribution.show(10)\n print(\"*************\")\n print(total_number)\n # regular join\n print(\"regular join count\")\n joined1 = ratedf.join(userdf, [\"user\"])\n print(joined1.count())\n\n topusers = distribution.filter(\"count > \" + str(total_number) + \"* 0.001\").select(\n \"user\").collect()\n topusers1 = distribution.orderBy(col(\"count\").desc()).limit(10).collect()\n print(topusers1)\n topids = list(map(lambda x: x[0], topusers1))\n import random\n gen_key = lambda userid: str(userid) + \"_\" + str(\n random.randint(1, 10)) if userid in topids else str(userid)\n # gen_key_udf = udf(gen_key, StringType()) if too many need structfiled and name\n gen_key_udf = udf(gen_key)\n ratedf = ratedf.withColumn(\"joinkey\", gen_key_udf(col(\"user\"))).withColumnRenamed(\"user\",\n \"userr\")\n userdf = userdf.withColumn(\"joinkey\", gen_key_udf(col(\"user\")))\n joined = ratedf.join(userdf,\n [\"joinkey\"]) # .filter(col(\"user\").isin(topids)) # filter isin list\n joined.show(2)\n print(\"skew join count\")\n print(joined.count())\n\n\ndef join_bucket(spark): # too big data\n [ratedf, userdf] = get_ml_data(spark)\n ratedf.show(10)\n ratedf.write.mode(\"overwrite\").bucketBy(4, \"user\").saveAsTable(\"rating_table\")\n userdf.write.mode(\"overwrite\").bucketBy(4, \"user\").saveAsTable(\"user_table\")\n rate_tbl = spark.table(\"rating_table\")\n user_tbl = spark.table(\"user_table\")\n joined = rate_tbl.join(user_tbl, [\"user\"])\n joined.show(10)\n\n\n# udfs\n# one to one, one to multiple,\n# one to multiple, multiple to mutiple\n\ndef udf_functions(spark):\n [ratedf, userdf] = get_ml_data(spark)\n item_size = ratedf.select(\"movie\").distinct().count()\n hist_df = gen_his_seq(ratedf, user_col='user', cols=['movie', 'rate'], sort_col='time',\n min_len=1, max_len=10)\n with_negative = add_negtive_samples(df=hist_df, item_size=item_size, item_col=\"movie\",\n label_col=\"label\", neg_num=2)\n padded = pad(df=with_negative, padding_cols=[\"movie_history\", \"rate_history\"])\n padded.show(10)\n\n # def gen_reindex_mapping(self, columns=[], freq_limit=10):\n # \"\"\"\n # Generate a mapping from old index to new one based on popularity count on descending order\n # :param columns: str or a list of str\n # :param freq_limit: int, dict or None. Indices with a count below freq_limit\n # will be omitted. Can be represented as either an integer or dict.\n # For instance, 15, {'col_4': 10, 'col_5': 2} etc. 
Default is 10,\n #\n # :return: a dictionary of list of dictionaries, a mapping from old index to new index\n # new index starts from 1, save 0 for default\n # \"\"\"\n # str_to_list(columns, \"columns\")\n # if isinstance(freq_limit, int):\n # freq_limit = {col: freq_limit for col in columns}\n # assert isinstance(freq_limit, dict), \\\n # \"freq_limit should be int or dict, but get a \" + type(freq_limit)\n # index_dicts = []\n # for c in columns:\n # c_count = self.select(c).group_by(c, agg={c: \"count\"}).rename(\n # {\"count(\" + c + \")\": \"count\"})\n # c_count = c_count.filter(pyspark_col(\"count\") >= freq_limit[c]) \\\n # .order_by(\"count\", ascending=False)\n # c_count_pd = c_count.to_pandas()\n # c_count_pd.reindex()\n # c_count_pd[c + \"_new\"] = c_count_pd.index + 1\n # index_dict = dict(zip(c_count_pd[c], c_count_pd[c + \"_new\"]))\n # index_dicts.append(index_dict)\n # if isinstance(columns, str):\n # index_dicts = index_dicts[0]\n #\n # return index_dicts\n #\n # def add_value_features(self, columns, tbls, key=None, value=None, reindex_only=False):\n # \"\"\"\n # Add features based on columns and another key value table,\n # for each col in columns, it adds a value_col using key-value pairs from tbls, replace old\n # index with new one from key-value tbls if reindex_only is True.\n #\n # :param columns: a list of str\n # :param tbls: Table with only two columns [key, value]\n # :param key: str, name of key column in tbl, None while reindex_only\n # :param value: str, name of value column in tbl, None while reindex_only\n # :param reindex_only: boolean, if reindex only or add values\n #\n # :return: FeatureTable, dict\n # \"\"\"\n # if isinstance(columns, str):\n # columns = [columns]\n # assert isinstance(columns, list), \\\n # \"columns should be str or a list of str but get \" + type(columns)\n # if isinstance(tbls, Table):\n # tbls = [tbls]\n # assert isinstance(tbls, list), \\\n # \"tbls should be Table or a list of Tables get \" + type(tbls)\n #\n # if reindex_only:\n # assert len(columns) == len(tbls), \\\n # \"each column of columns should have one corresponding index table while reindex\"\n # else:\n # assert len(tbls) == 1, \\\n # \"all columns should share one index table while add value features\"\n #\n # def lookup(items, keyvalue_map):\n # getvalue = lambda item: keyvalue_map.get(item, 0)\n # if isinstance(items, int) or items is None:\n # values = getvalue(items)\n # elif isinstance(items, list) and isinstance(items[0], int):\n # values = [getvalue(item) for item in items]\n # elif isinstance(items, list) and isinstance(items[0], list) and isinstance(items[0][0],\n # int):\n # values = []\n # for line in items:\n # line_values = [getvalue(item) for item in line]\n # values.append(line_values)\n # else:\n # raise ValueError('only int, list[int], and list[list[int]] are supported.')\n # return values\n #\n # value_dims = {}\n # df = self.df\n # spark = OrcaContext.get_spark_session()\n # for i, c in enumerate(columns):\n # (index_tb, new_c) = (tbls[i], c) if reindex_only else (tbls[0], c.replace(key, value))\n # key_value = dict(index_tb.df.rdd.map(lambda row: (row[0], row[1])).collect())\n # key_value_bc = spark.sparkContext.broadcast(key_value)\n # col_type = df.schema[c].dataType\n # lookup_udf = udf(lambda x: lookup(x, key_value_bc.value), col_type)\n # df = df.withColumn(new_c, lookup_udf(pyspark_col(c)))\n # value_dims[c] = max(key_value.values()) + 1\n #\n # return FeatureTable(df), value_dims\n #\n # def add_value_features(self, key_cols, tbl, key, 
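# [editor's example] The commented add_value_features above distributes a
# Python dict with sparkContext.broadcast and reads it inside a udf. The core
# pattern, reduced to a hedged standalone sketch (assumes a `spark` session
# and a DataFrame `df` with an integer column "x1"):
from pyspark.sql.functions import col, udf
from pyspark.sql.types import StringType
mapping = {1: "one", 2: "two"}
mapping_bc = spark.sparkContext.broadcast(mapping)   # shipped once per executor
lookup_udf = udf(lambda x: mapping_bc.value.get(x, "unknown"), StringType())
df_mapped = df.withColumn("x1_name", lookup_udf(col("x1")))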
value):\n # \"\"\"\n # Add features based on key_cols and another key value table,\n # for each col in key_cols, it adds a value_col using key-value pairs from tbl\n #\n # :param key_cols: a list of str\n # :param tbl: Table with only two columns [key, value]\n # :param key: str, name of key column in tbl\n # :param value: str, name of value column in tbl\n #\n # :return: FeatureTable\n # \"\"\"\n # spark = OrcaContext.get_spark_session()\n # keyvalue_bc = spark.sparkContext.broadcast(dict(tbl.df.distinct().rdd.map(\n # lambda row: (row[0], row[1])).collect()))\n #\n # keyvalue_map = keyvalue_bc.value\n #\n # def gen_values(items):\n # getvalue = lambda item: keyvalue_map.get(item)\n # if isinstance(items, int):\n # values = getvalue(items)\n # elif isinstance(items, list) and isinstance(items[0], int):\n # values = [getvalue(item) for item in items]\n # elif isinstance(items, list) and isinstance(items[0], list) and isinstance(items[0][0],\n # int):\n # values = []\n # for line in items:\n # line_cats = [getvalue(item) for item in line]\n # values.append(line_cats)\n # else:\n # raise ValueError('only int, list[int], and list[list[int]] are supported.')\n # return values\n # df = self.df\n # for c in key_cols:\n # col_type = df.schema[c].dataType\n # cat_udf = udf(gen_values, col_type)\n # df = df.withColumn(c.replace(key, value), cat_udf(pyspark_col(c)))\n # return FeatureTable(df)\n #\n # def reindex(self, columns=[], index_dicts=[]):\n # \"\"\"\n # Replace the value using index_dicts for each col in columns, set 0 for default\n #\n # :param columns: str of a list of str\n # :param index_dicts: dict or list of dicts from int to int\n #\n # :return: FeatureTable and dimentionss of columns\n # \"\"\"\n # if isinstance(columns, str):\n # columns = [columns]\n # assert isinstance(columns, list), \\\n # \"columns should be str or a list of str, but get a \" + type(columns)\n # if isinstance(index_dicts, dict):\n # index_dicts = [index_dicts]\n # assert isinstance(index_dicts, list), \\\n # \"index_dicts should be dict or a list of dict, but get a \" + type(index_dicts)\n # assert len(columns) == len(index_dicts), \\\n # \"each column of columns should have one corresponding index_dict\"\n #\n # tbl = FeatureTable(self.df)\n # for i, c in enumerate(columns):\n # index_dict = index_dicts[i]\n # spark = OrcaContext.get_spark_session()\n # index_dict_bc = spark.sparkContext.broadcast(index_dict)\n # index_lookup = lambda x: index_dict_bc.value.get(x, 0)\n # tbl = tbl.apply(c, c, index_lookup, \"int\")\n # return tbl\n #\n # def gen_reindex_mapping(self, columns=[], freq_limit=10):\n # \"\"\"\n # Generate a mapping from old index to new one based on popularity count on descending order\n # :param columns: str or a list of str\n # :param freq_limit: int, dict or None. Indices with a count below freq_limit\n # will be omitted. Can be represented as either an integer or dict.\n # For instance, 15, {'col_4': 10, 'col_5': 2} etc. 
Default is 10,\n #\n # :return: a dictionary of list of dictionaries, a mapping from old index to new index\n # new index starts from 1, save 0 for default\n # \"\"\"\n # if isinstance(columns, str):\n # columns = [columns]\n # assert isinstance(columns, list), \\\n # \"columns should be str or a list of str, but get a \" + type(columns)\n # if isinstance(freq_limit, int):\n # freq_limit = {col: freq_limit for col in columns}\n # assert isinstance(freq_limit, dict),\\\n # \"freq_limit should be int or dict, but get a \" + type(freq_limit)\n # index_dicts = []\n # for c in columns:\n # c_count = self.select(c).group_by(c, agg={c: \"count\"}).rename(\n # {\"count(\" + c + \")\": \"count\"})\n # c_count = c_count.filter(pyspark_col(\"count\") >= freq_limit[c])\\\n # .order_by(\"count\", ascending=False)\n # c_count_pd = c_count.to_pandas()\n # c_count_pd.reindex()\n # c_count_pd[c + \"_new\"] = c_count_pd.index + 1\n # index_dict = dict(zip(c_count_pd[c], c_count_pd[c + \"_new\"]))\n # index_dicts.append(index_dict)\n # if isinstance(columns, str):\n # index_dicts = index_dicts[0]\n #\n # return index_dicts\n #\n # def test_add_value_features_reindex(self):\n # file_path = os.path.join(self.resource_path, \"friesian/feature/parquet/data1.parquet\")\n # feature_tbl = FeatureTable.read_parquet(file_path)\n # string_idx_list = feature_tbl.gen_string_idx([\"col_4\", \"col_5\"],\n # freq_limit={\"col_4\": 1, \"col_5\": 1},\n # order_by_freq=False)\n # tbl_with_index = feature_tbl.encode_string([\"col_4\", \"col_5\"], string_idx_list)\n # tbl_with_index.show(100)\n # index_dicts = tbl_with_index.gen_reindex_mapping([\"col_4\", \"col_5\"], 2)\n # tbls = []\n # for d in index_dicts:\n # dict_tbl = StringIndex.from_dict(d, \"tmp\").cast(\"tmp\", \"int\")\n # tbls.append(dict_tbl)\n # reidxed, _ = tbl_with_index.add_value_features([\"col_4\", \"col_5\"], tbls, reindex_only=True)\n # assert (reidxed.filter(col(\"col_4\") == 0).size() == 3)\n # assert (reidxed.filter(col(\"col_4\") == 1).size() == 2)\n # assert (reidxed.filter(col(\"col_5\") == 0).size() == 2)\n # assert (reidxed.filter(col(\"col_5\") == 1).size() == 3)\n #\n # def test_reindex(self):\n # file_path = os.path.join(self.resource_path, \"friesian/feature/parquet/data1.parquet\")\n # feature_tbl = FeatureTable.read_parquet(file_path)\n # string_idx_list = feature_tbl.gen_string_idx([\"col_4\", \"col_5\"],\n # freq_limit={\"col_4\": 1, \"col_5\": 1},\n # order_by_freq=False)\n # tbl_with_index = feature_tbl.encode_string([\"col_4\", \"col_5\"], string_idx_list)\n # index_dicts = tbl_with_index.gen_reindex_mapping([\"col_4\", \"col_5\"], 2)\n # reindexed = tbl_with_index.reindex([\"col_4\", \"col_5\"], index_dicts)\n # assert(reindexed.filter(col(\"col_4\") == 0).size() == 3)\n # assert(reindexed.filter(col(\"col_4\") == 1).size() == 2)\n # assert(reindexed.filter(col(\"col_5\") == 0).size() == 2)\n # assert(reindexed.filter(col(\"col_5\") == 1).size() == 3)\n\n\nif __name__ == '__main__':\n spark = SparkSession.builder.enableHiveSupport().appName('SparkByExamples.com').getOrCreate()\n conf = spark.sparkContext.getConf()\n print(\"8888\")\n print(conf)\n # create dataframe\n # createDFfromRdd(spark)\n # createDFfromList(spark)\n\n # filter dataframe\n # df = createDFfromList(spark)\n # df.orderBy(\"users_count\").filter(df[\"users_count\"] > 100).show()\n # df.filter(df.users_count > 100)\n # df.filter(col(\"users_count\") > 100)\n # df.filter(\"users_count = 3000\")\n # df.filter(\"users_count == 3000\")\n\n # window_functions(spark)\n # 
join_senarios(spark)\n # join_bucket(spark)\n join_skew(spark)\n # agg_functions(spark)\n\n # all kinds of udfs\n # udf_functions(spark)\n","sub_path":"src/pyspark/pyspark_experiences.py","file_name":"pyspark_experiences.py","file_ext":"py","file_size_in_byte":22051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"322914995","text":"from typing import List\n\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\nfrom rest_framework.request import Request\n\nfrom accesses.models import DataRight, build_data_rights\nfrom admin_cohort.models import User, JobStatus, NewJobStatus\nfrom cohort.models import CohortResult\nimport workspaces.conf_workspaces as conf_workspaces\nfrom workspaces.models import Account\nfrom exports import conf_exports\nfrom exports.emails import check_email_address\nfrom exports.models import ExportRequest, ExportRequestTable, \\\n VALIDATED_STATUS, ExportType\nfrom exports.permissions import can_review_transfer_jupyter, can_review_export\n\n\nclass ExportRequestTableSerializer(serializers.ModelSerializer):\n class Meta:\n model = ExportRequestTable\n fields = \"__all__\"\n read_only_fields = [\n \"export_request_table_id\",\n \"target_table_name\",\n \"source_table_name\",\n \"export_request\",\n \"deleted_at\",\n ]\n\n\ndef check_rights_on_perimeters_for_exports(rights: List[DataRight],\n export_type: str, is_nomi: bool):\n if is_nomi:\n wrong_perims = [r.care_site_id for r in rights\n if not r.right_read_patient_nominative]\n if len(wrong_perims) or len(rights) == 0:\n raise ValidationError(\n f\"L'utilisateur n'a pas le droit de lecture nominative \"\n f\"actuellement sur les périmètres {wrong_perims}.\"\n )\n else:\n wrong_perims = [r.care_site_id for r in rights\n if not r.right_read_patient_pseudo_anonymised]\n if len(wrong_perims) or len(rights) == 0:\n raise ValidationError(\n f\"L'utilisateur n'a pas le droit de lecture pseudonymisée \"\n f\"actuellement sur les périmètres {wrong_perims}.\"\n )\n\n if export_type in [ExportType.CSV.value]:\n if is_nomi:\n wrong_perims = [r.care_site_id for r in rights\n if not r.right_export_csv_nominative]\n else:\n wrong_perims = [r.care_site_id for r in rights\n if not r.right_export_csv_pseudo_anonymised]\n\n if len(wrong_perims) or len(rights) == 0:\n raise ValidationError(\n f\"Le provider n'a pas le droit d'export \"\n f\"{is_nomi and 'nominatif' or 'pseudonymisé'} \"\n f\"actuellement sur les périmètres {wrong_perims}.\"\n )\n\n if export_type in [ExportType.PSQL.value, ExportType.HIVE.value]:\n if is_nomi:\n wrong_perims = [r.care_site_id for r in rights\n if not r.right_transfer_jupyter_nominative]\n else:\n wrong_perims = [r.care_site_id for r in rights\n if not r.right_transfer_jupyter_pseudo_anonymised]\n\n if len(wrong_perims) or len(rights) == 0:\n raise ValidationError(\n f\"Le provider n'a pas le droit d'export jupyter \"\n f\"{is_nomi and 'nominatif' or 'pseudonymisé'} \"\n f\"actuellement sur les périmètres {wrong_perims}.\"\n )\n\n\nclass ReviewFilteredPrimaryKeyRelatedField(serializers.PrimaryKeyRelatedField):\n def get_queryset(self):\n q = super(ReviewFilteredPrimaryKeyRelatedField, self).get_queryset()\n creator = self.context.get('request', None).user\n if can_review_export(creator):\n return q\n else:\n return q.filter(owner=creator)\n\n\nclass ExportRequestSerializer(serializers.ModelSerializer):\n tables = ExportRequestTableSerializer(many=True, )\n cohort = ReviewFilteredPrimaryKeyRelatedField(\n 
queryset=CohortResult.objects.all(), source='cohort_fk')\n reviewer_fk = serializers.PrimaryKeyRelatedField(\n queryset=User.objects.all(), allow_null=True, allow_empty=True,\n required=False)\n cohort_id = serializers.IntegerField(required=False)\n # after database fusion\n # creator = ReducedUserSerializer(allow_null=True, read_only=True)\n # reviewer = ReducedUserSerializer(allow_null=True, read_only=True)\n\n class Meta:\n model = ExportRequest\n fields = \"__all__\"\n read_only_fields = [\n \"export_request_id\",\n \"request_datetime\",\n \"execution_request_datetime\",\n \"validation_request_datetime\",\n \"is_user_notified\",\n \"target_location\",\n \"target_name\",\n \"creator_id\",\n \"reviewer_id\",\n \"cleaned_at\",\n # Base\n \"insert_datetime\",\n \"update_datetime\",\n \"delete_datetime\",\n # Job\n \"request_job_id\",\n \"request_job_status\",\n \"new_request_job_status\",\n \"request_job_fail_msg\",\n \"request_job_duration\",\n \"review_request_datetime\",\n \"reviewer_fk\",\n # to deprecate\n \"status\",\n \"status_info\",\n ]\n extra_kwargs = {\n 'cohort': {'required': True},\n 'output_format': {'required': True},\n 'creator': {'required': True},\n 'owner': {'required': True},\n }\n\n def create_tables(self, tables, req):\n for table in tables:\n # table[\"export_request\"] = req #.id\n ExportRequestTable.objects.create(\n export_request=req, **table\n )\n\n def validate_owner_rights(self, validated_data):\n cont_req: Request = self.context.get('request')\n owner: User = validated_data.get('owner')\n perim_ids = list(map(int, conf_exports.get_cohort_perimeters(\n validated_data.get('cohort_fk').fhir_group_id,\n getattr(cont_req, 'jwt_session_key', None)\n )))\n\n rights = build_data_rights(owner, perim_ids)\n\n check_rights_on_perimeters_for_exports(\n rights, validated_data.get('output_format'),\n validated_data.get('nominative'))\n\n def create(self, validated_data):\n owner: User = validated_data.get('owner')\n check_email_address(owner)\n cohort: CohortResult = validated_data.get('cohort_fk')\n\n creator_is_reviewer = can_review_transfer_jupyter(\n self.context.get('request').user)\n\n if not creator_is_reviewer and cohort.owner.pk != owner.pk:\n raise ValidationError(\"The owner of the request does not own the \"\n \"Cohort requested\")\n\n if cohort.new_request_job_status != NewJobStatus.finished \\\n and cohort.request_job_status != JobStatus.FINISHED:\n raise ValidationError('The requested cohort has not successfully '\n 'finished.')\n validated_data['cohort_id'] = (validated_data.get('cohort_fk')\n .fhir_group_id)\n\n output_format = validated_data.get('output_format')\n validated_data['motivation'] = validated_data.get('motivation', \"\")\\\n .replace(\"\\n\", \" -- \")\n\n if output_format in [ExportType.HIVE, ExportType.PSQL]:\n self.validate_sql_hive(validated_data, creator_is_reviewer)\n else:\n self.validate_csv(validated_data)\n\n tables = validated_data.pop(\"tables\", [])\n req = super(ExportRequestSerializer, self).create(validated_data)\n\n self.create_tables(tables, req)\n try:\n from exports.tasks import launch_request\n launch_request.delay(req.id)\n except Exception as e:\n req.new_request_job_status = NewJobStatus.failed\n req.request_job_fail_msg = f\"INTERNAL ERROR: \" \\\n f\"Could not launch celery task: {e}\"\n return req\n\n def validate_sql_hive(self, validated_data, creator_is_reviewer: bool):\n target_unix_account: Account = validated_data.get(\n 'target_unix_account', None)\n if target_unix_account is None:\n raise 
ValidationError(\"Pour une demande d'export hive, il faut \"\n \"fournir target_unix_account\")\n\n owner = validated_data.get('owner')\n if creator_is_reviewer:\n validated_data['status'] = VALIDATED_STATUS\n validated_data['reviewer_fk'] = self.context.get('request').user\n else:\n if not conf_workspaces.is_user_bound_to_unix_account(\n owner, target_unix_account.aphp_ldap_group_dn):\n raise ValidationError(\n f\"Le compte Unix destinataire ({target_unix_account.pk}) \"\n f\"n'est pas lié à l'utilisateur voulu \"\n f\"({owner.pk})\")\n self.validate_owner_rights(validated_data)\n\n def validate_csv(self, validated_data):\n validated_data[\"status\"] = VALIDATED_STATUS\n creator: User = self.context.get('request').user\n\n if validated_data.get('owner').pk != creator.pk:\n raise ValidationError(\n f\"Dans le cas d'une demande d'export CSV, vous ne pouvez pas \"\n f\"générer de demande d'export pour un autre provider_id \"\n f\"que le vôtre. (vous êtes connecté.e en tant \"\n f\"que {creator.displayed_name}\")\n if not validated_data.get('nominative'):\n raise ValidationError(\n \"Actuellement, la demande d'export CSV en pseudo-anonymisée \"\n \"n'est pas possible.\"\n )\n\n self.validate_owner_rights(validated_data)\n\n def update(self, instance, validated_data):\n raise ValidationError(\"Update is not authorized. \"\n \"Please use urls /deny or /validate\")\n\n\nclass OwnedCohortPrimaryKeyRelatedField(serializers.PrimaryKeyRelatedField):\n def get_queryset(self):\n request = self.context.get('request', None)\n queryset = super(OwnedCohortPrimaryKeyRelatedField, self).get_queryset()\n if not request or not queryset:\n return None\n return queryset.filter(owner=request.user)\n\n\nclass ExportRequestSerializerNoReviewer(ExportRequestSerializer):\n cohort = OwnedCohortPrimaryKeyRelatedField(\n queryset=CohortResult.objects.all(), source='cohort_fk')\n\n\nclass AnnexeAccountSerializer(serializers.ModelSerializer):\n class Meta:\n model = Account\n fields = ('uid', 'username', 'name', 'firstname', 'lastname', 'gid',)\n read_only_fields = ('uid', 'username', 'name', 'firstname',\n 'lastname', 'gid',)\n\n\nclass AnnexeCohortResultSerializer(serializers.ModelSerializer):\n dated_measure = serializers.SlugRelatedField(read_only=True,\n slug_field='measure')\n\n class Meta:\n model = CohortResult\n fields = ('uuid', 'owner', 'name', 'description', 'dated_measure',\n 'created_at', 'request_job_status', 'fhir_group_id')\n read_only_fields = ('owner', 'name', 'description', 'dated_measure',\n 'created_at', 'request_job_status', 'fhir_group_id')\n","sub_path":"exports/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":11179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"428266462","text":"import numpy as np\nimport math\nimport matplotlib.pyplot as plt\nimport os\nimport sys\nimport regex as re\n\nclass cubereader():\n def __init__(self, fpath):\n self.nAtoms = 0\n self.origin = np.zeros((3,), dtype=float)\n self.nBasis = np.zeros((3,), dtype=int)\n self.mInc = np.zeros((3,3,), dtype=float)\n\n with open(fpath) as f:\n content = f.readlines()\n\n content = [x.strip() for x in content]\n content = content[2:]\n\n l = re.split('\\s+', content[0])\n #print(l)\n self.nAtoms = int(l[0])\n self.origin[0] = l[1]\n self.origin[1] = l[2]\n self.origin[2] = l[3]\n\n self.iAn = np.zeros((self.nAtoms,), dtype=int)\n self.iCh = np.zeros((self.nAtoms,), dtype=float)\n self.iCd = np.zeros((self.nAtoms,3), dtype=float)\n\n content 
= content[1:]\n\n for i in range(3):\n l = re.split('\\s+', content[i])\n #print(l)\n self.nBasis[i] = l[0]\n self.mInc[i,0] = l[1]\n self.mInc[i,1] = l[2]\n self.mInc[i,2] = l[3]\n\n self.mInc = self.mInc.transpose()\n #print(self.mInc)\n #test = np.array([1, 1, 1])\n #print(np.matmul(self.mInc, test))\n content = content[3:]\n\n for i in range(self.nAtoms):\n l = re.split('\\s+', content[i])\n #print(l)\n self.iAn[i] = l[0]\n self.iCh[i] = l[1]\n self.iCd[i,0] = l[2]\n self.iCd[i,1] = l[3]\n self.iCd[i,2] = l[4]\n\n content = content[self.nAtoms:]\n self.field = np.zeros((self.nBasis[0], self.nBasis[1], self.nBasis[2]), dtype=float)\n #print(self.nBasis)\n idx = np.zeros((3,), dtype=int)\n print(self.nBasis)\n for line in content:\n l = line.split(' ')\n for e in l:\n #print(idx[0], idx[1], idx[2], e)\n self.field[idx[0], idx[1], idx[2]] = e\n idx[2] += 1\n if idx[2] >= self.nBasis[2]:\n idx[2] = 0\n idx[1] += 1\n if idx[1] >= self.nBasis[1]:\n idx[1] = 0\n idx[0] += 1\n\n def inGrid(self, pos):\n inside = True\n pos = np.round(pos)\n #print(pos)\n #print(self.nBasis)\n if pos[0] < 0 or pos[0] > float(self.nBasis[0]):\n #print(pos[0], \"outside\", 0, float(self.nBasis[0]))\n inside = False\n if pos[1] < 0 or pos[1] > float(self.nBasis[1]):\n #print(pos[1], \"outside\", 0, float(self.nBasis[1]))\n inside = False\n if pos[2] < 0 or pos[2] > float(self.nBasis[2]):\n #print(pos[2], \"outside\", 0, float(self.nBasis[2]))\n inside = False\n return inside\n\n def getPos(self, basis, val, pos, ldir):\n newpos = np.zeros((3,), dtype=float)\n if basis < 0 or basis > 3:\n sys.exit(\"basis choice invalid\")\n k = (val - pos[basis]) / ldir[basis]\n newpos = pos + (k * ldir)\n #newpos[basis] = val\n #print(\"from\", basis, val, pos, ldir, \"new\", newpos)\n return newpos\n\n def validLine(self, pos, ldir):\n valid = False\n points = []\n #print(\"CHECKING LINE\", ldir)\n if not ldir[0] == 0:\n test1 = self.getPos(0, 0, pos, ldir)\n if self.inGrid(test1):\n valid = True\n points.append(test1)\n test2 = self.getPos(0, self.nBasis[0], pos, ldir)\n if self.inGrid(test2):\n valid = True\n points.append(test2)\n if not ldir[1] == 0:\n test3 = self.getPos(1, 0, pos, ldir)\n if self.inGrid(test3):\n valid = True\n points.append(test3)\n test4 = self.getPos(1, self.nBasis[1], pos, ldir)\n if self.inGrid(test4):\n valid = True\n points.append(test4)\n if not ldir[2] == 0:\n test5 = self.getPos(2, 0, pos, ldir)\n #print(test5)\n if self.inGrid(test5):\n valid = True\n points.append(test5)\n test6 = self.getPos(2, self.nBasis[2], pos, ldir)\n #print(test6)\n if self.inGrid(test6):\n valid = True\n points.append(test6)\n #print(\"line\", pos, ldir, \"crosses through: \", points)\n return valid, points\n\n def interpolate(self, pos):\n fpos = np.floor(pos)\n x = int(fpos[0])\n y = int(fpos[1])\n z = int(fpos[2])\n v000 = self.field[x, y, z]\n v001 = self.field[x, y, z+1]\n v010 = self.field[x, y+1, z]\n v011 = self.field[x, y+1, z+1]\n v100 = self.field[x+1, y, z]\n v101 = self.field[x+1, y, z+1]\n v110 = self.field[x+1, y+1, z]\n v111 = self.field[x+1, y+1, z+1]\n npos = pos - fpos\n\n #reduce x\n vx00 = v000 + npos[0] * (v100 - v000)\n vx01 = v001 + npos[0] * (v101 - v001)\n vx10 = v010 + npos[0] * (v110 - v010)\n vx11 = v011 + npos[0] * (v111 - v011)\n\n #reduce y\n vxy0 = vx00 + npos[1] * (vx10 - vx00)\n vxy1 = vx01 + npos[1] * (vx11 - vx01)\n\n #reduce z\n vxyz = vxy0 + npos[2] * (vxy1 - vxy0)\n\n return vxyz\n\n\n def plotline(self, pos, dir, res, interpolate=False):\n \"\"\"\n res is the resolution (if 0 
then assume gridsize)\n \"\"\"\n if res == 0:\n res = self.nBasis[np.argmax(dir)]\n # transform the position and direction to fit the grid, move position\n # to edge of grid\n iInc = np.linalg.inv(self.mInc)\n tpos = np.matmul(iInc, pos) - self.origin\n tdir = np.matmul(iInc, dir) - self.origin\n\n #print(\"transdir\", tdir)\n\n validCheck = self.validLine(tpos, tdir)\n if not validCheck[0]:\n sys.exit(\"line choice invalid\")\n #print(validCheck)\n start = validCheck[1][0]\n end = validCheck[1][1]\n # hacky way to avoid dups\n if np.array_equal(start, end):\n end = validCheck[1][2]\n\n if not tdir[0] == 0:\n if tdir[0] / (end[0] - start[0]) < 0:\n tstart = end\n tend = start\n else:\n tstart = start\n tend = end\n elif not tdir[1] == 0:\n if tdir[1] / (end[1] - start[1]) < 0:\n tstart = end\n tend = start\n else:\n tstart = start\n tend = end\n elif not tdir[2] == 0:\n if tdir[2] / (end[2] - start[2]) < 0:\n tstart = end\n tend = start\n else:\n tstart = start\n tend = end\n else:\n sys.exit(\"direction invalid\")\n\n ndir = (tend - tstart) / res\n #print(tstart, tend)\n #print(ndir)\n\n # pick integer values of position each time\n # ppos = parameterized position\n ppos = np.arange(res)\n data = np.zeros(res)\n for i in ppos:\n ipos = tstart + (ndir * i)\n if not interpolate:\n ipos = np.rint(ipos)\n data[i] = self.field[int(ipos[0]), int(ipos[1]), int(ipos[2])]\n else:\n data[i] = self.interpolate(ipos)\n\n plt.plot(ppos, data)\n #plt.show()\n\n\n\n # plot vs z only\n #data = self.field[0,0,:]\n #print(data)\n #plt.plot(np.arange(self.nBasis[2]) * self.mInc[2,2], data)\n #plt.show()\n\n def plotaverageline(self, axis):\n #print(np.shape(self.field))\n #print(np.shape(np.average(self.field, axis=0)))\n if axis == 0:\n data = np.average(np.average(self.field, axis=1), axis=1)\n plt.plot(np.arange(self.nBasis[0]), data)\n if axis == 1:\n data = np.average(np.average(self.field, axis=0), axis=1)\n plt.plot(np.arange(self.nBasis[1]), data)\n if axis == 2:\n data = np.average(np.average(self.field, axis=0), axis=0)\n plt.plot(np.arange(self.nBasis[2]), data)\n #plt.show()\n\n def plotsimpleplane(self, axis, pos):\n if axis == 0:\n plt.matshow(self.field[pos,:,:], cmap='hot')\n if axis == 1:\n plt.matshow(self.field[:,pos,:], cmap='hot')\n if axis == 2:\n plt.matshow(self.field[:,:,pos], cmap='hot')\n plt.colorbar()\n plt.show()\n\n def get_ppd(self, vec):\n nz = 0 - vec[0] - vec[1]\n n = np.array([1, 1, nz])\n return n\n\n def plane_l2n(self, a, b, c, d):\n n = np.array([a, b, c])\n p = d / c\n r0 = np.array([0, 0, p])\n return r0, n\n\n def plane_l2v(self, a, b, c, d):\n n = np.array([a, b, c])\n u = self.get_ppd(n)\n v = np.cross(n, u)\n p = d / c\n r0 = np.array([0, 0, p])\n return r0, u, v\n\n def plane_n2l(self, r0, n):\n a = n[0]\n b = n[1]\n c = n[2]\n d = np.dot(r0, n)\n return a, b, c, d\n\n def plane_n2v(self, r0, n):\n u = self.get_ppd(n)\n v = np.cross(n, u)\n return r0, u, v\n\n def plane_v2n(self, r0, u, v):\n n = np.cross(u, v)\n return r0, n\n\n def plane_v2l(self, r0, u, v):\n n = np.cross(u, v)\n a = n[0]\n b = n[1]\n c = n[2]\n d = np.dot(n, r0)\n return a, b, c, d\n\n def plane_intersection(self, r1, n1, r2, n2):\n #print(\"getting intersection for\", n1, n2)\n (a1, b1, c1, d1) = self.plane_n2l(r1, n1)\n (a2, b2, c2, d2) = self.plane_n2l(r2, n2)\n #print(\"d\", d1, d2)\n point = np.zeros((3,), dtype=float)\n case = 0\n if np.count_nonzero(n1) == 1:\n #print(\"n1 is special case\")\n b1 = np.nonzero(n1)[0]\n #print(\"non zero at index\", b1)\n point[b1] = d1 / n1[b1]\n 
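# note: the case value built up below encodes which plane normals are axis-aligned:\n # n1 alone gives case = b1 + 2 (values 2..4), n2 alone gives case = b2 + 5\n # (values 5..7), and both together give 8..10; each branch below solves one layout.\n 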
#print(point)\n case += b1 + 2\n if np.count_nonzero(n2) == 1:\n #print(\"n2 is special case\")\n b2 = np.nonzero(n2)[0]\n #print(\"non zero at index\", b2)\n point[b2] = d2 / n2[b2]\n #print(point)\n case += b2 + 5\n #print(\"case\", case)\n if case == 0:\n # standard scenario, no normal has zeros\n point[0] = 0\n point[1] = (c1 * d2 - d1 * c2) / (c1 * b2 - b1 * c2)\n point[2] = (d1 - b1 * point[1]) / c1\n if case == 2:\n # x set\n if b2 == 0:\n point[1] = 0\n point[2] = (a1 * d2 - a2 * d1) / (a1 * c2)\n else:\n point[2] = 0\n point[1] = (a1 * d2 - a2 * d1) / (a1 * b2)\n if case == 3:\n # y set\n if a2 == 0:\n point[0] = 0\n point[2] = (b1 * d2 - b2 * d1) / (b1 * c2)\n else:\n point[2] = 0\n point[0] = (b1 * d2 - b2 * d1) / (b1 * a2)\n if case == 4:\n # z set\n if a2 == 0:\n point[0] = 0\n point[1] = (c1 * d2 - d1 * c2) / (c1 * b2)\n else:\n point[1] = 0\n point[0] = (c1 * d2 - d1 * c2) / (c1 * a2)\n if case == 5:\n # x set\n if b1 == 0:\n point[1] = 0\n point[2] = (a2 * d1 - a1 * d2) / (a2 * c1)\n else:\n point[2] = 0\n point[1] = (a2 * d1 - a1 * d2) / (a2 * b1)\n if case == 6:\n # y set\n if a1 == 0:\n point[0] = 0\n point[2] = (b2 * d1 - b1 * d2) / (b2 * c1)\n else:\n point[2] = 0\n point[0] = (b2 * d1 - b1 * d2) / (b2 * a1)\n if case == 7:\n # z set\n if a1 == 0:\n point[0] = 0\n point[1] = (c2 * d1 - d2 * c1) / (c2 * b1)\n else:\n point[1] = 0\n point[0] = (c2 * d1 - d2 * c1) / (c2 * a1)\n if case == 8:\n # x, y set\n point[2] = 0\n if case == 9:\n # x, z set\n point[1] = 0\n if case == 10:\n # y, z set\n point[0] = 0\n #print(\"final point\", point)\n return point\n\n def get_jk(self, r0, u, v, p):\n if u[0] == 0 and v[0] == 0:\n # use y, z equations to find j, k\n k = ((u[1] * (p[2] - r0[2])) - (u[2] * (p[1] - r0[1]))) / (u[1] * v[2] - u[2] * v[1])\n j = (p[1] - r0[1] - (k * v[1])) / u[1]\n elif u[1] == 0 and v[1] == 0:\n # use x, z equations to find j, k\n k = ((u[0] * (p[2] - r0[2])) - (u[2] * (p[0] - r0[0]))) / (u[0] * v[2] - u[2] * v[0])\n j = (p[0] - r0[0] - (k * v[0])) / u[0]\n else:\n # use x, y equations to find j, k\n k = ((u[0] * (p[1] - r0[1])) - (u[1] * (p[0] - r0[0]))) / (u[0] * v[1] - u[1] * v[0])\n j = (p[0] - r0[0] - (k * v[0])) / u[0]\n return j, k\n\n def normalize(self, vec):\n mag = (vec[0] ** 2 + vec[1] ** 2 + vec[2] ** 2) ** 0.5\n if mag == 0:\n sys.exit(\"cannot normalize a zero vector\")\n return vec / mag\n\n def plotplane(self, plane_type, arg1=None, arg2=None, arg3=None, arg4=None, res=0, interpolate=False):\n if plane_type == 1:\n (p0, u, v) = self.plane_n2v(arg1, arg2)\n elif plane_type == 2:\n p0 = arg1\n u = arg2\n v = arg3\n elif plane_type == 3:\n (p0, u, v) = self.plane_l2v(arg1, arg2, arg3, arg4)\n else:\n sys.exit(\"plane type invalid\")\n iInc = np.linalg.inv(self.mInc)\n t0 = np.matmul(iInc, p0) - self.origin\n tu = np.matmul(iInc, u) - self.origin\n tv = np.matmul(iInc, v) - self.origin\n tn = self.normalize(np.cross(tu, tv))\n print(\"plane equation:\", \"pos\", t0, \"u vec\", tu, \"v vec\", tv, \"normal\", tn)\n\n # now have the transformed equation of the plane\n # can think of cube as planes with boundaries\n # step 0: normalize direction vectors\n nu = self.normalize(tu)\n nv = self.normalize(tv)\n if res == 0:\n res = self.nBasis[np.argmax(tu)]\n\n # step 1: take each cube plane and find intersections between planes\n\n # face 1: xy plane, z = 0\n c1n = np.array([0, 0, 1])\n c1p = np.array([0, 0, 0])\n\n l1dir = np.cross(tn, c1n)\n l1pos = self.plane_intersection(c1p, c1n, t0, tn)\n\n # face 2: xy plane, z = self.nBasis[2]\n c2n = np.array([0, 
0, 1])\n c2p = np.array([0, 0, self.nBasis[2]])\n\n l2dir = np.cross(tn, c2n)\n l2pos = self.plane_intersection(c2p, c2n, t0, tn)\n\n # face 3: xz plane, y = 0\n c3n = np.array([0, 1, 0])\n c3p = np.array([0, 0, 0])\n\n l3dir = np.cross(tn, c3n)\n l3pos = self.plane_intersection(c3p, c3n, t0, tn)\n\n # face 4: xz plane, y = self.nBasis[1]\n c4n = np.array([0, 1, 0])\n c4p = np.array([0, self.nBasis[1], 0])\n\n l4dir = np.cross(tn, c4n)\n l4pos = self.plane_intersection(c4p, c4n, t0, tn)\n\n # face 5: yz plane, x = 0\n c5n = np.array([1, 0, 0])\n c5p = np.array([0, 0, 0])\n\n l5dir = np.cross(tn, c5n)\n l5pos = self.plane_intersection(c5p, c5n, t0, tn)\n\n # face 6: yz plane, x = self.nBasis[0]\n c6n = np.array([1, 0, 0])\n c6p = np.array([self.nBasis[0], 0, 0])\n\n l6dir = np.cross(tn, c6n)\n l6pos = self.plane_intersection(c6p, c6n, t0, tn)\n\n # step 2: are these intersections in the boundaries?\n (l1valid, l1pts) = self.validLine(l1pos, l1dir)\n #print(\"line 1: \", l1pos, l1dir, \"validity:\", l1valid, l1pts)\n (l2valid, l2pts) = self.validLine(l2pos, l2dir)\n #print(\"line 2: \", l2pos, l2dir, \"validity:\", l2valid, l2pts)\n (l3valid, l3pts) = self.validLine(l3pos, l3dir)\n #print(\"line 3: \", l3pos, l3dir, \"validity:\", l3valid, l3pts)\n (l4valid, l4pts) = self.validLine(l4pos, l4dir)\n #print(\"line 4: \", l4pos, l4dir, \"validity:\", l4valid, l4pts)\n (l5valid, l5pts) = self.validLine(l5pos, l5dir)\n #print(\"line 5: \", l5pos, l5dir, \"validity:\", l5valid, l5pts)\n (l6valid, l6pts) = self.validLine(l6pos, l6dir)\n #print(\"line 6: \", l6pos, l6dir, \"validity:\", l6valid, l6pts)\n\n # step 3: get a square in the basis of the plane that encompasses the intersection completely\n intersection_list = l1pts + l2pts + l3pts + l4pts + l5pts + l6pts\n if not intersection_list:\n sys.exit(\"no intersection possible\")\n is_np = np.array(intersection_list)\n #print(\"intersection list = \", is_np)\n is_u = np.zeros((len(intersection_list)), dtype=float)\n is_v = np.zeros((len(intersection_list)), dtype=float)\n for i, val in enumerate(is_np):\n (is_u[i], is_v[i]) = self.get_jk(t0, nu, nv, val)\n\n # step 4: find the range of constants in the equation of the plane (v form)\n ##print(\"is_u\", is_u, \"is_v\", is_v)\n umin = np.amin(is_u)\n umax = np.amax(is_u)\n vmin = np.amin(is_v)\n vmax = np.amax(is_v)\n\n # step 5: loop through points and perform interpolation if necessary\n urange = umax - umin\n vrange = vmax - vmin\n if vrange >= urange:\n vinc = vrange / res\n uinc = vinc\n else:\n uinc = urange / res\n vinc = uinc\n vni = int(math.ceil(vrange / vinc))\n uni = int(math.ceil(urange / uinc))\n data = np.zeros((uni, vni), dtype=float)\n for iu in np.arange(uni):\n for iv in np.arange(vni):\n dpos = np.array([0.1, 0.1, 0.1])\n ipos = t0 + (nu * (umin + (iu * uinc))) + (nv * (vmin + (iv * vinc)))\n if not interpolate:\n ipos = np.rint(ipos)\n if self.inGrid(ipos+dpos):\n #print(ipos)\n data[iu, iv] = self.field[int(ipos[0]), int(ipos[1]), int(ipos[2])]\n else:\n data[iu, iv] = np.nan\n else:\n dpos = dpos * 11\n if self.inGrid(ipos+dpos):\n data[iu, iv] = self.interpolate(ipos)\n else:\n data[iu, iv] = np.nan\n\n plt.matshow(data, cmap='hot')\n plt.colorbar()\n plt.show()\n\n def add_cube(self, cube):\n #checks\n\n self.field = self.field + cube.field\n return\n\n\n\n\nif __name__ == '__main__':\n args = sys.argv\n if len(args) > 3:\n cr1 = cubereader(args[2])\n cr2 = cubereader(args[3])\n cr1.plotaverageline(2)\n cr2.plotaverageline(2)\n if args[1] == '-add':\n cr3 = 
cubereader(args[2])\n cr3.add_cube(cr2)\n cr3.plotaverageline(2)\n plt.show()\n if len(args) > 2:\n cr = cubereader(args[2])\n if args[1] == '-line':\n cr.plotline(np.array([0, 0, 0]), np.array([0, 0, 1]), 0)\n plt.show()\n if args[1] == '-plane':\n cr.plotsimpleplane(2, 0)\n elif len(args) > 1:\n cr = cubereader(args[1])\n if len(args) > 2:\n cq = cubereader(args[2])\n cr.add_cube(cq)\n elif len(args) == 1:\n sys.exit(\"no input file given\")\n #pos = np.array([0, 0, 0])\n #dir = np.array([0, 0, 1])\n #res = 0\n #cr.plotline(pos, dir, res)\n #cr.plotaverageline(2)\n #r0 = np.array([0, 0, 0])\n #n = np.array([0, 0, 1])\n #u = np.array([1, 0, 0])\n #v = np.array([0, 1, 0])\n #res = 200\n #cr.plotplane(2, arg1=r0, arg2=u, arg3=v, res=res, interpolate=True)\n #cr.plotsimpleplane(2, 0)\n","sub_path":"environ/cubereader.py","file_name":"cubereader.py","file_ext":"py","file_size_in_byte":19773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"222600340","text":"import cv2\nimport numpy as np\nimport copy\nfrom preprocess import *\n\ndef line_segment(image,not_bin_image):\n im = image.copy()\n im_2 = not_bin_image.copy()\n bounds = []\n horiz_hist = np.count_nonzero(image, axis=1)\n i = 0\n while(i < len(horiz_hist)):\n if(horiz_hist[i] == 0):\n # measure the run of empty rows starting at i\n count = 0\n j = 0\n while(i+j < len(horiz_hist) and horiz_hist[i+j] == 0):\n count+=1\n j+=1\n if(count > 4):\n im_2[i+int(count/2),:] = 0\n bounds.append(i+int(count/2))\n i+=j\n else:\n i+=1\n line = 1\n result_lines = []\n result_lines_not_bin = []\n for i in range(len(bounds)-1):\n result_lines.append(im[bounds[i]+1:bounds[i+1],:])\n \n result_lines_not_bin.append(im_2[bounds[i]+1:bounds[i+1],:])\n # cv2.imwrite(\"line\"+str(line)+\".png\",im_2[bounds[i]+1:bounds[i+1],:])\n line+=1\n return result_lines,result_lines_not_bin,line-1\n\ndef word_segment(image,not_bin_image,threshold,scale):\n (ys , xs )= image.shape\n img = cv2.resize(image,(xs*scale,ys*scale), interpolation=cv2.INTER_AREA)\n not_bin_image = cv2.resize(not_bin_image, (xs*scale, ys*scale), interpolation=cv2.INTER_AREA)\n im = img.copy()\n im_2 = not_bin_image.copy()\n vert_hist = np.count_nonzero(im > 127, axis=0)\n \n bounds = []\n k = 0\n while(k < len(vert_hist)):\n if(vert_hist[k] == 0):\n # measure the run of empty columns starting at k\n count = 0\n j = 0\n while(k+j < len(vert_hist) and vert_hist[k+j] == 0):\n count+=1\n j+=1\n if(count > threshold):\n im[:,k+int(count/2)] = 255\n im_2[:,k+int(count/2)] = 0\n bounds.append(k+int(count/2))\n k+=j\n else:\n k+=1\n word = 1\n result_words = []\n for i in range(len(bounds)-1):\n result = im[:,bounds[i]+1:bounds[i+1]]\n result = im_2[:,bounds[i]+1:bounds[i+1]]\n result_words.append(result)\n word += 1\n\n return result_words,word-1\n\n\n\ndef word_seg(clean_img,clean_img_not_bin):\n lines, not_bin_lines, size_lines = line_segment(clean_img,clean_img_not_bin)\n all_words = []\n count = 0 \n for j in range(size_lines):\n words,size_words = word_segment(lines[j],not_bin_lines[j],10,5)# 10 --> threshold: hyperparameter\n words = words[::-1]\n all_words += words\n return all_words\n","sub_path":"word_segment.py","file_name":"word_segment.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"326931933","text":"# -----------------------------------------------------------------------------------\r\n# rt4x4MatrixToTRS Version 1.0\r\n# Author: Ryan Trowbridge\r\n# Contact: admin@rtrowbridge.com \r\n#\r\n# Please give credit where credit is due; this is written by Ryan Trowbridge\r\n# with lots of experimentation to try to get python to work correctly.\r\n# \r\n# This node is similar to a combination of two nodes that ship with Maya called fourByFourMatrix and the decomposeMatrix\r\n# It also has the attributes parentInverseMatrix, normalize, and 
offset TRS attributes\r\n# You can technically do what this node does with only Maya nodes but you would have to use several to mimic this one node\r\n#\r\n# To use this node:\r\n# Load this script as a plugin using the tool under the Main Menu Window\\Settings Preferences\\Plug-in Manager\r\n# Open the hypershade and navigate to general utilities\r\n# Select the rt4x4MatrixToTRS utility node in the menu to create it\r\n#\r\n# The user can connect, drive, or use expressions to create the 16 floats of a matrix for the input\r\n# The output of this node is the 16 floats turned into a matrix and converted into translate, euler rotation, and scale\r\n# There is a normalize option also that forces the matrix rotation vectors to be of unit length\r\n# -----------------------------------------------------------------------------------\t\r\n\r\nimport sys\r\nimport math\r\nimport maya.OpenMaya as OpenMaya\r\nimport maya.OpenMayaMPx as OpenMayaMPx\r\n\r\ndebug = False\r\n\r\n# -----------------------------------------------------------------------------------\r\n# define the node type name\r\n# define the node class\r\n# define the node unique id ( use: cmds.getClassification( 'nodeName' ) to find a nodes class)\r\n# -----------------------------------------------------------------------------------\r\nkMatrixUtilNodeTypeName = \"rt4x4MatrixToTRS\"\r\nkMatrixUtilNodeClassify = \"utility/general\"\r\nkMatrixUtilNodeId = OpenMaya.MTypeId(0x87105)\r\n#this is a non commercial plugin id, I might release one with a commercial id if requested\r\n\r\n\r\n# define a new matrixUtilNode class derived from the MPxNode class\r\nclass rtMatrixUtilNode(OpenMayaMPx.MPxNode):\r\n\r\n\t# class variables\r\n\t\r\n\tparentInverseMatrix = OpenMaya.MObject()\r\n\t\r\n\tmatrixIn = OpenMaya.MObject()\r\n\t\r\n\tin00 = OpenMaya.MObject()\r\n\tin01 = OpenMaya.MObject()\r\n\tin02 = OpenMaya.MObject()\r\n\tin03 = OpenMaya.MObject()\r\n\t\r\n\tin10 = OpenMaya.MObject()\r\n\tin11 = OpenMaya.MObject()\r\n\tin12 = OpenMaya.MObject()\r\n\tin13 = OpenMaya.MObject()\r\n\t\r\n\tin20 = OpenMaya.MObject()\r\n\tin21 = OpenMaya.MObject()\r\n\tin22 = OpenMaya.MObject()\r\n\tin23 = OpenMaya.MObject()\r\n\t\r\n\tin30 = OpenMaya.MObject()\r\n\tin31 = OpenMaya.MObject()\r\n\tin32 = OpenMaya.MObject()\r\n\tin33 = OpenMaya.MObject()\r\n\t\t\r\n\tout_t = OpenMaya.MObject()\r\n\tout_r = OpenMaya.MObject()\r\n\tout_s = OpenMaya.MObject()\r\n\t\r\n\toffset_t = OpenMaya.MObject()\r\n\toffset_r = OpenMaya.MObject()\r\n\toffset_s = OpenMaya.MObject()\r\n\t\r\n\teulRotateOrder = OpenMaya.MObject()\r\n\tnormalize = OpenMaya.MObject()\r\n\r\n\tdef __init__(self):\r\n\t\tOpenMayaMPx.MPxNode.__init__(self)\r\n\t\t\r\n\t# arguments ( self, MPlug, MDataBlock) \r\n\tdef compute(self, plug, dataBlock):\r\n\t\t\r\n\t\t# if these attributes are requested, recompute their values\r\n\t\tif plug == rtMatrixUtilNode.out_t or \\\r\n\t\t\tplug == rtMatrixUtilNode.out_r or plug == rtMatrixUtilNode.out_s:\r\n\t\t\r\n\t\t\tif debug:\r\n\t\t\t\t# print to the output window if in compute()\r\n\t\t\t\tsys.__stdout__.write( \"##compute()\\n\" )\r\n\r\n\t\t\t# get MDataHandle's to attributes\r\n\t\t\t#\r\n\t\t\t\r\n\t\t\ttry:\r\n\t\t\t\tpim_dataHandle = dataBlock.inputValue( rtMatrixUtilNode.parentInverseMatrix )\r\n\t\t\texcept:\r\n\t\t\t\tsys.stderr.write( \"Failed to get inputValue parentInverseMatrix\" )\r\n\t\t\t\traise\r\n\t\t\t\r\n\t\t\t###vector X\r\n\t\t\ttry:\r\n\t\t\t\tin00_dataHandle = dataBlock.inputValue( rtMatrixUtilNode.in00 )\r\n\t\t\texcept:\r\n\t\t\t\tsys.stderr.write( 
\"Failed to get inputValue in00\" )\r\n\t\t\t\traise\r\n\t\t\ttry:\r\n\t\t\t\tin01_dataHandle = dataBlock.inputValue( rtMatrixUtilNode.in01 )\r\n\t\t\texcept:\r\n\t\t\t\tsys.stderr.write( \"Failed to get inputValue in01\" )\r\n\t\t\t\traise\r\n\t\t\ttry:\r\n\t\t\t\tin02_dataHandle = dataBlock.inputValue( rtMatrixUtilNode.in02 )\r\n\t\t\texcept:\r\n\t\t\t\tsys.stderr.write( \"Failed to get inputValue in02\" )\r\n\t\t\t\traise\r\n\t\t\ttry:\r\n\t\t\t\tin03_dataHandle = dataBlock.inputValue( rtMatrixUtilNode.in03 )\r\n\t\t\texcept:\r\n\t\t\t\tsys.stderr.write( \"Failed to get inputValue in03\" )\r\n\t\t\t\traise\r\n\t\t\t\r\n\t\t\t###vector Y\r\n\t\t\ttry:\r\n\t\t\t\tin10_dataHandle = dataBlock.inputValue( rtMatrixUtilNode.in10 )\r\n\t\t\texcept:\r\n\t\t\t\tsys.stderr.write( \"Failed to get inputValue in10\" )\r\n\t\t\t\traise\r\n\t\t\ttry:\r\n\t\t\t\tin11_dataHandle = dataBlock.inputValue( rtMatrixUtilNode.in11 )\r\n\t\t\texcept:\r\n\t\t\t\tsys.stderr.write( \"Failed to get inputValue in11\" )\r\n\t\t\t\traise\r\n\t\t\ttry:\r\n\t\t\t\tin12_dataHandle = dataBlock.inputValue( rtMatrixUtilNode.in12 )\r\n\t\t\texcept:\r\n\t\t\t\tsys.stderr.write( \"Failed to get inputValue in12\" )\r\n\t\t\t\traise\r\n\t\t\ttry:\r\n\t\t\t\tin13_dataHandle = dataBlock.inputValue( rtMatrixUtilNode.in13 )\r\n\t\t\texcept:\r\n\t\t\t\tsys.stderr.write( \"Failed to get inputValue in13\" )\r\n\t\t\t\traise\r\n\t\t\t\r\n\t\t\t###vector Z\r\n\t\t\ttry:\r\n\t\t\t\tin20_dataHandle = dataBlock.inputValue( rtMatrixUtilNode.in20 )\r\n\t\t\texcept:\r\n\t\t\t\tsys.stderr.write( \"Failed to get inputValue in20\" )\r\n\t\t\t\traise\r\n\t\t\ttry:\r\n\t\t\t\tin21_dataHandle = dataBlock.inputValue( rtMatrixUtilNode.in21 )\r\n\t\t\texcept:\r\n\t\t\t\tsys.stderr.write( \"Failed to get inputValue in21\" )\r\n\t\t\t\traise\r\n\t\t\ttry:\r\n\t\t\t\tin22_dataHandle = dataBlock.inputValue( rtMatrixUtilNode.in22 )\r\n\t\t\texcept:\r\n\t\t\t\tsys.stderr.write( \"Failed to get inputValue in22\" )\r\n\t\t\t\traise\r\n\t\t\ttry:\r\n\t\t\t\tin23_dataHandle = dataBlock.inputValue( rtMatrixUtilNode.in23 )\r\n\t\t\texcept:\r\n\t\t\t\tsys.stderr.write( \"Failed to get inputValue in23\" )\r\n\t\t\t\traise\r\n\t\t\t\r\n\t\t\t###vector T\r\n\t\t\ttry:\r\n\t\t\t\tin30_dataHandle = dataBlock.inputValue( rtMatrixUtilNode.in30 )\r\n\t\t\texcept:\r\n\t\t\t\tsys.stderr.write( \"Failed to get inputValue in30\" )\r\n\t\t\t\traise\r\n\t\t\ttry:\r\n\t\t\t\tin31_dataHandle = dataBlock.inputValue( rtMatrixUtilNode.in31 )\r\n\t\t\texcept:\r\n\t\t\t\tsys.stderr.write( \"Failed to get inputValue in31\" )\r\n\t\t\t\traise\r\n\t\t\ttry:\r\n\t\t\t\tin32_dataHandle = dataBlock.inputValue( rtMatrixUtilNode.in32 )\r\n\t\t\texcept:\r\n\t\t\t\tsys.stderr.write( \"Failed to get inputValue in32\" )\r\n\t\t\t\traise\r\n\t\t\ttry:\r\n\t\t\t\tin33_dataHandle = dataBlock.inputValue( rtMatrixUtilNode.in33 )\r\n\t\t\texcept:\r\n\t\t\t\tsys.stderr.write( \"Failed to get inputValue in33\" )\r\n\t\t\t\traise\r\n\t\t\t\t\r\n\t\t\t###offset trs handles\r\n\t\t\ttry:\r\n\t\t\t\toffset_t_dataHandle = dataBlock.inputValue( rtMatrixUtilNode.offset_t )\t\r\n\t\t\texcept:\r\n\t\t\t\tsys.stderr.write( \"Failed to get inputValue offset_t\" )\r\n\t\t\t\traise\r\n\r\n\t\t\ttry:\r\n\t\t\t\toffset_r_dataHandle = dataBlock.inputValue( rtMatrixUtilNode.offset_r )\r\n\t\t\texcept:\r\n\t\t\t\tsys.stderr.write( \"Failed to get inputValue offset_r\" )\r\n\t\t\t\traise\r\n\r\n\t\t\ttry:\r\n\t\t\t\toffset_s_dataHandle = dataBlock.inputValue( rtMatrixUtilNode.offset_s 
)\r\n\t\t\texcept:\r\n\t\t\t\tsys.stderr.write( \"Failed to get inputValue offset_s\" )\r\n\t\t\t\traise\r\n\t\t\t\t\r\n\t\t\t### user option attributes\r\n\t\t\ttry:\r\n\t\t\t\tnormalize_dataHandle = dataBlock.inputValue( rtMatrixUtilNode.normalize)\r\n\t\t\texcept:\r\n\t\t\t\tsys.stderr.write( \"Failed to get inputValue normalize\" )\r\n\t\t\t\traise\r\n\t\t\t\r\n\t\t\ttry:\r\n\t\t\t\trotOrder_dataHandle = dataBlock.inputValue( rtMatrixUtilNode.eulRotateOrder )\r\n\t\t\texcept:\r\n\t\t\t\tsys.stderr.write( \"Failed to get inputValue eulRotateOrder\" )\r\n\t\t\t\traise\r\n\t\t\t\r\n\t\t\t###trs handles\r\n\t\t\ttry:\r\n\t\t\t\tot_dataHandle = dataBlock.outputValue( rtMatrixUtilNode.out_t )\t\r\n\t\t\texcept:\r\n\t\t\t\tsys.stderr.write( \"Failed to get inputValue out_t\" )\r\n\t\t\t\traise\r\n\t\t\t\t\r\n\t\t\ttry:\r\n\t\t\t\tor_dataHandle = dataBlock.outputValue( rtMatrixUtilNode.out_r )\r\n\t\t\texcept:\r\n\t\t\t\tsys.stderr.write( \"Failed to get inputValue out_r\" )\r\n\t\t\t\traise\r\n\t\t\t\t\r\n\t\t\ttry:\r\n\t\t\t\tos_dataHandle = dataBlock.outputValue( rtMatrixUtilNode.out_s )\r\n\t\t\texcept:\r\n\t\t\t\tsys.stderr.write( \"Failed to get inputValue out_s\" )\r\n\t\t\t\traise\r\n\t\t\t\t\r\n\t\t\t\t\r\n\t\t\t# get values from dataHandles\r\n\t\t\t#\r\n\t\t\tpInvMatrix_value = pim_dataHandle.asMatrix()\r\n\t\t\t\r\n\t\t\tin00_value = in00_dataHandle.asFloat()\r\n\t\t\tin01_value = in01_dataHandle.asFloat()\r\n\t\t\tin02_value = in02_dataHandle.asFloat()\r\n\t\t\tin03_value = in03_dataHandle.asFloat()\r\n\t\t\t\r\n\t\t\tin10_value = in10_dataHandle.asFloat()\r\n\t\t\tin11_value = in11_dataHandle.asFloat()\r\n\t\t\tin12_value = in12_dataHandle.asFloat()\r\n\t\t\tin13_value = in13_dataHandle.asFloat()\r\n\t\t\t\r\n\t\t\tin20_value = in20_dataHandle.asFloat()\r\n\t\t\tin21_value = in21_dataHandle.asFloat()\r\n\t\t\tin22_value = in22_dataHandle.asFloat()\r\n\t\t\tin23_value = in23_dataHandle.asFloat()\r\n\t\t\t\r\n\t\t\tin30_value = in30_dataHandle.asFloat()\r\n\t\t\tin31_value = in31_dataHandle.asFloat()\r\n\t\t\tin32_value = in32_dataHandle.asFloat()\r\n\t\t\tin33_value = in33_dataHandle.asFloat()\r\n\t\t\t\r\n\t\t\tnormalize_value = normalize_dataHandle.asBool()\r\n\t\t\t\r\n\t\t\toffset_t_value = offset_t_dataHandle.asFloatVector()\r\n\t\t\toffset_r_value = offset_r_dataHandle.asFloatVector()\r\n\t\t\toffset_s_value = offset_s_dataHandle.asFloatVector()\r\n\r\n\t\t\t\r\n\t\t\t# Note there is a Maya Python bug with enum attributes\r\n\t\t\t# you must use MDataHandle.asShort() to get the proper value\r\n\t\t\trotOrder_value = rotOrder_dataHandle.asShort()\r\n\t\t\t\r\n\t\t\t# get the matrix as vectors\r\n\t\t\tx_vector = OpenMaya.MVector(in00_value, in01_value, in02_value)\r\n\t\t\ty_vector = OpenMaya.MVector(in10_value, in11_value, in12_value)\r\n\t\t\tz_vector = OpenMaya.MVector(in20_value, in21_value, in22_value)\r\n\t\t\t\r\n\t\t\t# user option for normalizing the matrix rotation vectors\r\n\t\t\tif normalize_value:\r\n\t\t\t\tx_vector.normalize()\r\n\t\t\t\ty_vector.normalize()\r\n\t\t\t\tz_vector.normalize()\r\n\t\t\t\r\n\t\t\t# create a matrix from the input values\r\n\t\t\tgetMatrix = OpenMaya.MMatrix()\r\n\t\t\tmatrixList = (x_vector.x, x_vector.y, x_vector.z, in03_value,\r\n\t\t\t\t\ty_vector.x, y_vector.y, y_vector.z, in13_value,\r\n\t\t\t\t\tz_vector.x, z_vector.y, z_vector.z, in23_value,\r\n\t\t\t\t\tin30_value, in31_value, in32_value, in33_value)\r\n\t\t\t\t\t\t\r\n\t\t\tOpenMaya.MScriptUtil().createMatrixFromList(matrixList, getMatrix)\r\n\t\t\t\r\n\t\t\tif 
debug:\r\n\t\t\t\t#print matrix values\r\n\t\t\t\tsys.__stdout__.write(\"matrix out: ( \" + str(x_vector.x) + \", \" + str(x_vector.y) + \", \" + str(x_vector.z) + \", \" + str(0.0) + \", \\n\" + \\\r\n\t\t\t\t\t\t\t\tstr(y_vector.x) + \", \" + str(y_vector.y) + \", \" + str(y_vector.z) + \", \" + str(0.0) + \", \\n\" + \\\r\n\t\t\t\t\t\t\t\tstr(z_vector.x) + \", \" + str(z_vector.y) + \", \" + str(z_vector.z) + \", \" + str(0.0) + \", \\n\" + \\\r\n\t\t\t\t\t\t\t\tstr(in30_value) + \", \" + str(in31_value) + \", \" + str(in32_value) + \", \" + str(1.0) + \" )\\n\" )\r\n\t\r\n\t\t\t# Multiply parentInverseMatrix by the matrix created by the user\r\n\t\t\tfinalMatrix = ( getMatrix * pInvMatrix_value )\r\n\t\r\n\t\t\t# MTransformationMatrix\r\n\t\t\tmTM = OpenMaya.MTransformationMatrix( finalMatrix)\r\n\t\t\t\r\n\t\t\t# Get the translation\r\n\t\t\ttrans = mTM.getTranslation( OpenMaya.MSpace.kTransform )\r\n\t\t\t\r\n\t\t\t# Get the rotation\r\n\t\t\tmquat = mTM.rotation()\r\n\t\t\trot = mquat.asEulerRotation()\r\n\t\t\trot.reorderIt( rotOrder_value )\r\n\t\t\t\r\n\t\t\t# Get the scale\r\n\t\t\tscaleDoubleArray = OpenMaya.MScriptUtil()\r\n\t\t\tscaleDoubleArray.createFromList( [0.0, 0.0, 0.0], 3 )\r\n\t\t\tscaleDoubleArrayPtr = scaleDoubleArray.asDoublePtr()\r\n\t\t\t\r\n\t\t\tmTM.getScale( scaleDoubleArrayPtr, OpenMaya.MSpace.kTransform)\r\n\t\t\t\t\t\t\r\n\t\t\tx_scale = OpenMaya.MScriptUtil().getDoubleArrayItem( scaleDoubleArrayPtr, 0 )\r\n\t\t\ty_scale = OpenMaya.MScriptUtil().getDoubleArrayItem( scaleDoubleArrayPtr, 1 )\r\n\t\t\tz_scale = OpenMaya.MScriptUtil().getDoubleArrayItem( scaleDoubleArrayPtr, 2 )\r\n\t\t\t\r\n\t\t\tif debug:\r\n\t\t\t\t# print to the output window the output values that are to be set\r\n\t\t\t\tsys.__stdout__.write( \"rotate order: \" + str( rotOrder_value ) + \"\\n\" )\r\n\t\t\t\tsys.__stdout__.write( \"normalize: \" + str( normalize_value ) + \"\\n\" )\r\n\t\t\t\t\r\n\t\t\t\tsys.__stdout__.write( \"trans.x: \" + str(trans.x) + \"\\n\" )\r\n\t\t\t\tsys.__stdout__.write( \"trans.y: \" + str(trans.y) + \"\\n\" )\r\n\t\t\t\tsys.__stdout__.write( \"trans.z: \" + str(trans.z) + \"\\n\" )\r\n\r\n\t\t\t\tsys.__stdout__.write( \"rot.x: \" + str(math.degrees( rot.x )) + \"\\n\" )\r\n\t\t\t\tsys.__stdout__.write( \"rot.y: \" + str(math.degrees( rot.y )) + \"\\n\" )\r\n\t\t\t\tsys.__stdout__.write( \"rot.z: \" + str(math.degrees( rot.z )) + \"\\n\" )\r\n\t\t\t\t\r\n\t\t\t\tsys.__stdout__.write( \"x_scale: \" + str(x_scale) + \"\\n\" )\r\n\t\t\t\tsys.__stdout__.write( \"y_scale: \" + str(y_scale) + \"\\n\" )\r\n\t\t\t\tsys.__stdout__.write( \"z_scale: \" + str(z_scale) + \"\\n\" )\r\n\r\n\t\t\t# get the final result vectors as a MFloatVector\r\n\t\t\tresultTrans = OpenMaya.MFloatVector(trans.x, trans.y, trans.z)\r\n\t\t\tresultRot = OpenMaya.MFloatVector(math.degrees( rot.x ), math.degrees( rot.y ), math.degrees( rot.z ))\r\n\t\t\tresultScale = OpenMaya.MFloatVector( x_scale, y_scale, z_scale)\r\n\r\n\t\t\t# set the output trs values\r\n\t\t\tot_dataHandle.setMFloatVector( (resultTrans + offset_t_value) )\r\n\t\t\tor_dataHandle.setMFloatVector( (resultRot + offset_r_value) )\r\n\t\t\tos_dataHandle.setMFloatVector( (resultScale + offset_s_value) )\r\n\t\t\t\r\n\t\t\t# set the plug clean so maya knows it can update\r\n\t\t\tdataBlock.setClean(plug)\r\n\t\r\n\t\telse:\r\n\t\t\treturn OpenMaya.kUnknownParameter\r\n\t\t\r\n\t\treturn OpenMaya.MStatus.kSuccess\r\n\t\t\t\r\ndef nodeCreator():\r\n\r\n\treturn OpenMayaMPx.asMPxPtr( rtMatrixUtilNode() )\r\n\r\n# create and 
initialize the attributes to the node\r\ndef nodeInitializer():\r\n\r\n\tnAttr = OpenMaya.MFnNumericAttribute()\r\n\teAttr = OpenMaya.MFnEnumAttribute()\r\n\tnMAttr = OpenMaya.MFnMatrixAttribute()\r\n\tcAttr = OpenMaya.MFnCompoundAttribute()\r\n\t\r\n\t# create input attributes\r\n\t#\r\n\t\r\n\trtMatrixUtilNode.parentInverseMatrix = nMAttr.create( \"parentInverseMatrix\", \"pim\", OpenMaya.MFnMatrixAttribute.kDouble )\r\n\tnMAttr.setWritable(True)\r\n\tnMAttr.setStorable(True)\r\n\tnMAttr.setReadable(True)\r\n\tnMAttr.setKeyable(True)\r\n\t\r\n\t# Vector X\r\n\trtMatrixUtilNode.in00 = nAttr.create(\"in00\", \"i00\", OpenMaya.MFnNumericData.kFloat, 0.0)\r\n\tnAttr.setWritable(True)\r\n\tnAttr.setStorable(True)\r\n\tnAttr.setReadable(True)\r\n\tnAttr.setKeyable(True)\r\n\t\r\n\trtMatrixUtilNode.in01 = nAttr.create(\"in01\", \"i01\", OpenMaya.MFnNumericData.kFloat, 0.0)\r\n\tnAttr.setWritable(True)\r\n\tnAttr.setStorable(True)\r\n\tnAttr.setReadable(True)\r\n\tnAttr.setKeyable(True)\r\n\t\r\n\trtMatrixUtilNode.in02 = nAttr.create(\"in02\", \"i02\", OpenMaya.MFnNumericData.kFloat, 0.0)\r\n\tnAttr.setWritable(True)\r\n\tnAttr.setStorable(True)\r\n\tnAttr.setReadable(True)\r\n\tnAttr.setKeyable(True)\r\n\t\r\n\trtMatrixUtilNode.in03 = nAttr.create(\"in03\", \"i03\", OpenMaya.MFnNumericData.kFloat, 0.0)\r\n\tnAttr.setWritable(True)\r\n\tnAttr.setStorable(True)\r\n\tnAttr.setReadable(True)\r\n\tnAttr.setKeyable(True)\r\n\t\r\n\t# Vector Y\r\n\trtMatrixUtilNode.in10 = nAttr.create(\"in10\", \"i10\", OpenMaya.MFnNumericData.kFloat, 0.0)\r\n\tnAttr.setWritable(True)\r\n\tnAttr.setStorable(True)\r\n\tnAttr.setReadable(True)\r\n\tnAttr.setKeyable(True)\r\n\r\n\trtMatrixUtilNode.in11 = nAttr.create(\"in11\", \"i11\", OpenMaya.MFnNumericData.kFloat, 0.0)\r\n\tnAttr.setWritable(True)\r\n\tnAttr.setStorable(True)\r\n\tnAttr.setReadable(True)\r\n\tnAttr.setKeyable(True)\r\n\r\n\trtMatrixUtilNode.in12 = nAttr.create(\"in12\", \"i12\", OpenMaya.MFnNumericData.kFloat, 0.0)\r\n\tnAttr.setWritable(True)\r\n\tnAttr.setStorable(True)\r\n\tnAttr.setReadable(True)\r\n\tnAttr.setKeyable(True)\r\n\t\r\n\trtMatrixUtilNode.in13 = nAttr.create(\"in13\", \"i13\", OpenMaya.MFnNumericData.kFloat, 0.0)\r\n\tnAttr.setWritable(True)\r\n\tnAttr.setStorable(True)\r\n\tnAttr.setReadable(True)\r\n\tnAttr.setKeyable(True)\r\n\t\r\n\t# Vector Z\r\n\trtMatrixUtilNode.in20 = nAttr.create(\"in20\", \"i20\", OpenMaya.MFnNumericData.kFloat, 0.0)\r\n\tnAttr.setWritable(True)\r\n\tnAttr.setStorable(True)\r\n\tnAttr.setReadable(True)\r\n\tnAttr.setKeyable(True)\r\n\r\n\trtMatrixUtilNode.in21 = nAttr.create(\"in21\", \"i21\", OpenMaya.MFnNumericData.kFloat, 0.0)\r\n\tnAttr.setWritable(True)\r\n\tnAttr.setStorable(True)\r\n\tnAttr.setReadable(True)\r\n\tnAttr.setKeyable(True)\r\n\r\n\trtMatrixUtilNode.in22 = nAttr.create(\"in22\", \"i22\", OpenMaya.MFnNumericData.kFloat, 0.0)\r\n\tnAttr.setWritable(True)\r\n\tnAttr.setStorable(True)\r\n\tnAttr.setReadable(True)\r\n\tnAttr.setKeyable(True)\r\n\t\r\n\trtMatrixUtilNode.in23 = nAttr.create(\"in23\", \"i23\", OpenMaya.MFnNumericData.kFloat, 0.0)\r\n\tnAttr.setWritable(True)\r\n\tnAttr.setStorable(True)\r\n\tnAttr.setReadable(True)\r\n\tnAttr.setKeyable(True)\r\n\t\r\n\t# Vector T\r\n\trtMatrixUtilNode.in30 = nAttr.create(\"in30\", \"i30\", OpenMaya.MFnNumericData.kFloat, 0.0)\r\n\tnAttr.setWritable(True)\r\n\tnAttr.setStorable(True)\r\n\tnAttr.setReadable(True)\r\n\tnAttr.setKeyable(True)\r\n\r\n\trtMatrixUtilNode.in31 = nAttr.create(\"in31\", \"i31\", OpenMaya.MFnNumericData.kFloat, 
0.0)\r\n\tnAttr.setWritable(True)\r\n\tnAttr.setStorable(True)\r\n\tnAttr.setReadable(True)\r\n\tnAttr.setKeyable(True)\r\n\r\n\trtMatrixUtilNode.in32 = nAttr.create(\"in32\", \"i32\", OpenMaya.MFnNumericData.kFloat, 0.0)\r\n\tnAttr.setWritable(True)\r\n\tnAttr.setStorable(True)\r\n\tnAttr.setReadable(True)\r\n\tnAttr.setKeyable(True)\r\n\t\r\n\trtMatrixUtilNode.in33 = nAttr.create(\"in33\", \"i33\", OpenMaya.MFnNumericData.kFloat, 1.0)\r\n\tnAttr.setWritable(True)\r\n\tnAttr.setStorable(True)\r\n\tnAttr.setReadable(True)\r\n\tnAttr.setKeyable(True)\r\n\t\r\n\r\n\t# create TRS offset attributes\r\n\t#\r\n\trtMatrixUtilNode.offset_t = nAttr.createPoint(\"offsetTranslate\", \"oft\" )\r\n\tnAttr.setWritable(True)\r\n\tnAttr.setStorable(True)\r\n\tnAttr.setReadable(True)\r\n\tnAttr.setKeyable(True)\r\n\t\r\n\trtMatrixUtilNode.offset_r = nAttr.createPoint(\"offsetRotate\", \"ofr\" )\r\n\tnAttr.setWritable(True)\r\n\tnAttr.setStorable(True)\r\n\tnAttr.setReadable(True)\r\n\tnAttr.setKeyable(True)\r\n\t\r\n\trtMatrixUtilNode.offset_s = nAttr.createPoint(\"offsetScale\", \"ofs\" )\r\n\tnAttr.setWritable(True)\r\n\tnAttr.setStorable(True)\r\n\tnAttr.setReadable(True)\r\n\tnAttr.setKeyable(True)\r\n\t\r\n\t\r\n\t# create TRS output attributes\r\n\t#\r\n\trtMatrixUtilNode.out_t = nAttr.createPoint(\"outputTranslate\", \"ot\" )\r\n\tnAttr.setWritable(False)\r\n\tnAttr.setStorable(False)\r\n\tnAttr.setReadable(True)\r\n\t\r\n\trtMatrixUtilNode.out_r = nAttr.createPoint(\"outputRotate\", \"or\" )\r\n\tnAttr.setWritable(False)\r\n\tnAttr.setStorable(False)\r\n\tnAttr.setReadable(True)\r\n\t\r\n\trtMatrixUtilNode.out_s = nAttr.createPoint(\"outputScale\", \"os\" )\r\n\tnAttr.setWritable(False)\r\n\tnAttr.setStorable(False)\r\n\tnAttr.setReadable(True)\r\n\t\r\n\t# create rotate order enum attribute\r\n\trtMatrixUtilNode.eulRotateOrder = eAttr.create( \"eulerRotateOrder\", \"ero\", 0 )\r\n\teAttr.addField(\"XYZ\", 0)\r\n\teAttr.addField(\"YZX\", 1)\r\n\teAttr.addField(\"ZXY\", 2)\r\n\teAttr.addField(\"XZY\", 3)\r\n\teAttr.addField(\"YXZ\", 4)\r\n\teAttr.addField(\"ZYX\", 5)\r\n\teAttr.setWritable(True)\r\n\teAttr.setStorable(True)\r\n\teAttr.setReadable(True)\r\n\teAttr.setKeyable(False)\r\n\t\r\n\t# create normalize attribute\r\n\trtMatrixUtilNode.normalize = nAttr.create(\"normalize\", \"n\", OpenMaya.MFnNumericData.kBoolean , 0)\r\n\tnAttr.setWritable(True)\r\n\tnAttr.setStorable(True)\r\n\tnAttr.setReadable(True)\r\n\tnAttr.setKeyable(True)\r\n\r\n\t# create compound attribute\r\n\t#\r\n\trtMatrixUtilNode.matrixIn = cAttr.create( \"matrixIn\", \"mi\" )\r\n\t\r\n\tcAttr.addChild( rtMatrixUtilNode.in00 )\r\n\tcAttr.addChild( rtMatrixUtilNode.in01 )\r\n\tcAttr.addChild( rtMatrixUtilNode.in02 )\r\n\tcAttr.addChild( rtMatrixUtilNode.in03 )\r\n\t\r\n\tcAttr.addChild( rtMatrixUtilNode.in10 )\r\n\tcAttr.addChild( rtMatrixUtilNode.in11 )\r\n\tcAttr.addChild( rtMatrixUtilNode.in12 )\r\n\tcAttr.addChild( rtMatrixUtilNode.in13 )\r\n\t\r\n\tcAttr.addChild( rtMatrixUtilNode.in20 )\r\n\tcAttr.addChild( rtMatrixUtilNode.in21 )\r\n\tcAttr.addChild( rtMatrixUtilNode.in22 )\r\n\tcAttr.addChild( rtMatrixUtilNode.in23 )\r\n\t\r\n\tcAttr.addChild( rtMatrixUtilNode.in30 )\r\n\tcAttr.addChild( rtMatrixUtilNode.in31 )\r\n\tcAttr.addChild( rtMatrixUtilNode.in32 )\r\n\tcAttr.addChild( rtMatrixUtilNode.in33 )\r\n\r\n\r\n\t# add attribues\r\n\t#\r\n\trtMatrixUtilNode.addAttribute( rtMatrixUtilNode.normalize )\r\n\trtMatrixUtilNode.addAttribute( rtMatrixUtilNode.eulRotateOrder )\r\n\trtMatrixUtilNode.addAttribute( 
rtMatrixUtilNode.parentInverseMatrix )\r\n\trtMatrixUtilNode.addAttribute( rtMatrixUtilNode.matrixIn )\r\n\trtMatrixUtilNode.addAttribute( rtMatrixUtilNode.offset_t )\r\n\trtMatrixUtilNode.addAttribute( rtMatrixUtilNode.offset_r )\r\n\trtMatrixUtilNode.addAttribute( rtMatrixUtilNode.offset_s )\r\n\r\n\trtMatrixUtilNode.addAttribute( rtMatrixUtilNode.out_t )\r\n\trtMatrixUtilNode.addAttribute( rtMatrixUtilNode.out_r )\r\n\trtMatrixUtilNode.addAttribute( rtMatrixUtilNode.out_s )\t\r\n\t\r\n\t# Setup which attributes affect each other\t\r\n\trtMatrixUtilNode.attributeAffects ( rtMatrixUtilNode.matrixIn, rtMatrixUtilNode.out_t )\r\n\trtMatrixUtilNode.attributeAffects ( rtMatrixUtilNode.normalize, rtMatrixUtilNode.out_t )\r\n\trtMatrixUtilNode.attributeAffects ( rtMatrixUtilNode.eulRotateOrder, rtMatrixUtilNode.out_t )\r\n\trtMatrixUtilNode.attributeAffects ( rtMatrixUtilNode.parentInverseMatrix, rtMatrixUtilNode.out_t )\r\n\trtMatrixUtilNode.attributeAffects ( rtMatrixUtilNode.offset_t, rtMatrixUtilNode.out_t )\r\n\t\r\n\trtMatrixUtilNode.attributeAffects ( rtMatrixUtilNode.matrixIn, rtMatrixUtilNode.out_r )\r\n\trtMatrixUtilNode.attributeAffects ( rtMatrixUtilNode.normalize, rtMatrixUtilNode.out_r )\r\n\trtMatrixUtilNode.attributeAffects ( rtMatrixUtilNode.eulRotateOrder, rtMatrixUtilNode.out_r )\r\n\trtMatrixUtilNode.attributeAffects ( rtMatrixUtilNode.parentInverseMatrix, rtMatrixUtilNode.out_r )\r\n\trtMatrixUtilNode.attributeAffects ( rtMatrixUtilNode.offset_r, rtMatrixUtilNode.out_r )\r\n\t\r\n\trtMatrixUtilNode.attributeAffects ( rtMatrixUtilNode.matrixIn, rtMatrixUtilNode.out_s )\r\n\trtMatrixUtilNode.attributeAffects ( rtMatrixUtilNode.normalize, rtMatrixUtilNode.out_s )\r\n\trtMatrixUtilNode.attributeAffects ( rtMatrixUtilNode.eulRotateOrder, rtMatrixUtilNode.out_s )\r\n\trtMatrixUtilNode.attributeAffects ( rtMatrixUtilNode.parentInverseMatrix, rtMatrixUtilNode.out_s )\r\n\trtMatrixUtilNode.attributeAffects ( rtMatrixUtilNode.offset_s, rtMatrixUtilNode.out_s )\r\n\t\r\n\t\r\n# initialize the script plug-in\r\ndef initializePlugin(mobject):\r\n\tmplugin = OpenMayaMPx.MFnPlugin(mobject, \"Autodesk\", \"1.0\", \"Any\")\r\n\ttry:\r\n\t\tmplugin.registerNode( kMatrixUtilNodeTypeName, kMatrixUtilNodeId, nodeCreator, nodeInitializer, OpenMayaMPx.MPxNode.kDependNode, kMatrixUtilNodeClassify)\r\n\texcept:\r\n\t\tsys.stderr.write( \"Failed to register node: %s\" % kMatrixUtilNodeTypeName )\r\n\t\traise\r\n\r\n\r\n# uninitialize the script plug-in\r\ndef uninitializePlugin(mobject):\r\n\tmplugin = OpenMayaMPx.MFnPlugin(mobject)\r\n\ttry:\r\n\t\tmplugin.deregisterNode( kMatrixUtilNodeId )\r\n\texcept:\r\n\t\tsys.stderr.write( \"Failed to deregister node: %s\" % kMatrixUtilNodeTypeName )\r\n\t\traise\r\n","sub_path":"doc/source/rt4x4MatrixToTRS/rt4x4MatrixToTRS.py","file_name":"rt4x4MatrixToTRS.py","file_ext":"py","file_size_in_byte":21780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"336446026","text":"import os\nfrom logging import getLogger\n\nimport docopt\nfrom anybadge import Badge\n\nfrom cosmic_ray.config import ConfigDict, load_config\nfrom cosmic_ray.tools.survival_rate import survival_rate\nfrom cosmic_ray.work_db import use_db, WorkDB\n\nlog = getLogger()\n\n\ndef generate_badge():\n \"\"\"cr-badge\n\nUsage: cr-badge [--config <config_file>] <badge_file> <session-file>\n\nGenerate badge file.\n\noptions:\n --config <config_file> Configuration file to use instead of session configuration\n\"\"\"\n\n arguments = docopt.docopt(generate_badge.__doc__, version='cr-format 0.1')\n 
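# docopt maps each token in the usage pattern above to a dict entry; a hypothetical\n # run such as cr-badge --config cr.conf badge.svg session.sqlite would yield\n # {'--config': 'cr.conf', '<badge_file>': 'badge.svg', '<session-file>': 'session.sqlite'}\n 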
config_file = arguments['<badge_file>'.replace('badge', 'config') if False else '--config']\n badge_filename = arguments['<badge_file>']\n\n with use_db(arguments['<session_file>'], WorkDB.Mode.open) as db:\n assert isinstance(db, WorkDB)\n if config_file:\n config = load_config(config_file)\n else:\n config = db.get_config()\n\n percent = 100 - survival_rate(db)\n\n badge = Badge(\n label=config.badge_label,\n value=percent,\n value_format=config.badge_format,\n thresholds=config.badge_thresholds,\n )\n\n log.info((\"Generating badge: \" + config.badge_format) % percent)\n\n try:\n os.unlink(badge_filename)\n except OSError:\n pass\n\n badge.write_badge(badge_filename)\n","sub_path":"src/cosmic_ray/tools/badge.py","file_name":"badge.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"373571133","text":"from django.conf.urls import patterns, url\nfrom issue.views import IssueViewSet\n\nissue_list = IssueViewSet.as_view({\n 'get': 'list',\n 'post': 'create'\n})\n\nissue_detail = IssueViewSet.as_view({\n 'get': 'retrieve',\n 'put': 'update',\n 'patch': 'partial_update',\n 'delete': 'destroy'\n})\n\nurlpatterns = patterns(\n '',\n url(r'^$', issue_list, name='issue-list'),\n url(r'^(?P<pk>\\d+)$', issue_detail, name='issue-detail'),\n)\n","sub_path":"django-apps/placed_backend/issue/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"321081258","text":"import unittest\r\nfrom common import huibenpara\r\nfrom common import commonfunction\r\nfrom interface import getCoursepage_test\r\n\r\n\r\n\r\nclass allCourses(unittest.TestCase):\r\n ''' Course detail page '''\r\n\r\n @classmethod\r\n def setUpClass(cls):\r\n cls.url= huibenpara.host + '/huiben/client/course/getCourseInfo'\r\n cls.before_result=getCoursepage_test.allcourses().setUpClass()['data']['list']\r\n cls.api_results=commonfunction.setupRequest().get_result(cls.url, cls.before_result,\"id\",\r\n {'token': 'fb65286c559bf899', 'userId': '215630', 'isVip': '1'}, huibenpara.base_para)\r\n #print(cls.api_results)\r\n return cls.api_results\r\n\r\n def test_baseinfo(self):\r\n ''' Verify course base info: \"id\",\"price\",\"vipPrice\",\"discountPrice\",\"isVip\" '''\r\n self.basedata = commonfunction.setupBasedata().get_basedatas(\"plan_course\", \"id\", self.before_result)\r\n api_data=commonfunction.previewData().get_dict_values('data',self.api_results)\r\n api_dealresults = commonfunction.previewData().get_dict_datas([\"id\",\"price\",\"vipPrice\",\"discountPrice\",\"isVip\"], api_data)\r\n base_dealresults=commonfunction.previewData().get_list_listdict_data(self.basedata,[\"id\",\"price\",\"vipPrice\",\"discountPrice\",\"isVip\"])\r\n self.assertEqual(api_dealresults, base_dealresults)\r\n\r\n def test_responsetcode(self):\r\n '''resultCode=0'''\r\n responsecodes=commonfunction.previewData().get_dict_datas([\"resultCode\"],self.api_results)\r\n commonfunction.resultsAssert().assertcircle(responsecodes,\"resultCode\",\"0\")\r\n\r\n def test_contentnum(self):\r\n '''Verify the number of catalog chapters'''\r\n api_results=self.api_results\r\n apicontent=commonfunction.previewData().get_values(api_results,\"data\")\r\n self.apicontents=commonfunction.previewData().get_values(apicontent,\"planContents\")\r\n self.basecontent=commonfunction.setupBasedata().get_basedatas2(\"plan_content\",\"id\",\"courseId\",self.before_result)\r\n apilen=commonfunction.previewData().lld_len(self.apicontents)\r\n baselen=commonfunction.previewData().lld_len(self.basecontent)\r\n 
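# (added comment) apilen is the chapter count from the API response, baselen the row count\r\n # from the plan_content table for the same courses; the endpoint is correct only if they match.\r\n 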
self.assertEqual(apilen,baselen)\r\n\r\n\r\n\r\n","sub_path":"interface/getCourseInfo_test.py","file_name":"getCourseInfo_test.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"42224533","text":"import sys\nimport re\n\ncontent = []\nnameOfFiles = []\nextensionHolder =[]\nnumberOfFiles = int(input())\ncountSpace = 0\ncount = 0\n\nfor i in range(numberOfFiles):\n fileName = input()\n for char in fileName:\n count += 1\n if(char == '.'):\n nameOfFiles.append(fileName[:count-1])\n extensionHolder.append(fileName[count-1:])\n while True:\n line = input()\n if line == '':\n countSpace += 1\n if countSpace == 3:\n\n print (\"EOF\")\n countSpace = 0\n count = 0\n break\n else:\n countSpace = 0\n count = 0\n content.append(re.sub('[$]', '',line))\n #print (line)\n\ncombinedFileName = \" \".join(nameOfFiles)\ncamelFileName = \" \".join(x for x in combinedFileName.title() if not x.isspace())\ncamelFileName2 = camelFileName.replace(\" \",\"\")\nfinalCamel = camelFileName2[0].lower() + camelFileName2[1:]\nfinalFileName = finalCamel + extensionHolder[0]\n\nprint(\"Below is the output:\")\nprint(finalFileName)\nfor line in content:\n print (line)\n","sub_path":"P1/concatenate.py","file_name":"concatenate.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"27415512","text":"# -*- coding: utf-8 -*-\nimport time\nfrom multiprocessing import Process\n\n\ndef dance(n):\n for i in range(n):\n print(\"---- child process run #%s ----\" % i)\n time.sleep(0.1)\n\n\ndef main():\n p1 = Process(target=dance, args=(10, ))\n p1.start() # start the child process\n print(\"--- main process: before join (blocking) ---\")\n p1.join()\n print(\"--- main process: after join ---\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"多任务/进程/01-4 进程阻塞.py","file_name":"01-4 进程阻塞.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"648507217","text":"#!/usr/bin/env python\n\nimport time\nimport os\nimport subprocess\nimport sys\n\n\n\nwhile True: #endless cycle\n\n\ttemp=subprocess.check_output('/opt/vc/bin/vcgencmd measure_temp', shell=True) #read cpu temperature\n\ttemp_file=open('/home/pi/scripts/rasp_cpu_temp','w') #open file for write\n\ttemp_file.write(temp) #write temperature to file\n\ttemp_file.close() #close file\n\tos.system('scp /home/pi/scripts/rasp_cpu_temp maaxlee@178.62.249.235:/home/maaxlee/scripts/') #send file to remote host\n\n\tdht_temp=subprocess.check_output('sudo python /home/pi/Adafruit_Python_DHT/examples/matest.py | head -n1', shell=True)\n\tdht_temp_file=open('/home/pi/scripts/dht_temp','w')\n\tdht_temp_file.write(dht_temp)\n\tdht_temp_file.close()\n\tos.system('scp /home/pi/scripts/dht_temp maaxlee@178.62.249.235:/home/maaxlee/scripts/')\n\n\tdht_hum=subprocess.check_output('sudo python /home/pi/Adafruit_Python_DHT/examples/matest.py | tail -n1', shell=True)\n\tdht_hum_file=open('/home/pi/scripts/dht_hum','w')\n\tdht_hum_file.write(dht_hum)\n\tdht_hum_file.close()\n\tos.system('scp /home/pi/scripts/dht_hum maaxlee@178.62.249.235:/home/maaxlee/scripts/')\n\n\t\n\t#wait for five minutes\n\ttime.sleep(300)\n\n\n\n\n","sub_path":"full_temp_lcd.py","file_name":"full_temp_lcd.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"98784260","text":"\"\"\"Manage class and methods for 
sessions.\"\"\"\nimport logging\nimport utils\n\n# Load logging configuration\nlog = logging.getLogger(__name__)\n\n\ndef update_session_status(authorization: str, session_id: int, session_status: str):\n \"\"\"Update a session status.\"\"\"\n mutation = 'mutation{updateSessionById(input:{id:session_id,sessionPatch:{status:\"session_status\"}}){session{status}}}'\n mutation = mutation.replace('session_id', str(session_id)) # Use replace() instead of format() because of curly braces\n mutation = mutation.replace('session_status', str(session_status)) # Use replace() instead of format() because of curly braces\n mutation = {'query': mutation} # Convert to dictionary\n data = utils.execute_graphql_request(authorization, mutation)\n return data\n","sub_path":"scripts/init/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"652230699","text":"# -*- encoding: utf-8\n\"\"\"\nConfig handler.\n\"\"\"\n\nimport os\nimport json\nimport logging\n\nfrom . import helpers\nfrom . import suplemon_module\n\n\nclass Config:\n def __init__(self, app):\n self.app = app\n self.logger = logging.getLogger(__name__)\n self.default_config_filename = \"defaults.json\"\n self.default_keymap_filename = \"keymap.json\"\n self.config_filename = \"suplemon-config.json\"\n self.keymap_filename = \"suplemon-keymap.json\"\n self.home_dir = os.path.expanduser(\"~\")\n self.config_dir = os.path.join(self.home_dir, \".config\", \"suplemon\")\n\n self.defaults = {}\n self.keymap = {}\n self.config = {}\n\n def init(self):\n self.create_config_dir()\n return self.load_defaults()\n\n def path(self):\n return os.path.join(self.config_dir, self.config_filename)\n\n def keymap_path(self):\n return os.path.join(self.config_dir, self.keymap_filename)\n\n def set_path(self, path):\n parts = os.path.split(path)\n self.config_dir = parts[0]\n self.config_filename = parts[1]\n\n def load(self):\n path = self.path()\n config = False\n if not os.path.exists(path):\n self.logger.debug(\"Configuration file '{0}' doesn't exist.\".format(path))\n else:\n config = self.load_config_file(path)\n if config is not False:\n self.logger.debug(\"Loaded configuration file '{0}'\".format(path))\n self.config = self.merge_defaults(config)\n else:\n self.logger.info(\"Failed to load config file '{0}'.\".format(path))\n self.config = dict(self.defaults)\n self.load_keys()\n return config\n\n def load_keys(self):\n path = self.keymap_path()\n keymap = False\n if not os.path.exists(path):\n self.logger.debug(\"Keymap file '{0}' doesn't exist.\".format(path))\n return False\n keymap = self.load_config_file(path)\n if not keymap:\n self.logger.info(\"Failed to load keymap file '{0}'.\".format(path))\n return False\n # Prepend the user keys to the defaults to give the user config a higher priority\n keymap += self.keymap\n self.keymap = self.normalize_keys(keymap)\n return True\n\n def normalize_keys(self, keymap):\n \"\"\"Normalize the order of modifier keys in keymap.\"\"\"\n modifiers = [\"shift\", \"ctrl\", \"alt\", \"meta\"] # The modifiers in correct order\n for item in keymap:\n new_keys = []\n for key_item in item[\"keys\"]:\n parts = key_item.split(\"+\")\n key = parts[-1]\n if len(parts) < 2:\n new_keys.append(key)\n continue\n normalized = \"\"\n for mod in modifiers: # Add the used modifiers back in correct order\n if mod in parts:\n normalized += mod + \"+\"\n normalized += key\n new_keys.append(normalized)\n 
item[\"keys\"] = new_keys\n return keymap\n\n def load_defaults(self):\n if not self.load_default_config() or not self.load_default_keys():\n return False\n return True\n\n def load_default_config(self):\n path = os.path.join(self.app.path, \"config\", self.default_config_filename)\n config = self.load_config_file(path)\n if not config:\n self.logger.error(\"Failed to load default config file '{0}'!\".format(path))\n return False\n self.defaults = config\n return True\n\n def load_default_keys(self):\n path = os.path.join(self.app.path, \"config\", self.default_keymap_filename)\n config = self.load_config_file(path)\n if not config:\n self.logger.error(\"Failed to load default keymap file '{0}'!\".format(path))\n return False\n self.keymap = config\n return True\n\n def reload(self):\n \"\"\"Reload the config file.\"\"\"\n return self.load()\n\n def store(self):\n \"\"\"Write current config state to file.\"\"\"\n data = json.dumps(self.config)\n f = open(self.config_filename)\n f.write(data)\n f.close()\n\n def merge_defaults(self, config):\n \"\"\"Fill any missing config options with defaults.\"\"\"\n for prim_key in self.defaults.keys():\n curr_item = self.defaults[prim_key]\n if prim_key not in config.keys():\n config[prim_key] = dict(curr_item)\n continue\n for sec_key in curr_item.keys():\n if sec_key not in config[prim_key].keys():\n config[prim_key][sec_key] = curr_item[sec_key]\n return config\n\n def load_config_file(self, path):\n try:\n f = open(path)\n data = f.read()\n f.close()\n data = self.remove_config_comments(data)\n config = json.loads(data)\n return config\n except:\n return False\n\n def remove_config_comments(self, data):\n \"\"\"Remove comments from a 'pseudo' JSON config file.\n\n Removes all lines that begin with '#' or '//' ignoring whitespace.\n\n :param data: Commented JSON data to clean.\n :return: Cleaned pure JSON.\n \"\"\"\n lines = data.split(\"\\n\")\n cleaned = []\n for line in lines:\n line = line.strip()\n if helpers.starts(line, \"//\") or helpers.starts(line, \"#\"):\n continue\n cleaned.append(line)\n return \"\\n\".join(cleaned)\n\n def create_config_dir(self):\n if not os.path.exists(self.config_dir):\n try:\n os.makedirs(self.config_dir)\n except:\n self.app.logger.warning(\"Config folder '{0}' doesn't exist and couldn't be created.\".format(\n self.config_dir))\n\n def __getitem__(self, i):\n \"\"\"Get a config variable.\"\"\"\n return self.config[i]\n\n def __setitem__(self, i, v):\n \"\"\"Set a config variable.\"\"\"\n self.config[i] = v\n\n def __str__(self):\n \"\"\"Convert entire config array to string.\"\"\"\n return str(self.config)\n\n def __len__(self):\n \"\"\"Return length of top level config variables.\"\"\"\n return len(self.config)\n\n\nclass ConfigModule(suplemon_module.Module):\n \"\"\"Helper for shortcut for opening config files.\"\"\"\n def init(self):\n self.config_name = \"defaults.json\"\n self.config_default_path = os.path.join(self.app.path, \"config\", self.config_name)\n self.config_user_path = self.app.config.path()\n\n def run(self, app, editor, args):\n if args == \"defaults\":\n # Open the default config in a new file only for viewing\n self.open(app, self.config_default_path, read_only=True)\n else:\n self.open(app, self.config_user_path)\n\n def open(self, app, path, read_only=False):\n if read_only:\n f = open(path)\n data = f.read()\n f.close()\n file = app.new_file()\n file.set_name(self.config_name)\n file.set_data(data)\n app.switch_to_file(app.last_file_index())\n else:\n # Open the user config file for 
editing\n f = app.file_is_open(path)\n if f:\n app.switch_to_file(app.get_file_index(f))\n else:\n if not app.open_file(path):\n app.new_file(path)\n app.switch_to_file(app.last_file_index())\n","sub_path":"suplemon/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":7553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"46861401","text":"import pygame\nimport random\n\nclass FakeDoors(object):\n def __init__(self,spawnchunkx,spawnchunky,posx,posy,zielchunkx,zielchunky,zielposx,zielposy):\n self.spawnchunkx = spawnchunkx\n self.spawnchunky = spawnchunky\n\n self.spawnposx = posx\n self.spawnposy = posy\n\n self.posx = -9000\n self.posy = -9000\n\n self.zielchunkx = zielchunkx\n self.zielchunky = zielchunky\n\n self.zielposx = zielposx\n self.zielposy = zielposy\n self.image = pygame.image.load(\"Assets/Durchsichtig.png\")\n self.teleporting_range = 100\n\n self.chunkx = 0\n self.chunky = 0\n\n\n def check_teleport(self,playerposx,playerposy,chunkx,chunky,screen):\n self.chunkx = chunkx\n self.chunky = chunky\n self.playerposx = playerposx\n self.playerposy = playerposy\n if self.spawnchunkx == chunkx and self.spawnchunky == chunky:\n self.posx = self.spawnposx\n self.posy = self.spawnposy\n else:\n self.posx = -9000\n self.posy = -9000\n screen.blit(self.image,(self.posx,self.posy))\n Abstand_x = abs(self.posx - playerposx)\n Abstand_y = abs(self.posy - playerposy)\n #print(\"Distance X: \"+str(Abstand_x))\n #print(\"Distance Y: \"+str(Abstand_y))\n if Abstand_x <= self.teleporting_range and Abstand_y <= self.teleporting_range:\n self.chunkx = self.zielchunkx\n self.chunky = self.zielchunky\n self.playerposx = self.zielposx\n self.playerposy = self.zielposy\n print(\"Player teleported\")\n\n return(self.chunkx,self.chunky,self.playerposx,self.playerposy)\n\n","sub_path":"FakeDoors.py","file_name":"FakeDoors.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"216309472","text":"# WIM Python API workshop: 2019-02-01\n# Helge Marahrens: hmarahre@iu.edu\n\n# //- 1. read API documentation\n# careful: this is a wrapper, not the API itself\n# 1. they do not always make it easy to access the response\n# 2. you have no guarantee that they respect the response limit\n\n# //- 2. import packages\nimport wikipedia\nimport time\nimport pandas as pd\nimport csv\nfrom collections import defaultdict\n\n# //- 3. authentication\n# no authentication needed\n\n# //- 4. build get request\n# //- 5. send get request – (check server response)\nresult = wikipedia.page(\"Tokyo\")\n\n\n# //- 6. explore data structures\nprint(result.url)\nprint(result.title)\nprint(result.coordinates)\n\n# create a mini-dataframe of cities\ncity_list = [\"Tokyo\", \"New York City\", \"Paris\", \"London\", \"Hannover, Germany\"]\ncities_dict = defaultdict(list)\nfor city in city_list:\n result = wikipedia.page(city)\n cities_dict[result.title] = [result.url,\n float(result.coordinates[0]),\n float(result.coordinates[1])]\ndf = pd.DataFrame.from_dict(cities_dict, orient='index')\ndf.columns = ['url', 'lat', 'long']\n\n# //- 7. 
save data\n## save as csv\n#df.to_csv(\"cities.csv\")\n","sub_path":"2019-01-31_API/Wikipedia_2019-02-01_hmarahre.py","file_name":"Wikipedia_2019-02-01_hmarahre.py","file_ext":"py","file_size_in_byte":1196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"630633303","text":"#!/usr/bin/env python3\nfrom mysql.connector import MySQLConnection, Error\nimport psutil\nimport os\nimport time\nimport datetime\nimport mysql.connector\n\n#--------------------------------------------------------------------------------------------------\ndef getCPUtemperature():\n t_cpu = os.popen('vcgencmd measure_temp').readline()\n t_cpu = str(t_cpu.replace(\"temp=\",\"\").replace(\"'C\\n\",\"\"))\n return(t_cpu)\n\ndef getCPUusage():\n cpu_usage = str(psutil.cpu_percent(interval=1))\n return(cpu_usage)\n \n\ndef insert_to_db():\n \n try:\n connection = mysql.connector.connect(host='oege.ie.hva.nl',\n user='oosterr4',\n password='wAK1P#ygvQMnX$',\n db='zoosterr4')\n cursor = connection.cursor()\n except:\n print (\"No connection to the database\")\n \n temperature = (getCPUtemperature())\n usage = (getCPUusage())\n \n insert_query = \"INSERT INTO TAB_CPU (Temperature,Cpu_Load,Date,Time) VALUES (%s, %s, %s, %s)\"\n \n try:\n cursor.execute(insert_query, (str(temperature), str(usage), str(datetime.datetime.fromtimestamp(time.time()).strftime(\"%Y-%m-%d\")), str(datetime.datetime.fromtimestamp(time.time()).strftime(\"%H:%M:%S\"))))\n \n if cursor.lastrowid:\n print ('last row id: ', cursor.lastrowid)\n else:\n print ('last row id not found')\n \n connection.commit()\n \n except Error as error:\n print(error)\n \n finally:\n cursor.close()\n connection.close()\n\ndef read_from_db():\n try:\n connection = mysql.connector.connect(host='oege.ie.hva.nl',\n user='oosterr4',\n password='wAK1P#ygvQMnX$',\n db='zoosterr4')\n cursor = connection.cursor()\n except:\n print (\"No connection to the database\")\n \n sql = \"SELECT * FROM TAB_CPU ORDER BY ID DESC LIMIT 1\"\n cursor.execute(sql)\n result = cursor.fetchall()\n if result is not None:\n print ('CPU temperature: ' , result[0][1], '°C | usage: ' , result[0][2], '% | time: ' , result[0][4], ' | date: ' , result[0][3])\n \n#-----------------------------------------------------------------------------------------------------------------------------------\n\ndef main():\n while True:\n read_from_db() \n insert_to_db()\n time.sleep(1)\n\n \nif __name__ == '__main__':\n try:\n main()\n \n except KeyboardInterrupt:\n # the db helpers above open and close their own connections; nothing to clean up here\n print (\"Disconnecting...\")","sub_path":"Pikey (3)/Database.py","file_name":"Database.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"84496247","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('appherokureg', '0002_workshop_active'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='workshop',\n name='picture_url',\n field=models.URLField(blank=True),\n ),\n ]\n","sub_path":"appherokureg/migrations/0003_auto_20170206_0408.py","file_name":"0003_auto_20170206_0408.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"315360488","text":"\"\"\"Quiz URL Configuration\n\nThe `urlpatterns` list routes URLs to views. 
For more information please see:\n https://docs.djangoproject.com/en/3.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path, include\nfrom preguntas import views\n\n\n\nurlpatterns = [\n path('', views.inicio, name='inicio'),\n path('admin/', admin.site.urls),\n path('jugar/', views.jugar, name='jugar'),\n path('juego/', views.juego, name='juego'),\n # NOTE: the path converter below is reconstructed ('<int:pregunta_id>' is a hypothetical\n # name); the original angle-bracket token was stripped during extraction\n path('resultado/<int:pregunta_id>/', views.resultado_pregunta, name='resultado'),\n path('registro/', views.registro, name=\"registro\"),\n path('accounts/', include('django.contrib.auth.urls')),\n path('ranking/', views.ranking, name='ranking'),\n path('estadisticas/', views.estadisticas, name='estadisticas'),\n path('reiniciar/',views.reiniciar,name=\"reiniciar\"),\n \n \n\n\n]\n","sub_path":"Quiz/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"328529114","text":"import sys\nimport json\nfrom flask import Flask, render_template, request, redirect, Response\nfrom flask_cors import CORS, cross_origin\n\nfrom storage import QUESTIONS\nimport tool\n\n# initialization\napp = Flask(__name__)\n\nCORS(app)\n\n@app.route('/receiver', methods=['POST'])\n@cross_origin()\ndef worker():\n data = request.get_json()\n\n print(data[\"search\"])\n\n print(\"hello world\")\n\n x = {\n \"Questions\": tool.search_query(data[\"search\"]),\n }\n\n\n # convert into JSON:\n y = json.dumps(x)\n \n\n return y\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=\"5000\", debug=True)\n\n","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"382301259","text":"import elasticsearch\nfrom elasticsearch_dsl import Search, Q\nfrom elasticsearch_dsl.query import MatchAll, Terms\nfrom python.config import es, INDEX_NAME\n\ndef add_aggregations(search):\n search.aggs.bucket('instant_bookable', 'terms', field='instant_bookable')\n search.aggs.bucket('property_type', 'terms', field='property_type')\n search.aggs.bucket('bed_type', 'terms', field='bed_type')\n search.aggs.bucket('room_type', 'terms', field='room_type')\n search.aggs.bucket('neighbourhood', 'terms', field='neighbourhood')\n search.aggs.bucket('cancellation_policy', 'terms', field='cancellation_policy')\n return search\n\ndef filter_facets(search, facets):\n post_filter = MatchAll()\n for name, value in facets.items():\n if value == 0:\n value = 'false'\n if value == 1:\n value = 'true'\n print(name, value)\n f = Terms(**{name: [value]})\n post_filter &= f\n return search.filter(post_filter)\n\n\n# User name, summary and description for now.\ndef weighted_search(query, operator=\"or\", size=10, facets=None):\n query = Q(\"multi_match\", query=query, operator=operator, fields=[\"name\", \"description\", \"summary\", \"neighbourhood\", \"space\", \"amenities\"])\n\n search = Search(using=es, index=INDEX_NAME)\n search = search.query(query)\n\n search = 
add_aggregations(search)\n search = filter_facets(search, facets)\n\n return search[0:size].execute()\n\ndef advanced_search(query, operator=\"and\", size=10, facets=None):\n must = []\n for k in query.keys():\n if query[k] == '': # Prevent searching for empty fields.\n continue\n must.append({\"match\": {k: {\"query\": query[k], \"operator\": \"and\"}}})\n print(must)\n body = {\n \"bool\": {\n \"must\": must\n }\n }\n\n search = Search(using=es, index=INDEX_NAME)\n search.query = Q(body)\n\n search = add_aggregations(search)\n search = filter_facets(search, facets)\n\n return search[0:size].execute()\n\nif __name__ == '__main__':\n es = elasticsearch.Elasticsearch()\n res = weighted_search(\"indische buurt free wifi\")\n for hit in res[\"hits\"][\"hits\"]:\n print(hit[\"_score\"])\n print(\"%(name)s\" % hit[\"_source\"])\n","sub_path":"src/python/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"375847701","text":"import tablero as tb\nimport player as pl\n\nWHITE = \"white\"\nBLACK = \"black\"\n\n\nclass Game:\n # ive decided since the number of pieces is capped but the type of pieces is not\n # (pawn transformations), I've already coded much of the modularity to support just\n # using a dictionary of pieces\n def __init__(self):\n self.playersturn = WHITE\n self.check = 0\n self.message = \"this is where prompts will go\"\n self.gameboard = tb.Gameboard()\n print(\"chess program. enter moves in algebraic notation separated by space\")\n self.main()\n\n def main(self):\n self.message = \"\"\n\n print(\"Player 1:\")\n player_1 = self.parse_player(WHITE)\n print(\"Player 2:\")\n player_2 = self.parse_player(BLACK)\n\n checkmate = 0\n reyahogado = False\n self.print_board()\n while checkmate == 0 and not reyahogado:\n\n print(self.message)\n self.message = \"\"\n action_list = self.gameboard.get_action_list(self.playersturn)\n\n if self.playersturn == WHITE:\n if len(action_list) > 0:\n self.execute_action(player_1.play(action_list, self.gameboard))\n else:\n checkmate = 1\n elif self.playersturn == BLACK:\n if len(action_list) > 0:\n self.execute_action(player_2.play(action_list, self.gameboard))\n else:\n checkmate = -1\n else:\n print(\"ERROR: something went wrong\")\n\n self.print_board()\n\n if self.playersturn == BLACK:\n self.playersturn = WHITE\n else:\n self.playersturn = BLACK\n reyahogado = self.gameboard.comprobar_ahogado()\n\n if checkmate == 1:\n print(\"Black wins\")\n elif checkmate == -1:\n print(\"White wins\")\n elif reyahogado:\n print(\"Stalemate\")\n\n def execute_action(self, action):\n self.message = \"that is a valid move\"\n self.gameboard.execute_action(action)\n\n @staticmethod\n def parse_player(color):\n print(\"1-Human\")\n print(\"2-Random\")\n print(\"3-Monte Carlo\")\n print(\"4-Flat Monte Carlo\")\n while True:\n a = input()\n if a == '1':\n return pl.Human(color)\n if a == '2':\n return pl.Random(color)\n if a == '3':\n return pl.MonteCarlo(color)\n if a == '4':\n return pl.MonteCarloPlano(color)\n\n def print_board(self):\n print(\" 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |\")\n for i in range(0, 8):\n print(\"-\" * 32)\n print(chr(i + 97), end=\"|\")\n for j in range(0, 8):\n item = self.gameboard.gameboard.get((i, j), \" \")\n print(str(item) + ' |', end=\" \")\n print()\n print(\"-\" * 
32)\n\nGame()\n","sub_path":"juego.py","file_name":"juego.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"322997605","text":"import komand\nfrom .schema import DeleteUserInput, DeleteUserOutput, Input, Output, Component\n# Custom imports below\nimport requests\nimport urllib.parse\nfrom komand.exceptions import PluginException\n\n\nclass DeleteUser(komand.Action):\n def __init__(self):\n super(self.__class__, self).__init__(\n name='delete_user',\n description=Component.DESCRIPTION,\n input=DeleteUserInput(),\n output=DeleteUserOutput())\n\n def run(self, params={}):\n # Get the user by email\n email = params.get(Input.USER_EMAIL)\n okta_url = self.connection.okta_url\n\n url = requests.compat.urljoin(okta_url, f\"/api/v1/users/{urllib.parse.quote(email)}\")\n\n # Search for the user by email to get the id\n response = self.connection.session.get(url)\n\n if response.status_code != 200:\n self.logger.error(f\"Okta: Lookup User by Email failed: {response.text}\")\n return {Output.SUCCESS: False}\n\n data = response.json()\n\n user_id = data['id']\n send_email_param = {\n \"sendEmail\": params.get(Input.SEND_ADMIN_EMAIL)\n }\n\n # Deactivate the user by id\n self.logger.info(f\"Deactivating user ID: {user_id}\")\n url = requests.compat.urljoin(okta_url, f\"/api/v1/users/{user_id}\")\n response = self.connection.session.delete(url, params=send_email_param)\n\n if response.status_code == 401:\n self.logger.error(\"Okta: Invalid token or domain\")\n\n if response.status_code >= 400:\n raise PluginException(cause=\"Delete User failed.\",\n assistance=f\"Okta Deactivate User failed. Response was: {response.text}\")\n\n return {Output.SUCCESS: True}\n","sub_path":"okta/komand_okta/actions/delete_user/action.py","file_name":"action.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"342475513","text":"import matplotlib.pyplot as plt\r\nimport matplotlib as mpl\r\nimport numpy as np\r\nfrom matplotlib import cm\r\n\r\n\r\n'''\r\n Change value to rgb\r\n used as distance field visualization\r\n return [rr,gg,bb]\r\n'''\r\n\r\ndef _changeValueToColor(maxValue,minValue,value):\r\n rr = 0\r\n gg = 0\r\n bb = 0\r\n \r\n\r\n\r\n if(value0.75):\r\n rr =1\r\n gg = 1-(temp-0.75)/0.25\r\n if(gg<0):\r\n gg=0.0\r\n bb=0\r\n return [rr,gg,bb]\r\n \r\n if(temp>0.5):\r\n rr = (temp-0.5)/0.25\r\n gg=1\r\n bb=0\r\n return [rr,gg,bb]\r\n \r\n if(temp>0.25):\r\n rr = 0\r\n gg=1\r\n bb=1-(temp-0.25)/0.25\r\n return [rr,gg,bb]\r\n \r\n return [0,temp/0.25,1]\r\n\r\n\r\ndef plot_examples(colormaps):\r\n \"\"\"\r\n Helper function to plot data with associated colormap.\r\n \"\"\"\r\n np.random.seed(19680801)\r\n data = np.random.randn(30, 30)\r\n n = len(colormaps)\r\n fig, axs = plt.subplots(1, n, figsize=(n * 2 + 2, 3),\r\n constrained_layout=True, squeeze=False)\r\n for [ax, cmap] in zip(axs.flat, colormaps):\r\n psm = ax.pcolormesh(data, cmap=cmap, rasterized=True, vmin=-4, vmax=4)\r\n fig.colorbar(psm, ax=ax)\r\n plt.show()\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n fig, ax = plt.subplots(figsize=(6, 1))\r\n fig.subplots_adjust(bottom=0.5)\r\n #colorArray = np.zeros((101,3))\r\n\r\n #indexing 2d-numpy array with[]:\r\n #And it could modify the array\r\n\r\n #255 0 0 -> red\r\n #0 0 255 -> blue\r\n\r\n\r\n viridis = cm.get_cmap('viridis', 101)\r\n colorArray = viridis(np.linspace(0, 1, 101))\r\n\r\n \r\n maxVV = 30 
#10\r\n minVV = 0\r\n #replace colorbar with our values\r\n for i in range(101):\r\n ratio = i/100.0\r\n if(i == 100):\r\n ratio = 1.0\r\n \r\n # rr = 255.0*(1.0- ratio)\r\n # gg = 0.0\r\n # bb = 255.0 * ratio\r\n \r\n # colorArray[i,0] = rr/255.0\r\n # colorArray[i,1] = gg/255.0\r\n # colorArray[i,2] = bb/255.0\r\n \r\n [rr,gg,bb] = _changeValueToColor(maxVV,minVV,ratio*(maxVV-minVV)+minVV)\r\n colorArray[i,0] = rr\r\n colorArray[i,1] = gg\r\n colorArray[i,2] = bb\r\n\r\n newcmp = mpl.colors.ListedColormap(colorArray) \r\n #plot_examples([newcmp]) \r\n # print(\"Color Array:\\n\",colorArray)\r\n\r\n norm2 = mpl.colors.Normalize(vmin=minVV, vmax=maxVV)\r\n fig.colorbar(mpl.cm.ScalarMappable(norm=norm2, cmap=newcmp),\r\n cax=ax, orientation='horizontal', label='Distance error colorbar(unit: mm)')\r\n plt.show()\r\n\r\n # cm = mpl.cm.cool\r\n\r\n #print('viridis.colors', viridis.colors)\r\n\r\n # ccmap = mpl.colors.Colormap(colorArray)\r\n\r\n # # #cmap = mpl.cm.cool\r\n # norm2 = mpl.colors.Normalize(vmin=5, vmax=10)\r\n\r\n # fig.colorbar(mpl.cm.ScalarMappable(norm=norm2, cmap=ccmap),\r\n # cax=ax, orientation='horizontal', label='Some Units')\r\n \r\n # plt.show()","sub_path":"colorBar.py","file_name":"colorBar.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"173247408","text":"import re\nfrom netaddr import IPNetwork\n\n\nhcl_conditions = [\"==\", \"!=\", \">\", \"<\", \">=\", \"<=\", \"&&\", \"||\", \"!\"]\n\n\n# A helper function that will be used to flatten a multi-dimensional multi-nested list\ndef flatten_list(input):\n new_list = []\n for i in input:\n if type(i) is list:\n i = flatten_list(i)\n for j in i:\n new_list.extend(j)\n else:\n new_list.extend(i)\n return new_list\n\n\ndef generate_target_resource( target_string ):\n target = target_string.strip()\n target = 'resource.{}'.format(target).split('.')\n\n if target[-1] in ['id', 'name']:\n target.pop(-1)\n\n return target\n\n\ndef expand_variable(tf_conf, value):\n regex = r'\\${var.(.*)\\}'\n matches = re.match(regex, value)\n\n if matches is None:\n if 'module' in value:\n return 'None'\n\n return value\n\n if matches.group(1) not in tf_conf['variable']:\n return value\n\n return tf_conf['variable'][matches.group(1)]\n\n\ndef check_if_cidr( value ):\n regex = r'(1[0-9][0-9]|2[0-4][0-9]|25[0-5]|[0-9][0-9]|[0-9])\\.(1[0-9][0-9]|2[0-4][0-9]|25[0-5]|[0-9][0-9]|[0-9])\\.(1[0-9][0-9]|2[0-4][0-9]|25[0-5]|[0-9][0-9]|[0-9])\\.(1[0-9][0-9]|2[0-4][0-9]|25[0-5]|[0-9][0-9]|[0-9])\\/(3[0-2]|2[0-9]|1[0-9]|[0-9])'\n matches = re.match(regex, value)\n\n if matches is not None:\n return True\n\n return False\n\n\ndef is_ip_in_cidr(ip_cidr, cidr):\n for ip_network in cidr:\n if check_if_cidr(ip_cidr) and check_if_cidr(ip_network):\n if IPNetwork(ip_cidr) in IPNetwork(ip_network):\n return True\n\n return False\n\n\n# A helper function that compares port related data with given dictionary\ndef check_sg_rules(tf_conf, security_group, condition, proto, from_port, to_port, ports, cidr):\n\n if 'cidr_blocks' in security_group:\n if type(security_group['cidr_blocks']) is list:\n for i in range(0,len(security_group['cidr_blocks'])):\n if not check_if_cidr(security_group['cidr_blocks'][i]):\n security_group['cidr_blocks'][i] = expand_variable(tf_conf,\n security_group['cidr_blocks'][i]\n ).get('default',\n security_group['cidr_blocks'][i])\n else:\n if not check_if_cidr(security_group['cidr_blocks']):\n security_group['cidr_blocks'] = 
expand_variable(tf_conf,\n security_group['cidr_blocks']\n ).get('default',\n security_group['cidr_blocks'])\n\n\n return validate_sg_rule(should_present=condition, proto=proto, from_port=from_port, to_port=to_port, ports=ports, cidr=cidr, params=assign_sg_params(security_group))\n\n\ndef assign_sg_params(rule):\n from_port = int(rule.get('from_port', 0))\n to_port = int(rule.get('to_port', 0))\n\n protocol = [proto for proto in [rule.get('protocol', '-1')]]\n\n # TODO: Make IANA Protocol numbers matching here.\n # http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml\n if protocol[0] == '-1' or type(protocol[0]) is int:\n protocol = ['tcp', 'udp']\n\n protocol[0] = protocol[0].lower()\n\n cidr_blocks = rule.get('cidr_blocks', [])\n\n if type(cidr_blocks) is not list:\n cidr_blocks = [cidr_blocks]\n\n if to_port == 0 and from_port == 0:\n to_port = 65535\n\n if from_port > to_port:\n raise AssertionError('Invalid configuration from_port can not be bigger than to_port. {} > {} {} in {}'.format(from_port,\n to_port,\n protocol,\n cidr_blocks))\n\n return dict(protocol=protocol, from_port=from_port, to_port=to_port, cidr_blocks=cidr_blocks)\n\n\ndef validate_sg_rule(should_present, proto, from_port, to_port, ports, cidr, params):\n from_port = int(from_port)\n to_port = int(to_port)\n\n assert from_port <= to_port, 'Port range is defined incorrectly within the Scenario. ' \\\n 'Define it {}-{} instead of {}-{}.'.format(from_port,\n to_port,\n to_port,\n from_port)\n defined_range = set(range(params['from_port'], params['to_port']+1))\n\n if should_present:\n in_string = 'not in'\n given_range = set([int(port) for port in ports])\n intersection = not(given_range & defined_range)\n from_to_port = ','.join(ports)\n else:\n in_string = 'in'\n given_range = set(range(from_port, to_port+1))\n intersection = given_range & defined_range\n from_to_port = str(from_port) + '-' + str(to_port)\n\n if intersection and is_ip_in_cidr(cidr, params['cidr_blocks']):\n raise AssertionError(\"Port {}/{} {} {}/{} for {}\".format(\n proto,\n '{}-{}'.format(params['from_port'], params['to_port']),\n in_string,\n '/'.join(params['protocol']),\n from_to_port,\n params['cidr_blocks']))\n\n return True\n\ndef change_value_in_dict(target_dictionary, path_to_change, value_to_change):\n if type(path_to_change) is str:\n path_to_change = path_to_change.split('.')\n\n if type(path_to_change) is not list:\n return False\n\n for x in xrange(0,len(path_to_change)):\n for condition in hcl_conditions:\n if condition in path_to_change[x]:\n return False\n\n path_to_adjust = '[\"{}\"]'.format('\"][\"'.join(path_to_change))\n\n try:\n target = eval('target_dictionary{}'.format(path_to_adjust))\n\n for key, value in value_to_change.items():\n if 'type' in value:\n type_key = value['type']\n source = value\n source['referenced_name'] = key\n\n if type_key not in target:\n target[type_key] = list()\n elif type_key in target and type(target[type_key]) is not list:\n target[type_key] = [target[type_key]]\n\n target[type_key].append(source)\n\n target.update(value_to_change)\n\n try:\n exec('target_dictionary{}.update({})'.format(path_to_adjust, target))\n except:\n # Yes I know, this is against PEP8.\n pass\n\n except KeyError:\n pass\n\n\ndef strip_conditions(string):\n for condition in hcl_conditions:\n string = string.replace(condition, \"\")\n\n\n string = string.split(\" \")\n\n return 
string[0]\n","sub_path":"terraform_compliance/common/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":7166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"95102146","text":"from blog.models import Comment, Tag\n\nfrom django import template\n\nregister = template.Library()\n\n#May want to ditch this for a middleware that passes in the comments object so that I can do the manipulations in the actual template\n@register.simple_tag\ndef recent_comments():\n \n comments = Comment.objects.select_related('entry').filter(deleted=False, spam=False).order_by('-id')[:3]\n output = ''\n return output\n \n@register.simple_tag\ndef tags():\n tags = Tag.objects.order_by('?')[:10]\n return tags","sub_path":"unit_12/mysite/blog/templatetags/tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":1239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"270841743","text":"def build_profile(first, last, **user_info):\n \"\"\"Build a dictionary for a user.\"\"\"\n user_info['first'] = first\n user_info['last'] = last\n return user_info\n# Create two users with different kinds\n# of information.\nuser_0 = build_profile('albert', 'einstein',\nlocation='princeton')\nuser_1 = build_profile('marie', 'curie',\nlocation='paris', field='chemistry')\nprint(user_0)\nprint(user_1)","sub_path":"cheat_sheets/functions/arbitrary_no_keword_arguments.py","file_name":"arbitrary_no_keword_arguments.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"301568380","text":"# For RPi and rpiCam\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport io\nimport time\nimport numpy as np\nimport picamera\n\nfrom PIL import Image\nfrom tflite_runtime.interpreter import Interpreter\n\n\ndef load_labels(path):\n with open(path, \"r\") as f:\n return {i: line.strip() for i, line in enumerate(f.readlines())}\n\n\ndef set_input_tensor(interpreter, image):\n tensor_index = interpreter.get_input_details()[0][\"index\"]\n input_tensor = interpreter.tensor(tensor_index)()[0]\n input_tensor[:, :] = image\n\n\ndef classify_image(interpreter, image, top_k=1):\n \"\"\"Returns a sorted array of classification results.\"\"\"\n set_input_tensor(interpreter, image)\n interpreter.invoke()\n output_details = interpreter.get_output_details()[0]\n output = np.squeeze(interpreter.get_tensor(output_details[\"index\"]))\n\n # If the model is quantized (uint8 data), then dequantize the results\n if output_details[\"dtype\"] == np.uint8:\n scale, zero_point = output_details[\"quantization\"]\n output = scale * (output - zero_point)\n\n ordered = np.argpartition(-output, top_k)\n return [(i, output[i]) for i in ordered[:top_k]]\n\n\ndef main():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser.add_argument(\"--model\", help=\"File path of .tflite file.\", required=True)\n parser.add_argument(\"--labels\", help=\"File path of labels file.\", required=True)\n args = parser.parse_args()\n\n labels = load_labels(args.labels)\n\n interpreter = Interpreter(args.model)\n interpreter.allocate_tensors()\n _, height, width, _ = interpreter.get_input_details()[0][\"shape\"]\n\n with picamera.PiCamera(resolution=(640, 480), framerate=30) as camera:\n camera.start_preview()\n try:\n stream = 
io.BytesIO()\n for _ in camera.capture_continuous(\n stream, format=\"jpeg\", use_video_port=True\n ):\n stream.seek(0)\n image = (\n Image.open(stream)\n .convert(\"RGB\")\n .resize((width, height), Image.ANTIALIAS)\n )\n start_time = time.time()\n results = classify_image(interpreter, image)\n elapsed_ms = (time.time() - start_time) * 1000\n label_id, prob = results[0]\n stream.seek(0)\n stream.truncate()\n camera.annotate_text = \"%s %.2f\\n%.1fms\" % (\n labels[label_id],\n prob,\n elapsed_ms,\n )\n finally:\n camera.stop_preview()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"classify-cam-rpi.py","file_name":"classify-cam-rpi.py","file_ext":"py","file_size_in_byte":2772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"444871027","text":"import os\nimport re\nimport numpy as np\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nfrom collections import defaultdict\n\n# NOTE: the original field-separator token sat inside angle brackets and was stripped during\n# extraction; '<fff>' below is a reconstruction so the writes and reads stay consistent.\nSEP = '<fff>'\n\n\ndef gather_data():\n\ttest_dir='/home/hoangntbn/Desktop/20192/project2/20news-bydate/20news-bydate-test'\n\ttrain_dir='/home/hoangntbn/Desktop/20192/project2/20news-bydate/20news-bydate-train'\n\tlist_newsgroups=[newsgroup for newsgroup in os.listdir(train_dir)]\n\tlist_newsgroups.sort()\n\tstemmer=PorterStemmer()\n\tstop_words=set(stopwords.words('english'))\n\tdef collect_data_from(dir_path,list_newsgroups):\n\t\tdata=[]\n\t\tfor group_id,newsgroup in enumerate(list_newsgroups):\n\t\t\tpath=dir_path+'/'+newsgroup+'/'\n\t\t\tfiles=[(file_name,path+file_name) for file_name in os.listdir(path)]\n\t\t\tfiles.sort()\n\t\t\tfor file_name,file_path in files:\n\t\t\t\twith open(file_path,'rb') as f:\n\t\t\t\t\ttext=f.read().decode('utf-8','ignore').lower()\n\t\t\t\twords=[stemmer.stem(word) for word in re.split('\\W+',text) if word not in stop_words and word.isalpha()]\n\t\t\t\tcontent=' '.join(words)\n\t\t\t\tdata.append(str(group_id)+SEP+file_name+SEP+content)\n\t\treturn data\n\t\n\ttest_data=collect_data_from(test_dir,list_newsgroups)\n\ttrain_data=collect_data_from(train_dir,list_newsgroups)\n\twith open('/home/hoangntbn/Desktop/20192/project2/20news-bydate/train_processed.txt','w') as f:\n\t\tf.write('\\n'.join(train_data))\n\twith open('/home/hoangntbn/Desktop/20192/project2/20news-bydate/test_processed.txt','w') as f:\n\t\tf.write('\\n'.join(test_data))\n\ndef generate_vocabulary(data_path,min_df=6):\n\tdef compute_idf(df,corpus_size):\n\t\treturn np.log10(corpus_size/df)\n\twith open(data_path,'r') as f:\n\t\tlines=f.read().splitlines()\n\tdoc_count=defaultdict(int)\n\tcorpus_size=len(lines)\n\tfor line in lines:\n\t\tfeatures=line.split(SEP)\n\t\twords=set(features[-1].split())\n\t\tfor word in words:\n\t\t\tdoc_count[word]+=1\n\twords_idf=[(word,compute_idf(df,corpus_size)) for word,df in doc_count.items() if df>=min_df]\n\twords_idf.sort(key=lambda tup : -tup[1])\n\tprint(\"vocabulary size: \"+str(len(words_idf)))\n\twith open('/home/hoangntbn/Desktop/20192/project2/20news-bydate/words_idf.txt','w')as f:\n\t\tf.write('\\n'.join([word+SEP+str(idf) for word,idf in words_idf]))\n\ndef get_tf_idf(data_path):\n\twith open('/home/hoangntbn/Desktop/20192/project2/20news-bydate/words_idf.txt','r')as f:\n\t\twords_idf=[(line.split(SEP)[0], float(line.split(SEP)[1])) for line in f.read().splitlines()]\n\tIDF=dict(words_idf)\n\tID=dict([(word,index) for index,(word,idf_val) in enumerate(words_idf)])\n\twith open(data_path,'r') as f:\n\t\tdocs=[(line.split(SEP)[0]+SEP+line.split(SEP)[1]+SEP, line.split(SEP)[2]) for line in 
f.read().splitlines()]\n\tdata_tf_idf=[]\n\tfor header,text in docs:\n\t\twords=[word for word in text.split() if word in IDF]\n\t\tword_set=set(words)\n\t\tmax_tf=max([words.count(word) for word in word_set])\n\t\twords_tf_idf=[]\n\t\tsum_squares=0.0\n\t\tfor word in word_set:\n\t\t\ttf=words.count(word)\n\t\t\ttf_idf=(IDF[word]*tf)/max_tf\n\t\t\twords_tf_idf.append((ID[word],tf_idf))\n\t\t\tsum_squares+=tf_idf**2\n\t\twords_tf_idf_normalized=[str(index)+':'+str(tf_idf/np.sqrt(sum_squares)) for index,tf_idf in words_tf_idf] \n\t\tdata_tf_idf.append(header+' '.join(words_tf_idf_normalized))\n\twith open('/home/hoangntbn/Desktop/20192/project2/20news-bydate/test_tf_idf_vector.txt','w') as f:\n\t\tf.write('\\n'.join(data_tf_idf))\n\n# gather_data()\n# generate_vocabulary('/home/hoangntbn/Desktop/20192/project2/20news-bydate/train_processed.txt',6)\nget_tf_idf('/home/hoangntbn/Desktop/20192/project2/20news-bydate/test_processed.txt')\n\n","sub_path":"logistic/tfidf.py","file_name":"tfidf.py","file_ext":"py","file_size_in_byte":3513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"1510353","text":"from urllib.request import urlopen\nfrom urllib.error import HTTPError\nfrom urllib.error import URLError\nfrom bs4 import BeautifulSoup\n\ndef getHTML(url):\n try:\n html = urlopen(url)\n except HTTPError as e:\n print(\"HTTPError\")\n return None\n try:\n bs = BeautifulSoup(html.read(), 'html.parser')\n # for sibling in bs.find('table',{'border':'1'}).tr.next_siblings:\n # print(sibling)\n # print(bs)\n return bs\n except AttributeError as e:\n print(\"AttributeError\")\n return None\n\ndef findSpan(beautifulsoap):\n nameList = beautifulsoap.findAll('span',{'class':'green'})\n for name in nameList:\n print(name.get_text())\n\n#findSpan(getHTML(\"http://www.pythonscraping.com/pages/warandpeace.html\"))\n\ndef findChildren(beautifulsoap):\n for child in beautifulsoap.find('table',{'id':'giftList'}).children:\n print(child)\n print(\"\\n\")\n#findChildren(getHTML(\"http://www.pythonscraping.com/pages/page3.html\"))\n\n\ndef findSiblings(beautifulsoap):\n for sibling in beautifulsoap.find('table',{'id':'giftList'}).tr\\\n .next_siblings:\n print(sibling)\n\ndef findParents(beautifulsoap):\n print(beautifulsoap.find('img',{'src':'../img/gifts/img1.jpg'})\\\n .parent.previous_sibling.get_text())\n\nfindParents(getHTML(\"http://www.pythonscraping.com/pages/page3.html\"))","sub_path":"chapter2.py","file_name":"chapter2.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"325641215","text":"# The problem asks for the number of subarrays whose count of odd numbers is exactly K\n# Point 1:\n# Same nature as LeetCode 992: with a sliding window it is hard to generate the subarrays with exactly k odd numbers directly, but subarrays with at most k odd numbers are easy to generate, so apply a transformation:\n# count of subarrays with exactly k odds = count with at most k odds - count with at most k-1 odds, i.e. atMost(nums, k) - atMost(nums, k-1)\n# Point 2:\n# How do we get the number of subarrays with at most k odd numbers? 
Answer: each time the window is valid, add the current window length to the running total; the accumulated sum is the number of subarrays with at most k odd numbers.\n# E.g. [1, 2, 3], k=2: the subarrays with at most 2 odd numbers are [1], [1,2], [1,2,3], [2], [2,3], [3], 6 in total\n# While sliding the window:\n# valid window [1] (window length 1, result += 1), i.e. the subarrays ending at 1: [1]\n# [1,2] (window length 2, result += 2), i.e. the subarrays ending at 2: [1,2], [2]\n# [1,2,3] (window length 3, result += 3), i.e. the subarrays ending at 3: [1,2,3], [2,3], [3]\n# Point 3:\n# oddCount records how many odd numbers the current window contains\n\n# Approach 1: sliding window (deque) + oddCount counter\nfrom collections import deque\nclass Solution:\n    def numberOfSubarrays(self, nums: List[int], k: int) -> int:\n        def atMost(nums, k):\n            queue = deque()\n            oddCount = 0\n            result = 0\n            for element in nums:\n                queue.append(element)\n                if element % 2 != 0:\n                    oddCount += 1\n                while oddCount > k:\n                    popoutNum = queue.popleft()\n                    if popoutNum % 2 != 0:\n                        oddCount -= 1\n\n                result += len(queue)\n            return result\n\n        return atMost(nums, k) - atMost(nums, k - 1)\n\n# Approach 2: sliding window (two pointers) + oddCount counter\nclass Solution:\n    def numberOfSubarrays(self, nums: List[int], k: int) -> int:\n        def atMost(nums, k):\n            left = 0\n            oddCount = 0\n            result = 0\n            for index, element in enumerate(nums):\n                if element %2 != 0:\n                    oddCount += 1\n                while oddCount > k:\n                    if nums[left] %2 != 0:\n                        oddCount -=1\n                    left += 1\n                result += index -left + 1\n            return result\n        return atMost(nums, k) - atMost(nums, k-1)\n\n","sub_path":"面试-LeetCode题/基础算法8-滑动窗口(SlidingWindow)/LeetCode1248(CountNumberofNiceSubrrys)/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"209401999","text":"from flask import request, url_for, current_app, jsonify\nfrom werkzeug.wrappers import Response as ResponseBase\nfrom odpc.utils.common import OrderedDict\nfrom odpc.utils.output_json import output_json\nfrom flask.helpers import _endpoint_from_view_func\nfrom functools import partial, wraps\nimport operator, sys, re, difflib\nfrom types import MethodType\n\nDEFAULT_REPRESENTATIONS = [('application/json', output_json)]\n\n\nclass odooApi(object):\n\n    def __init__(self, app=None, prefix='',\n                 default_mediatype='application/json',\n                 catch_all_404s=False, serve_challenge_on_401=False,\n                 url_part_order='bae', errors=None):\n        self.representations = OrderedDict(DEFAULT_REPRESENTATIONS)\n        self.urls = {}\n        self.prefix = prefix\n        self.default_mediatype = default_mediatype\n        self.catch_all_404s = catch_all_404s\n        self.serve_challenge_on_401 = serve_challenge_on_401\n        self.url_part_order = url_part_order\n        self.errors = errors or {}\n        self.blueprint_setup = None\n        self.endpoints = set()\n        self.resources = []\n        self.app = None\n        self.blueprint = None\n\n        if app is not None:\n            self.app = app\n            self.init_app(app)\n\n    def init_app(self, app):\n        # If app is a blueprint, defer the initialization\n        try:\n            app.record(self._deferred_blueprint_init)\n        # Flask.Blueprint has a 'record' attribute, Flask.Api does not\n        except AttributeError:\n            self._init_app(app)\n        else:\n            self.blueprint = app\n\n    def _complete_url(self, url_part, registration_prefix):\n        \"\"\"This method is used to defer the construction of the final url in\n        the case that the Api is created with a Blueprint.\n        :param url_part: The part of the url the endpoint is registered with\n        :param registration_prefix: The part of the url contributed by the\n            blueprint. 
Generally speaking, BlueprintSetupState.url_prefix\n \"\"\"\n parts = {\n 'b': registration_prefix,\n 'a': self.prefix,\n 'e': url_part\n }\n return ''.join(parts[key] for key in self.url_part_order if parts[key])\n\n @staticmethod\n def _blueprint_setup_add_url_rule_patch(blueprint_setup, rule, endpoint=None, view_func=None, **options):\n \"\"\"Method used to patch BlueprintSetupState.add_url_rule for setup\n state instance corresponding to this Api instance. Exists primarily\n to enable _complete_url's function.\n :param blueprint_setup: The BlueprintSetupState instance (self)\n :param rule: A string or callable that takes a string and returns a\n string(_complete_url) that is the url rule for the endpoint\n being registered\n :param endpoint: See BlueprintSetupState.add_url_rule\n :param view_func: See BlueprintSetupState.add_url_rule\n :param **options: See BlueprintSetupState.add_url_rule\n \"\"\"\n\n if callable(rule):\n rule = rule(blueprint_setup.url_prefix)\n elif blueprint_setup.url_prefix:\n rule = blueprint_setup.url_prefix + rule\n options.setdefault('subdomain', blueprint_setup.subdomain)\n if endpoint is None:\n endpoint = _endpoint_from_view_func(view_func)\n defaults = blueprint_setup.url_defaults\n if 'defaults' in options:\n defaults = dict(defaults, **options.pop('defaults'))\n blueprint_setup.app.add_url_rule(rule, '%s.%s' % (blueprint_setup.blueprint.name, endpoint),\n view_func, defaults=defaults, **options)\n\n def _deferred_blueprint_init(self, setup_state):\n \"\"\"Synchronize prefix between blueprint/api and registration options, then\n perform initialization with setup_state.app :class:`flask.Flask` object.\n When a :class:`flask_restful.Api` object is initialized with a blueprint,\n this method is recorded on the blueprint to be run when the blueprint is later\n registered to a :class:`flask.Flask` object. 
This method also monkeypatches\n BlueprintSetupState.add_url_rule with _blueprint_setup_add_url_rule_patch.\n :param setup_state: The setup state object passed to deferred functions\n during blueprint registration\n :type setup_state: flask.blueprints.BlueprintSetupState\n \"\"\"\n\n self.blueprint_setup = setup_state\n if setup_state.add_url_rule.__name__ != '_blueprint_setup_add_url_rule_patch':\n setup_state._original_add_url_rule = setup_state.add_url_rule\n setup_state.add_url_rule = MethodType(odooApi._blueprint_setup_add_url_rule_patch,\n setup_state)\n if not setup_state.first_registration:\n raise ValueError('odoo-restful blueprints can only be registered once.')\n self._init_app(setup_state.app)\n\n def _init_app(self, app):\n if len(self.resources) > 0:\n for resource, urls, kwargs in self.resources:\n self._register_view(app, resource, *urls, **kwargs)\n\n def add_resource(self, resource, prefix, **kwargs):\n if self.app is not None:\n self._register_view(self.app, resource, prefix, **kwargs)\n else:\n raise RuntimeError\n\n def _register_view(self, app, resource, prefix, **kwargs):\n for attr in dir(resource):\n view_func = getattr(resource, attr)\n if view_func and hasattr(view_func, 'endpoint') and hasattr(view_func, 'url') and isinstance(\n view_func.endpoint, str):\n self.endpoints.add(view_func.endpoint)\n rule = self._complete_url(view_func.url, prefix)\n app.add_url_rule(rule, view_func=view_func, endpoint=resource._name + view_func.endpoint, **kwargs)\n\n def url_for(self, resource, **values):\n \"\"\"Generates a URL to the given resource.\n Works like :func:`flask.url_for`.\"\"\"\n endpoint = resource.endpoint\n if self.blueprint:\n endpoint = '{0}.{1}'.format(self.blueprint.name, endpoint)\n return url_for(endpoint, **values)\n\n def unauthorized(self, response):\n \"\"\" Given a response, change it to ask for credentials \"\"\"\n\n if self.serve_challenge_on_401:\n realm = current_app.config.get(\"HTTP_BASIC_AUTH_REALM\", \"flask-restful\")\n challenge = u\"{0} realm=\\\"{1}\\\"\".format(\"Basic\", realm)\n\n response.headers['WWW-Authenticate'] = challenge\n return response\n","sub_path":"odpc/extends/flask_api/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":6586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"322115942","text":"import pandas as pd\nimport nltk\nimport re\nfrom collections import Counter\nfrom nltk.corpus import stopwords\n\ndf = pd.read_csv('./CSV/perfumeReviews.csv', sep='delimiter', engine=\"python\", error_bad_lines=False)\ndf.columns = ['Text']\ndf.Text = df.Text.str.replace('\\d+', '')\ndf.Text = df.Text.map(lambda x: x.lstrip('\\t').rstrip('aAbBcC'))\n\nWORD = re.compile(r'\\w+')\n\nexclude = {'~', ':', '\\\\', '|', '+', '}', ')', '>', '/', '@', '&', '<', '^', '-', '{', ';', '!', '[', '=', '_', '#', '`', '?', ',', '\"', '.', '%', '$', '(', '*', ']'}\ntagged_list = []\nCategory = []\nvals = []\ntokens = []\nreviewsWithFeat = {}\nregex = re.compile(r'[\\n\\r\\t]')\n\n\ndef extract_feature(review):\n review = review.lower()\n review = re.sub(r'(http|www)\\S+', '', review)\n review = re.sub(r'\\d+', '', review)\n review = regex.sub(\" \", review)\n review = review.split(\" \")\n review = \" \".join([word for word in review if word not in stopwords.words('english')])\n review = \"\".join(ch for ch in review if ch not in exclude)\n flist = []\n words = []\n tokens = nltk.word_tokenize(review)\n tagged = nltk.pos_tag(tokens)\n\n if len(tokens) > 1:\n for i in 
range(len(tagged) - 1):\n if i+2 <= len(tagged)-1:\n if (\n tagged[i][1] == 'JJ' and (tagged[i + 1][1] == 'NN' or tagged[i + 1][1] == 'NNS')\n or (tagged[i][1] == 'RB' or tagged[i][1] == 'RBR' or tagged[i][1] == 'RBS') and tagged[i + 1][1] == 'JJ' and not (tagged[i + 2][1] == 'NN' or tagged[i + 2][1] == 'NNS')\n or tagged[i][1] == 'JJ' and tagged[i + 1][1] == 'JJ' and not (tagged[i + 2][1] == 'NN' or tagged[i + 2][1] == 'NNS')\n or (tagged[i][1] == 'NN' or tagged[i][1] == 'NNS') and tagged[i + 1][1] == 'JJ' and not (tagged[i + 2][1] == 'NN' or tagged[i + 2][1] == 'NNS')\n or (tagged[i][1] == 'RB' or tagged[i][1] == 'RBR' or tagged[i][1] == 'RBS') and (tagged[i + 1][1] == 'VB' or tagged[i + 1][1] == 'VBD' or tagged[i + 1][1] == 'VBN' or tagged[i + 1][1] == 'VBG')\n ):\n flist.append(tagged[i][0] + \" \" + tagged[i + 1][0])\n elif (\n tagged[i][1] == 'JJ' and (tagged[i + 1][1] == 'NN' or tagged[i + 1][1] == 'NNS')\n or (tagged[i][1] == 'RB' or tagged[i][1] == 'RBR' or tagged[i][1] == 'RBS') and (tagged[i + 1][1] == 'VB' or tagged[i + 1][1] == 'VBD' or tagged[i + 1][1] == 'VBN' or tagged[i + 1][1] == 'VBG')\n ):\n flist.append(tagged[i][0] + \" \" + tagged[i + 1][0])\n\n return flist\n\n\ni=1\nfor review_text in df.Text:\n feature = extract_feature(review_text)\n [vals.append(word) for word in feature]\n print(i)\n i=i+1\n\ncount = Counter(vals)\nmin_support = int((len(df[\"Text\"])/100)*2)\ncategories = []\nfor category in count.most_common():\n if category[1] >= min_support:\n categories.append(category)\n else:\n break\n\n\ndef categorize(reviewText):\n featureList = extract_feature(reviewText)\n c = \"Neutral\"\n for categ in categories:\n if categ[0] in featureList:\n c = categ[0]\n return c\n\n\nStackValues = df.Text.apply(lambda x: categorize(x))\ndf['category'] = StackValues\ndf.Text = df.Text.str.replace('[\",]', '')\ndf.to_csv(\"CSV/CategorisedPerfumeReviews.csv\", index=False)\nprint(len(categories))\n","sub_path":"CategorizeReviews/new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"179801518","text":"\"\"\"\nZadanie 5 - twierdzenie Pitagorasa\na) Poproś użytkownika o podanie długości boków A, B i C i sprawdź czy w ogóle możliwe jest\nutworzenie z nich trójkąta 🙂\nb) Odpowiedz czy trójkąt jest trójkątem pitagorejskim.\nc) Szczególnym przypadkiem jest trójkąt egipski o stosunkach długości 3:4:5.\nSprawdź czy trójkąt pitagorejski jest trójkątem egipskim.\nd) Uwzględnij, że kolejność danych nie musi mieć znaczenia! 
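\n\n(EN summary: read side lengths A, B and C; check (a) whether a triangle can be formed at all, (b) whether it is a Pythagorean (right) triangle, (c) whether it is the special 3:4:5 \"Egyptian\" triangle, and (d) the answer must not depend on the order of the inputs.)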
\n\"\"\"\nA = int(input(\"Podaj długość boku A = \"))\nB = int(input(\"Podaj długość boku B = \"))\nC = int(input(\"Podaj długość boku C = \"))\n\nprint(\"Sprawdźmy czy możemy z tych boków zrobić trójkąt...\")\n\nif A > B:\n temp = A\n A = B\n B = temp\nif A > C:\n temp = A\n A = C\n C = temp\nif B > C:\n temp = B\n B = C\n C = temp\n'''\n A B C\njesli A jest wieksze od B to niech A zamieni sie miejscem z B\nBAC\njesli A jest wieksze dd C to niech A zamieni sie miejscami z C\nBCA\njesli B jest wieksze od C to niech B zmiani sie miejscami z C\nCBA \n '''\nprint(\"Sprawdzam trójkąt\", A, B, C)\n\"\"\"\nTrójkąt powstanie gdy a + b > c\n\"\"\"\nis_triangle = False\nif A + B > C:\n print(\"Jest możliwość utworzniea trójkąta\")\n is_triangle = True\nelse:\n print(\"Brak możliwości utworzenia trójkąta.\")\n\nif is_triangle and A**2 + B**2 == C**2:\n print(\"Powstał trójkąt pitagorejski\")\n if A/3 == B/4 == C/5:\n print(\"Ponadto szczególny przypadek - trójkąt egipski\")\nelif is_triangle:\n print(\"Utworzono trójkąt\")\n\n","sub_path":"Konstrukcje warunkowe/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"529651481","text":"WIDTH = 30\nHEIGHT = 18\nTITLE = 'MAZE'\n\n# create a sprite based on the \"player\" image\n# position at the top left corner\n# control the sprite with the arrow keys\n# the speed of the sprite enables \"graceful\" \n# movement with the keyboard\np = image('player', (0, 0)).speed(5).keys()\n\n# center the player on the 0,0 grid cell\np.move_to((0, 0))\n\n# these two nested for loops iterate over\n# every grid cell on the canvas\nfor y in range(HEIGHT):\n for x in range(WIDTH):\n \t# don't create a block on the location of\n \t# the player OR the location of the green cell\n if (x, y) == (0, 0) or (x, y) == (29, 16):\n continue\n # make a random decision to color the cell red\n # this number can be adjusted to make the cells\n # smaller or larger.\n if rand(1, 3) > 2.5:\n shape(RECT, RED, (x, y))\n\n# a callback function for when the player reaches \n# the green destination\ndef win(b, p):\n text('YOU WIN', BLUE)\n gameover()\n \n# draw a green destination cell on the bottom right\nd = shape(RECT, GREEN, (WIDTH-1, HEIGHT-1), tag='destination')\n\n# if the player reaches this cell, execute the 'win' callback\nd.collides(p, win)\n\n# register the 'r' key for resetting the game\nkeydown('r', reset)\n","sub_path":"maze-1.py","file_name":"maze-1.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"220137272","text":"import numpy as np\nimport tensorflow as tf\n\n# 创建图和会话\ngraph = tf.Graph()\nsess = tf.InteractiveSession(graph = graph)\n# 导入Inception 模型\nmodel_fn = 'tensorflow_inception_graph.pb'\nwith tf.gfile.FastGFile(model_fn, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n# 定义t_input 为输入的图像\nt_input = tf.placeholder(np.float32, name=\"input\")\nimagenet_mean = 117.0 # 为图像减去一个像素均值,再训练Inception 模型的时候,已经做了减去均值的操作,\n\n#从3个维度 [height, width, channel] 变为4个维度 [batch, height, width, channel]\n# 使用expand_dims 可以增加一维\nt_preprocessed = tf.expand_dims(t_input - imagenet_mean,0)\n# 使用import_graph_def 导入模型\ntf.import_graph_def(graph_def,{'input':t_preprocessed})\n# 找到所有卷积层\nlayers = [op.name for op in graph.get_operations() if op.type == 'Conv2D' and 'import/' in op.name]\n\nprint(layers)\nprint('Number of layers', len(layers))\n\n# 
# Print the shape of mixed4d_3x3_bottleneck_pre_relu\nname = 'mixed4d_3x3_bottleneck_pre_relu'\nprint('shape of %s : %s' % (name, str(graph.get_tensor_by_name(\"import/\"+name + ':0').get_shape())))","sub_path":"deepdream/load_inception.py","file_name":"load_inception.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"63104288","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 13 11:00:37 2018\n\n@author: Administrator\n\"\"\"\n\n# By default Python uses a dict to store an object's instance attributes.\n# __slots__ tells Python not to use that dict and to allocate space only for a fixed set of attributes.\n\n\n\nclass MyClass(object):\n    __slots__ = ['name', 'identifier']\n    def __init__(self, name, identifier):\n        self.name = name\n        self.identifier = identifier\n        self.set_up()  # set_up() is assumed to be defined elsewhere","sub_path":"tutorat/10_slot.py","file_name":"10_slot.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"461006671","text":"from pyspark.rdd import RDD\nfrom pyspark.sql import DataFrame\nfrom pyspark.sql import SparkSession\n\n\ndef initSpark():\n    spark = SparkSession \\\n        .builder \\\n        .appName(\"Python Spark SQL basic example\") \\\n        .config('spark.executor.memory', '32g') \\\n        .config('spark.driver.memory', '32g') \\\n        .getOrCreate()\n    return spark\n\n\ndef dataParallelization():\n    spark = initSpark()\n    df = spark.read.format('csv')\\\n        .option('encoding', \"UTF-8\")\\\n        .option('header', 'true')\\\n        .load(\"./data/hotel_bookings.csv\")\n    return df\n","sub_path":"data_init.py","file_name":"data_init.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"315394806","text":"# Thomas Roux\n# 18 April 2019\n# Fibonacci sequence to 1000\n\n# Set starting variables\na = 1\nb = 1\nnum = 0\n\n# Calculate sequence\nprint(a,b, end = \" \")\nwhile True:\n    num = a + b\n    if num > 1000:\n        break\n    print(num, end = \" \")\n    a = b\n    b = num\n\n    ","sub_path":"ex7_5.py","file_name":"ex7_5.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"12154821","text":"#--------------------Declarations of classes and containers used in the program----------------#\n\nclass Entry:\n    def __init__(self, border, date, measure, value, average):\n        self.border = border\n        self.date = date\n        self.measure = measure\n        self.value = int(value)\n        self.average = average\n    \ndef get_border_date_measure(filename):\n    master_dict = {}\n    f = open(filename, 'r')\n    f.readline()\n    currentdate = str(-1)\n    while f:\n        line = f.readline()\n        if not line:\n            break\n        cols = line.split(\",\")\n        border = cols[3]\n        date = cols[4].split(\" \")[0]\n        if currentdate!=date:\n            currmonth_bm = {}\n            currentdate = date\n        measure = cols[5]\n        value = int(cols[6])\n        bm_key = border + \" \" + measure\n        key = border + \",\" + date + \",\" + measure\n        if bm_key in currmonth_bm:\n            master_dict[key] += value\n        else:\n            currmonth_bm[bm_key] = 1.0\n            master_dict[key] = value\n    return master_dict\n    \ndef generate_output_records(master_dict):\n    cumsum_measures = {}\n    output = []\n    months_passed = 0\n    currentdate = str(-1)\n    key_strings = list(master_dict.keys())\n    for i in range(len(key_strings)-1, -1, -1):\n        measure = key_strings[i].split(\",\")[2]\n        date = key_strings[i].split(\",\")[1]\n        border = key_strings[i].split(\",\")[0]\n        if(currentdate == \"-1\"):\n            currentdate = date\n        border_measure = border + \" \" + measure\n        if 
date!=currentdate:\n months_passed += 1\n currentdate = date\n if border_measure in cumsum_measures:\n temp = cumsum_measures[border_measure]/months_passed\n if(temp - int(temp) >= 0.5):\n avg = int(temp) + 1\n else:\n avg = int(temp)\n cumsum_measures[border_measure] += master_dict[key_strings[i]]\n else:\n cumsum_measures[border_measure] = master_dict[key_strings[i]]\n avg = 0\n output.append(Entry(border, date, measure, master_dict[key_strings[i]], avg))\n return output\n \ndef write_to_file(filename, output_records_sorted):\n fout = open(filename, 'w')\n fout.write(\"Border,Date,Measure,Value,Average\\n\")\n for i in range(len(output_records_sorted)-1, -1, -1):\n fout.write(output_records_sorted[i].border + \",\" + output_records_sorted[i].date + \" 12:00:00 AM,\" + output_records_sorted[i].measure + \",\" + str(output_records_sorted[i].value) + \",\" + str(output_records_sorted[i].average) + \"\\n\")\n","sub_path":"insight_testsuite/temp/src/auxfuncs.py","file_name":"auxfuncs.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"205363121","text":"#!/usr/bin/python3\n# coding: utf-8\n\n__author__ = 'Nairy'\n__version__ = '2.0'\n__contact__ = '__Nairy__#7181 or https://www.github.com/znairy'\n\nimport socket\nimport time\nfrom datetime import datetime\nfrom threading import Thread\nfrom termcolor import colored\nfrom sys import exit\n\nclass MunityServer(object):\n ''' client side '''\n def __init__(self, host, port):\n date = datetime.today()\n self.Address = (host, port)\n self.server = None\n self.Allusers = []\n self.privateNameUsers = []\n self.privateUsers = []\n self.waitRoom = []\n self.buffer = 1024\n self.log = open(f'{date.year}-{date.month}-{date.day}.log', 'w')\n self.commands = [\n ('/commands', '\\n /version\\n /contact\\n /clear\\n /namecolor\\n /listusers\\n /private\\n /accept\\n /decline\\n /leave'),\n ('/version', '2.0'),\n ('/contact', ' Discord: __Nairy__#7181 or https://www.github.com/zNairy'),\n ('/clear', None),\n ('/namecolor', None),\n ('/listusers', None),\n ('/private', None),\n ('/accept', None),\n ('/decline', None),\n ('/leave', None),\n ('/invites', None)\n ]\n\n def Start(self):\n self.CreateSocket()\n self.main()\n\n def CreateSocket(self):\n try:\n self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.server.bind(self.Address)\n self.server.listen(10)\n except Exception as err:\n print(colored(err, 'red'))\n exit(1)\n\n def NumOfInvites(self, conn):\n conn.send(str(len(self.OnWaitRoom(conn))).encode())\n\n def LeavePrivateRoom(self, conn):\n user = self.OnPrivateRoom(conn)\n if(user):\n user = user[0]\n for _user in user:\n if(_user is not conn):\n _user.send(f'\\n [*] {self.CheckNickName(_user)}, seu parceiro acabou de te deixar...\\n Voltando ao chat geral...'.encode())\n conn.send(f'\\n [*] Você deixou o chat privado com {self.CheckNickName(_user)}\\n Voltando ao chat geral...'.encode())\n self.privateNameUsers.remove(self.CheckNickName(_user))\n \n for _user in self.privateUsers:\n if(user == _user):\n self.privateUsers.remove(user)\n else:\n conn.send('\\n Vocẽ não está em uma sala privada...'.encode())\n \n def AddUser(self, conn, nick):\n self.Allusers.append((conn, nick))\n\n def OnPrivateRoom(self, conn):\n return [user for user in self.privateUsers if conn in user]\n\n def OnWaitRoom(self, conn):\n return [user for user in self.waitRoom if conn in user]\n\n def 
SendNotificationPrivate(self, users, conn):\n for _user in users:\n if(_user is not conn):\n _user.send(f'\\n [*] Você está no privado com {self.CheckNickName(conn)} agora!'.encode())\n conn.send(f'\\n [*] Você está no privado com {self.CheckNickName(_user)} agora!'.encode())\n\n def CheckInvite(self, conn, nickname, users):\n for invite in users:\n for _user in invite:\n if(_user is not conn and self.CheckNickName(_user) == nickname):\n return invite\n\n def AddPrivateNameUsers(self, users):\n for _user in users:\n self.privateNameUsers.append(self.CheckNickName(_user))\n\n def Accept(self, conn, nickname):\n user = self.OnPrivateRoom(conn)\n if(not user):\n user = self.OnWaitRoom(conn)\n if(user):\n _user = self.CheckInvite(conn, nickname, user)\n if(_user):\n self.AddPrivateNameUsers(_user)\n self.SendNotificationPrivate(_user, conn)\n self.privateUsers.append(_user)\n self.waitRoom.remove(_user)\n else:\n conn.send(f'\\n Você não tem invites de {nickname}'.encode())\n else:\n conn.send(f'\\n Você não tem pedidos para sala privada...'.encode())\n else:\n conn.send(f'\\n Você já está em uma conversa privada, use /leave para sair...'.encode())\n\n def Decline(self, conn, nickname):\n user = self.OnWaitRoom(conn)\n if(user):\n _user = self.CheckInvite(conn, nickname, user)\n if(_user):\n self.waitRoom.remove(_user)\n else:\n conn.send(f'\\n Você não tem invites de {nickname}'.encode())\n else:\n conn.send(f'\\n Você não tem pedidos para sala privada...'.encode())\n\n def AddOnPrivate(self, nick, conn):\n user = self.OnPrivateRoom(conn)\n if(not user):\n user = self.CheckUser(nick)\n if(user):\n user = user[0]\n if(self.CheckNickName(conn) != user[1]):\n user[0].send(f'\\n Olá {nick}, o usuário {self.CheckNickName(conn)} quer se conectar com você!'.encode())\n self.waitRoom.append((conn, user[0]))\n else:\n conn.send(f' Você não pode mandar convite a sí mesmo...'.encode())\n else:\n conn.send(f' Usuario {nick} não está online...'.encode())\n else:\n conn.send(f'\\n Você já está em uma conversa privada, use /leave para sair...'.encode())\n\n def CheckNickName(self, connection):\n return [conn[1] for conn in self.Allusers if conn[0] == connection][0]\n \n def NickAvailable(self, nickname):\n for name in self.ListOfUsers().split('\\n'):\n if(nickname.strip() == name):\n return False\n \n return True\n\n def ListOfUsers(self):\n return ''.join(f'{user[1]}\\n' for user in self.Allusers)\n\n def CheckUser(self, nick):\n return [user for user in self.Allusers if user[1] == nick]\n\n def SendCommand(self, cmd, conn):\n command = self.CommandAvailable(cmd.split(' ')[0])\n if(command):\n command = command[0]\n if(command[0] == '/listusers'):\n conn.send(f'\\n{self.ListOfUsers()}'.encode())\n elif(command[0] == '/private'):\n if(cmd[9:].strip() != \"\"):\n self.AddOnPrivate(cmd[9:].strip(), conn)\n else:\n conn.send('\\n Passe o nome do usuário como argumento: /private nome_do_usuario'.encode())\n elif(command[0] == '/accept'):\n if(cmd[8:].strip() != \"\"):\n self.Accept(conn, cmd[8:].strip())\n else:\n conn.send(f'\\n Passe o nome do usuário como argumento: /accept nome_do_usuario'.encode())\n elif(command[0] == '/decline'):\n if(cmd[9:].strip() != \"\"):\n self.Decline(conn, cmd[9:].strip())\n else:\n conn.send('\\n Passe o nome do usuário como argumento: /decline nome_do_usuario'.encode())\n elif(command[0] == '/leave'):\n self.LeavePrivateRoom(conn)\n elif(command[0] == '/invites'):\n self.NumOfInvites(conn)\n else:\n conn.send(f' {command[1]}'.encode())\n else:\n conn.send(f' Erro: comando {cmd} 
inválido ou inexistente...'.encode())\n\n def CommandAvailable(self, cmd):\n return [command for command in self.commands if cmd == command[0]]\n\n def RemoveConnection(self, apl):\n self.Allusers.remove([user for user in self.Allusers if user[1] == apl][0])\n \n def ListenUsers(self, conn, apl):\n while True is not False:\n user_message = conn.recv(self.buffer).decode('utf-8')\n if(not user_message):\n print(colored(f' {apl} has left...', 'red'))\n if(self.OnPrivateRoom(conn)):\n self.LeavePrivateRoom(conn)\n self.RemoveConnection(apl)\n break\n elif(user_message[0] == '/'):\n self.SendCommand(user_message, conn)\n else:\n onprivate = self.OnPrivateRoom(conn)\n if(onprivate):\n if(onprivate[0][0] != conn):\n onprivate[0][0].send(f'\\n[{apl}]: {user_message}'.encode())\n else:\n onprivate[0][1].send(f'\\n[{apl}]: {user_message}'.encode())\n else:\n self.WriteLog(user_message, apl)\n for user in self.Allusers:\n if(user[0] is not conn and user[1] not in self.privateNameUsers):\n user[0].send(f'\\n[{apl}]: {user_message}'.encode())\n\n def WriteLog(self, message, apl):\n hour = datetime.now()\n hour = hour.strftime('%H-%M-%S ')\n self.log.write(f'{hour} - [{apl}]: {message}\\n')\n \n def main(self):\n print(colored(f' Server open in: {self.Address[0]}:{self.Address[1]}', 'yellow'))\n try:\n while True:\n connection, adress = self.server.accept()\n apl = connection.recv(self.buffer);apl = apl.decode('utf-8')\n if(self.NickAvailable(apl)):\n connection.send(f' Welcome to Munity {apl}\\n Você está no canal geral!'.encode())\n self.AddUser(connection, apl)\n print(colored(f' {apl} - has joinned...', 'green'))\n thread = Thread(target=self.ListenUsers, args=(connection, apl));thread.daemon=True;thread.start()\n else:\n connection.send(f'\\n Esse nick name já está em uso, por favor reinicie e tente outro...'.encode())\n\n except KeyboardInterrupt:\n print()\n exit(0)\n\ndef main():\n chat = MunityServer('127.0.0.1', 5555)\n chat.Start()\n\nif __name__ == '__main__':\n main()","sub_path":"MunityServer.py","file_name":"MunityServer.py","file_ext":"py","file_size_in_byte":9994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"594956364","text":"# methods for different learning rules\nimport jax.numpy as jnp\n\n__all__ = [\n 'learning_stationary',\n 'learning_switching'\n]\n\ndef learning_stationary(outcomes, choices, priors):\n\n N, K = outcomes.shape\n\n select_observed = jnp.eye(K)[choices]\n\n alpha_t = priors[..., 0] + select_observed * outcomes\n beta_t = priors[..., 1] + select_observed * (1 - outcomes)\n\n return jnp.stack([alpha_t, beta_t], -1)\n\n\ndef learning_switching(outcomes, choices, priors):\n raise NotImplementedError","sub_path":"bandits/learning_algos.py","file_name":"learning_algos.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"277129877","text":"from pyspark.sql import SparkSession\nfrom pyspark.sql import functions as F\n\nspark = SparkSession.builder.appName('5th_exercise').getOrCreate()\ndf = spark.read.option(\"inferSchema\", \"true\").csv(\"yellow_tripdata_2019-06.csv\", header=True).cache()\ndff = spark.read.option(\"inferSchema\", \"true\").csv(\"taxi*\", header=True)\n\ndf1 = df.groupBy('PULocationID', 'DOLocationID').count()\ndf2 = df1.select(F.max('count'))\n\ndf2 = df2.join(df1, F.col('max(count)') == F.col('count'))\n\ndf2 = df2.select('PULocationID', 'DOLocationID')\n\ndff1 = dff.select('LocationID', 
F.col('Zone').alias('PULocationZone'))\ndff2 = dff.select('LocationID', F.col('Zone').alias('DOLocationZone'))\n\ndf3 = dff1.join(df2, dff1.LocationID == F.col('PULocationID'))\ndf3 = dff2.join(df3, dff2.LocationID == F.col('DOLocationID'))\n\ndf3 = df3.select(F.col('PULocationZone'), F.col('DOLocationZone'))\n\ndf3.show()","sub_path":"Taller Dataframe -Taxis/9th_point_DF.py","file_name":"9th_point_DF.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"645926183","text":"# -*- coding: utf-8 -*-\nfrom prettytable import PrettyTable\nfrom fuzzywuzzy import fuzz\nfrom progressbar import ProgressBar\nimport time\nfrom colorama import init, Fore, Back, Style\nimport uuid\nfrom os import system as cmd\nimport random\nimport pyprimes\n\ninit()\n\ndef intro():\n\n\tcmd(\"cls\")\n\n\tprint(Fore.MAGENTA)\n\tprint(\"Привет!\")\n\ttime.sleep(1)\n\n\tprint(Fore.RED)\n\tprint(\"Листая ленту вк Я удивел пост \\\"10 интересных библиотек для питона\\\"\")\n\ttime.sleep(3)\n\n\tprint(Fore.CYAN)\n\tprint(\"Ну ладно, посмотрим ЧТО из этого получится...\")\n\ttime.sleep(2)\n\n\tprint(Fore.MAGENTA)\n\tprint(\"Кстати, слово 'посмотрим' и 'получится' похожи на \" + str(fuzz.ratio('конь', 'питон')) + '%!')\n\ttime.sleep(3.5)\n\n\tprint(Fore.CYAN)\n\tprint(\"Давайте найдем простые числа от 2 то 100!\\n\")\n\ttime.sleep(1)\n\n\tprint(Fore.RESET)\n\ndef middle():\n\n\ttable = PrettyTable([\"числа\", \"простота\", \"рандомное поле\"])\n\n\n\tbar = ProgressBar(maxval=100)\n\tbar.start()\n\n\tfor i in range(2,50):\n\t\tif pyprimes.isprime(i):\n\n\t\t\ttable.add_row([i, 'простое', None])\n\t\telse:\n\t\t\ttable.add_row([i, '-', None])\n\n\t\t#update progress bar\n\t\tbar.update(i*2)\n\t\ttime.sleep(0.02)\n\n\tbar.finish()\n\n\ttime.sleep(1)\n\t#cmd(\"cls\")\n\n\tprint(\"\\nРезультат/\")\n\ttime.sleep(0.5)\n\n\tprint(Fore.RED, '\\n\\t\\tТоп 10 первых чисел\\n',Fore.RESET)\n\n\tresult = table.get_string(sortby=\"простота\", fields=[\"числа\", \"простота\"], start=0, end=10, reversesort=True).split(\"\\n\")\n\n\tfor i in result:\n\t\tif i.find(\"простое\") != -1:\n\t\t\tprint(Fore.MAGENTA, i)\n\t\telse:\n\t\t\tprint(Fore.RESET, i)\n\t\n\ttime.sleep(2.5)\n\ndef final():\n\tprint(\"\\n\\n\")\n\tprint(\"Получите в подарок уникальный ID.\")\n\tprint(\"Генерация...\")\n\ttime.sleep(1.5)\n\n\tbar = ProgressBar(maxval=10)\n\tbar.start()\n\n\tfor i in range(1,10,2):\n\t\tbar.update(i)\n\t\ttime.sleep(0.2)\n\tbar.finish()\n\n\tid_ = str(uuid.uuid4())\n\n\tprint(Fore.WHITE, id_ , Fore.RESET)\n\nintro()\nmiddle()\nfinal()\n","sub_path":"python/colorful text in cmd/wheet.py","file_name":"wheet.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"74589329","text":"import sys\nsys.stdin = open(\"10994_별 찍기 - 19.txt\", \"rt\")\n\n\ndef solution(n):\n stars = [[' '] * (4*n-3) for _ in range(4*n-3)]\n\n def fill(n, idx):\n if n == 1:\n stars[idx][idx] = '*'\n return\n for i in range(idx, idx + (4*n-3)):\n stars[idx][i] = stars[idx+(4*n-3)-1][i] = stars[i][idx] = stars[i][idx+(4*n-3)-1] = '*'\n return fill(n-1, idx+2)\n\n fill(n, 0)\n\n for row in stars:\n print(''.join(row))\n\n\nif __name__ == \"__main__\":\n input = sys.stdin.readline\n n = int(input())\n print(solution(n))\n","sub_path":"BaekJoon/분류 안 된 문제들/재귀/10994_별 찍기 - 19.py","file_name":"10994_별 찍기 - 
19.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"571844768","text":"\n# Encrypt using the da Vinci method.\nwith open('input.txt') as f_in:\n text = f_in.read()\n\nwith open('output.txt', 'w') as f_out:\n f_out.write('Шифр да Винчи:')\n f_out.write(text[::-1])\n\n\n# Decrypt using the da Vinci method.\nwith open('output.txt') as f_in:\n text_one = f_in.read()\n text_one_r = text_one[::-1]\n\nwith open('record.txt', 'w') as f_out:\n f_out.write('Расшифрованный тескст:')\n f_out.write(text_one_r[:len(text)])\n f_out.write('\\n')\n\n\n# Code in the second wayю\nENG_LET_1 = ['A', 'E', 'I', 'O', 'U', 'U', 'a', 'e', 'i', 'o', 'u', 'u']\n\nENG_LET_2 = ['B', 'C', 'D', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N',\n 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'X', 'Y', 'Z',\n 'b', 'c', 'd', 'f', 'g', 'h','j', 'k', 'l', 'm', 'n',\n 'p', 'q', 'r', 's', 't', 'v', 'w', 'x', 'y', 'z', ]\n\nRUS_LET_2 = ['Б','В','Г','Д','Ж','З','Й','К','Л','М', 'Н','П', 'Р', 'С', 'Т', 'С',\n 'Ф', 'Х', 'Ц','Ч', 'Ш','Щ','Ъ','Ь', 'б', 'в', 'г' ,'д','ж','з','й','к','л','м', 'н', 'п', 'р', 'с',\n 'т', 'с', 'ф', 'х', 'ц','ч', 'ш','щ','ъ', 'ь']\n\nRUS_LET_1 = ['А', 'Е', 'Ё', 'И', 'О', 'У', 'Ы', 'Э', 'Ю', 'Я',\n 'а', 'е', 'ё', 'и', 'о', 'у', 'ы', 'э', 'ю', 'я',]\n\nwith open('input.txt') as f_in:\n text = f_in.read()\n _replaced = text\nwith open('output.txt', 'a') as f_out:\n for i in range(len(text)):\n if text[i] in ENG_LET_1:\n _replaced = text.replace((text[i]), '1')\n text = _replaced\n if text[i] in ENG_LET_2:\n _replaced = text.replace((text[i]), '2')\n text = _replaced\n if text[i] in RUS_LET_2:\n _replaced = text.replace((text[i]), '2')\n text = _replaced\n if text[i] in RUS_LET_1:\n _replaced = text.replace((text[i]), '1')\n text = _replaced\n f_out.write('Кодировка замен��й:')\n f_out.write(_replaced)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"204549069","text":"import shapely\r\nfrom shapely.geometry import Point\r\nfrom shapely.geometry import LineString\r\nfrom shapely.geometry import Polygon\r\n\r\nprint(Point(0,0).distance(Point(1,1)))#距离\r\ndonut = Point(0, 0).buffer(2.0).difference(Point(0, 0).buffer(1.0))\r\nprint(donut)\r\nprint(donut.centroid.wkt)# 质点\r\nprint(donut.representative_point().wkt) # 内点,计算量小\r\n\r\next = [(0, 0), (0, 2), (2, 2), (2, 0), (0, 0)]\r\nint = [(1, 0), (0.5, 0.5), (1, 1), (1.5, 0.5), (1, 0)][::-1]\r\npolygon = Polygon(ext, [int])\r\n\r\nprint(polygon.is_valid)\r\n\r\n\r\n\r\nfrom shapely.geometry import Point\r\nfrom shapely.strtree import STRtree\r\npolygons=[]\r\nfor i in range(0,50):\r\n for j in range(0,50):\r\n polygon=Point([i,j]).buffer(1.0)\r\n polygon.__setattr__('id',str(i)+str(j))\r\n polygons.append(polygon)\r\ntree=STRtree(polygons)\r\nresult=tree.query(Point(1,1))\r\nfor r in result:\r\n print(r.__getattribute__('id'))\r\n\r\n\r\n\r\nprint('----------------------')\r\nfrom shapely import wkt\r\nfrom shapely.ops import polygonize\r\npolygon=wkt.loads('POLYGON ((220 350, 400 440, 635 249, 380 80, 174 164, 179 265, 220 350))')\r\npolyline=wkt.loads('LINESTRING (570 400, 392 315, 299 215, 430 140, 530 240, 450 360, 460 480)')\r\npolyline2=wkt.loads('LINESTRING (560 400, 200 400)')\r\nboundary=polygon.boundary\r\npolyline=polyline.union(boundary).union(polyline2)\r\nlineList = []\r\nfor i in range(0,len(polyline.geoms)):\r\n 
lineList.append(polyline.geoms[i])\r\n\r\nclipPolygon=polygonize(lineList)\r\nbuffer=polygon.buffer(1)\r\nfor c in clipPolygon:\r\n    if buffer.contains(c):\r\n        print(c)\r\n\r\n\r\n","sub_path":"tools/geo/topo/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"270853273","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n'''\n@File    :   data_preprocessing.py\n@Time    :   2021/07/13 09:54:03\n@Author  :   searobbersandduck \n@Version :   1.0\n@Contact :   searobbersandduck@gmail.com\n@License :   (C)Copyright 2020-2021, MIT\n@Desc    :   None\n'''\n\n# here put the import lib\n\nimport os\nimport sys\nROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))\nsys.path.append(ROOT)\n\nfrom tqdm import tqdm\nfrom glob import glob\nimport shutil\nimport SimpleITK as sitk\nimport numpy as np\n\nfrom external_lib.MedCommon.utils.data_io_utils import DataIO\nfrom external_lib.MedCommon.utils.image_postprocessing_utils import ImagePostProcessingUtils\nfrom external_lib.MedCommon.utils.mask_bounding_utils import MaskBoundingUtils\nfrom external_lib.MedCommon.utils.mask_utils import MaskUtils\nfrom external_lib.MedCommon.utils.image_show_utils import ImageShowUtils\n\nerror_list = [\n    '4409402',\n    '4623558',\n    '4825713',\n    '5007511',\n    '5101568',\n    '1237062',\n    '1285440',\n    '1305155',\n    '1397046',\n    '1445543',\n    '1902661',\n    '1935168',\n    '2094368',\n    '2182657',\n    '2504214',\n    '2602401',\n    '2670896',\n    '2693475',\n    '2813431',\n    '2835569',\n    '2999343',\n    '3772244',\n    '3839904',\n    '3869885',\n    '3949254',\n    '3998837',\n    '4185501',\n    '4303024',\n    '4381668',\n    '4409402',\n    '4440093',\n    '4465419',\n    '4577418',\n    '4587103',\n    '4479986',\n    '4455178',\n    '4238407',\n    '4285331',\n    '4503839',\n    '4597717',\n    '4634411',\n    '4669297',\n    '4981609'\n]\n\ndef step_1_check_folder_format(root, out_root):\n    '''\n    root = '/data/medical/brain/gan/hospital_6_multi_classified/CTA2DWI-多中心-20201102'\n    out_root = '/data/medical/brain/gan/cta2dwi_multi_classified'\n\n\n    note: \n        1. Confirm that every case contains at least both a CTA and a DWI series, and count each\n        2. Copy the data to the new location, keeping only the CTA and DWI folders;\n        when both CTA1 and CTA2 folders exist, rename CTA1 to CTA\n\n    .\n    ├── CTA阴性(108例)\n    │   ├── 六院-CTA阴性(69)\n    │   ├── 南通大学-阴性-血管(14)\n    │   └── 闵中心-阴性-血管(25)\n    └── 阳性-闭塞(188例)\n        ├── 六院-DWI闭塞病例(105)\n        ├── 六院-阳性-血管闭塞(25)\n        ├── 六院-阳性-血管闭塞(37)\n        ├── 南通大学-阳性-血管闭塞(5)\n        └── 闵中心-阳性-血管闭塞(16)\n\n    Expanded view of one of these folders:\n    闵中心-阳性-血管闭塞(16)$ tree -L 2\n    .\n    ├── 101878640\n    │   ├── CTA\n    │   └── DWI\n    ├── 102512839-101477685\n    │   ├── CTA\n    │   └── DWI\n    ├── 102661445\n    │   ├── CTA\n    │   └── DWI\n    ├── 102869917\n    │   ├── CTA\n    │   └── DWI\n\n    '''\n    pn_roots = os.listdir(root)\n    '''\n    .\n    ├── CTA阴性(108例)\n    └── 阳性-闭塞(188例) \n    '''\n\n    def comprass_modalities(modalities):\n        pairs = []\n        if 'CTA' in modalities:\n            pairs.append('CTA')\n        elif 'CTA1' in modalities:\n            pairs.append('CTA1')\n        elif 'CTA2' in modalities:\n            pairs.append('CTA2')\n        \n        if 'DWI' in modalities:\n            pairs.append('DWI')\n        elif 'DWI1' in modalities:\n            pairs.append('DWI1')\n        elif 'DWI2' in modalities:\n            pairs.append('DWI2')\n        return pairs\n\n\n    for pn_root in pn_roots:\n        pn_path = os.path.join(root, pn_root)\n        for hospital_name in os.listdir(pn_path):\n            hospital_path = os.path.join(pn_path, hospital_name)\n            \n            for pid in tqdm(os.listdir(hospital_path)):\n                try:\n                    if len(pid) != 7:\n                        continue\n                    pid_path = os.path.join(hospital_path, pid)\n                    modalities = os.listdir(pid_path)\n                    pairs_modalities = []\n                    pairs_modalities = comprass_modalities(modalities)\n                    \n                    for m in pairs_modalities:\n                        src_path = os.path.join(pid_path, m)\n                        dst_path = src_path.replace(root, out_root)\n                        if dst_path.endswith('1') or dst_path.endswith('2'):\n                            dst_path = dst_path[:-1]\n                        os.makedirs(os.path.dirname(dst_path), exist_ok=True)\n                        shutil.copytree(src_path, dst_path)\n                        print('copy from {} to {}'.format(src_path, dst_path))\n\n                    \n                    if len(pairs_modalities) != 2:\n                        print(pid_path)\n                except Exception as e:\n                    print('Error case:\\t', pid)\n\ndef convert_dcm_to_nii(in_root, out_root):\n    '''\n    tree -L 1\n    .\n    ├── 1124013\n    ├── 1140092\n    ├── 1195207\n    ├── 1399063\n    ├── 1424031\n    ├── 1534457\n    ├── 1870593\n    ├── 1944927\n\n    '''\n    \n    for pid in tqdm(os.listdir(in_root)):\n        patient_path = os.path.join(in_root, pid)\n        out_sub_root = os.path.join(out_root, pid)\n        os.makedirs(out_sub_root, exist_ok=True)\n        cta_path = os.path.join(patient_path, 'CTA')\n        cta_image = DataIO.load_dicom_series(cta_path)\n        out_cta_file = os.path.join(out_sub_root, 'CTA.nii.gz')\n        sitk.WriteImage(cta_image['sitk_image'], out_cta_file)\n\n        dwi_path = os.path.join(patient_path, 'DWI')\n        dwi_image = DataIO.load_dicom_series(dwi_path)\n        out_dwi_file = os.path.join(out_sub_root, 'DWI.nii.gz')\n        sitk.WriteImage(dwi_image['sitk_image'], out_dwi_file)\n\ndef step_2_dcm_to_nii(in_root, out_root):\n    for pn_root in os.listdir(in_root):\n        pn_path = os.path.join(in_root, pn_root)\n        for hospital_name in os.listdir(pn_path):\n            hospital_path = os.path.join(pn_path, hospital_name)\n            convert_dcm_to_nii(hospital_path, out_root)\n\n\ndef cerebral_parenchyma_segmentation_new_algo(\n        data_root=None, \n        out_dir = None\n    ):\n    import torch\n    from external_lib.MedCommon.experiments.seg.brain.parenchyma.inference.inference import load_inference_opts\n    from external_lib.MedCommon.segmentation.runner.train_seg import SegmentationTrainer\n    opts = load_inference_opts()\n    model = SegmentationTrainer.load_model(opts)\n    model = torch.nn.DataParallel(model).cuda()\n    model.eval()\n\n    for pid in tqdm(os.listdir(data_root)):\n        pid_path = os.path.join(data_root, pid)\n        if not os.path.isdir(pid_path):\n            print('patient path not exist!\\t{}'.format(pid_path))\n            
continue\n cta_file = os.path.join(pid_path, 'CTA.nii.gz')\n if not os.path.isfile(cta_file):\n print('cta file not exist!\\t{}'.format(cta_file))\n continue\n image, pred_mask = SegmentationTrainer.inference_one_case(model, cta_file, is_dcm=False)\n out_cta_dir = os.path.join(out_dir, pid, 'CTA')\n os.makedirs(out_cta_dir, exist_ok=True)\n out_cta_file = os.path.join(out_cta_dir, 'CTA.nii.gz')\n out_cta_mask_file = os.path.join(out_cta_dir, 'CTA_MASK.nii.gz')\n\n sitk.WriteImage(image, out_cta_file)\n sitk.WriteImage(pred_mask, out_cta_mask_file) \n\ndef step_3_3_segment_cerebral_parenchyma_connected_region(root_dir = '/data/medical/cardiac/cta2mbf/20201216/3.sorted_mask'):\n # root_dir = '/data/medical/cardiac/cta2mbf/20201216/3.sorted_mask'\n for pid in tqdm(os.listdir(root_dir)):\n pid_path = os.path.join(root_dir, pid)\n if not os.path.isdir(pid_path):\n continue\n cta_root = os.path.join(pid_path, 'CTA')\n \n in_cta_file = os.path.join(cta_root, 'CTA_MASK.nii.gz')\n out_cta_file = os.path.join(cta_root, 'CTA_MASK_connected.nii.gz')\n\n try:\n if os.path.isfile(in_cta_file):\n in_mask = sitk.ReadImage(in_cta_file)\n out_mask_sitk = ImagePostProcessingUtils.get_maximal_connected_region_multilabel(in_mask, mask_labels=[1])\n out_mask_sitk = MaskUtils.fill_hole(out_mask_sitk, radius=6)\n sitk.WriteImage(out_mask_sitk, out_cta_file)\n except Exception as e:\n print(e)\n print('====> Error case:\\t{}'.format(pid))\n\ndef extract_cta_cerebral_parenchyma_zlayers(\n cta_root, \n mask_root, \n out_root,\n cta_pattern = 'CTA/CTA.nii.gz', \n mask_pattern = 'CTA/DWI_BBOX_MASK.nii.gz'):\n pids = os.listdir(mask_root)\n for pid in tqdm(pids):\n cta_file = os.path.join(cta_root, pid, cta_pattern)\n mask_file = os.path.join(mask_root, pid, mask_pattern)\n in_image = sitk.ReadImage(cta_file)\n in_mask = sitk.ReadImage(mask_file)\n out_image, out_mask = MaskBoundingUtils.extract_target_area_by_mask_zboundary(in_image, in_mask)\n out_dir = os.path.join(out_root, pid)\n os.makedirs(out_dir, exist_ok=True)\n out_image_file = os.path.join(out_dir, 'CTA.nii.gz')\n sitk.WriteImage(out_image, out_image_file)\n out_mask_file = os.path.join(out_dir, 'MASK.nii.gz')\n sitk.WriteImage(out_mask, out_mask_file)\n\n\ndef extract_cta_cerebral_parenchyma(\n cta_root, \n mask_root, \n out_root, \n cta_pattern = 'CTA.nii.gz', \n mask_pattern = 'MASK.nii.gz', \n out_pattern = 'CTA_parenchyma.nii.gz'\n ):\n pids = os.listdir(mask_root)\n for pid in tqdm(pids):\n try:\n cta_file = os.path.join(cta_root, pid, cta_pattern)\n mask_file = os.path.join(mask_root, pid, mask_pattern)\n out_file = os.path.join(out_root, pid, out_pattern)\n in_image = sitk.ReadImage(cta_file)\n in_mask = sitk.ReadImage(mask_file)\n out_image = ImagePostProcessingUtils.extract_region_by_mask(in_image, in_mask, default_value=-1024, mask_label=1)\n sitk.WriteImage(out_image, out_file)\n except Exception as e:\n print('====> Error case:\\t{}'.format(pid))\n print(e)\n pass\n\n\n\n\ndef generate_dwi_bbox_mask(in_root, out_root, dwi_pattern='DWI.nii.gz', out_dwi_mask_pattern='DWI_BBOX_MASK.nii.gz'):\n for pid in tqdm(os.listdir(in_root)):\n try:\n dwi_file = os.path.join(in_root, pid, dwi_pattern)\n dwi_image = sitk.ReadImage(dwi_file)\n size = dwi_image.GetSize()\n size = size[::-1]\n bbox_mask_arr = np.ones(size, dtype=np.uint8)\n bbox_mask = sitk.GetImageFromArray(bbox_mask_arr)\n bbox_mask.CopyInformation(dwi_image)\n out_sub_dir = os.path.join(out_root, pid)\n os.makedirs(out_sub_dir, exist_ok=True)\n out_mask_file = os.path.join(out_sub_dir, 
out_dwi_mask_pattern)\n            sitk.WriteImage(bbox_mask, out_mask_file)\n            # copy the DWI file to the target path to simplify the later steps\n            src_file = dwi_file\n            dst_file = os.path.join(out_sub_dir, os.path.basename(src_file))\n            shutil.copyfile(src_file, dst_file)\n        except Exception as e:\n            print(e)\n            continue\n\n\n\ndef extract_dwi_cerebral_parenchyma(\n        dwi_root, \n        mask_root, \n        out_root, \n        dwi_pattern = 'registried_dwi.nii.gz',\n        mask_pattern = 'MASK.nii.gz',\n        out_dwi_pattern = 'registried_dwi_parenchyma.nii.gz', \n        mask_label=1\n    ):\n    for pid in tqdm(os.listdir(dwi_root)):\n        try:\n            dwi_file = os.path.join(dwi_root, pid, dwi_pattern)\n            mask_file = os.path.join(mask_root, pid, mask_pattern)\n            if not os.path.isfile(dwi_file):\n                continue\n            if not os.path.isfile(mask_file):\n                continue\n            dwi_image = DataIO.load_nii_image(dwi_file)['sitk_image']\n            mask_image = DataIO.load_nii_image(mask_file)['sitk_image']\n            extracted_dwi_image = ImagePostProcessingUtils.extract_region_by_mask(dwi_image, mask_image, default_value=-1024, mask_label=mask_label)\n            \n            # set values below 0 inside the brain parenchyma to -1024 so they do not cause interference\n            tmp_arr = sitk.GetArrayFromImage(extracted_dwi_image)\n            tmp_arr[tmp_arr<0] = -1024\n            extracted_dwi_image = sitk.GetImageFromArray(tmp_arr)\n            extracted_dwi_image.CopyInformation(dwi_image)\n\n            out_sub_dir = os.path.join(out_root, pid)\n            os.makedirs(out_sub_dir, exist_ok=True)\n            out_dwi_file = os.path.join(out_sub_dir, out_dwi_pattern)\n\n            sitk.WriteImage(extracted_dwi_image, out_dwi_file)\n        except Exception as e:\n            print(e)\n            print('====> Error case:\\t{}'.format(pid))\n\ndef merge_cerebral_parenchyma_mask_and_dwi_bbox(\n        parenchyma_mask_root, \n        dwi_bbox_mask_root, \n        out_root, \n        parenchyma_mask_pattern='MASK.nii.gz',\n        dwi_mask_pattern='registried_dwi_bbox.nii.gz',\n        out_mask_pattern='final_mask.nii.gz'\n    ): \n    for pid in tqdm(os.listdir(dwi_bbox_mask_root)):\n        try:\n            parenchyma_mask_file = os.path.join(parenchyma_mask_root, pid, parenchyma_mask_pattern)\n            dwi_bbox_mask_file = os.path.join(dwi_bbox_mask_root, pid, dwi_mask_pattern)\n            parenchyma_mask_image = sitk.ReadImage(parenchyma_mask_file)\n            dwi_bbox_mask_image = sitk.ReadImage(dwi_bbox_mask_file)\n            parenchyma_mask_arr = sitk.GetArrayFromImage(parenchyma_mask_image)\n            dwi_bbox_mask_arr = sitk.GetArrayFromImage(dwi_bbox_mask_image)\n            merged_mask_arr = parenchyma_mask_arr * dwi_bbox_mask_arr\n            merged_mask_arr = np.array(merged_mask_arr, np.uint8)\n            merged_mask_image = sitk.GetImageFromArray(merged_mask_arr)\n            merged_mask_image.CopyInformation(parenchyma_mask_image)\n            \n            out_sub_dir = os.path.join(out_root, pid)\n            os.makedirs(out_sub_dir, exist_ok=True)\n            out_mask_file = os.path.join(out_sub_dir, out_mask_pattern)\n\n            sitk.WriteImage(merged_mask_image, out_mask_file) \n        except Exception as e:\n            print(e)\n            print('====> Error case:\\t{}'.format(pid))\n\ndef copy_train_data(data_root, out_root, cta_pattern='fixed_cta.nii.gz'):\n    '''\n    Many cases have too low a resolution; copy the ones that meet the requirement to a separate folder\n    '''\n    os.makedirs(out_root, exist_ok=True)\n    min_z = 10000\n    max_z = 0\n    for pid in tqdm(os.listdir(data_root)):\n        cta_file = os.path.join(data_root, pid, cta_pattern)\n        cta_image = sitk.ReadImage(cta_file)\n        size = cta_image.GetSize()\n        print('{}\\t{}'.format(pid, size))\n        if size[2] < 100:\n            continue\n        if min_z > size[2]:\n            min_z = size[2]\n        if max_z < size[2]:\n            max_z = size[2]\n        src_file = os.path.join(data_root, pid)\n        dst_file = os.path.join(out_root, pid)\n        shutil.copytree(src_file, dst_file)\n    print('min z:\\t{},\\t\\tmax z:\\t{}'.format(min_z, max_z))\n\n\n# generate slice (MPR) preview images\ndef genereate_mpr_slice(data_root, out_root, \n        
src_pattern='fixed_cta.nii.gz', \n dst_pattern='registried_dwi.nii.gz'\n ):\n for pid in tqdm(os.listdir(data_root)):\n try:\n src_image_file = os.path.join(data_root, pid, src_pattern)\n dst_image_file = os.path.join(data_root, pid, dst_pattern)\n # out_sub_root = os.path.join(out_root, pid)\n out_sub_root = out_root\n os.makedirs(out_sub_root, exist_ok=True)\n out_src_prefix = '{}_src'.format(pid)\n out_dst_prefix = '{}_dst'.format(pid)\n src_image = sitk.ReadImage(src_image_file)\n dst_image = sitk.ReadImage(dst_image_file)\n ImageShowUtils.save_volume_to_mpr_jpg(src_image, out_sub_root, 150, 50, out_src_prefix)\n ImageShowUtils.save_volume_to_mpr_jpg(dst_image, out_sub_root, 400, 200, out_dst_prefix)\n except Exception as e:\n print('====> Error case:\\t{}'.format(pid))\n pass\n\n\ndef data_preprocessing():\n data_root = '/data/medical/brain/gan/cta2dwi_multi_classified'\n\n # step_2_dcm_to_nii(os.path.join(data_root, '0.ori'), \n # os.path.join(data_root, '3.sorted_nii'))\n\n # step 3 cerebral parenchyma segmentation\n # cerebral_parenchyma_segmentation_new_algo(\n # os.path.join(data_root, '3.sorted_nii'), \n # os.path.join(data_root, '3.sorted_mask')\n # )\n # step_3_3_segment_cerebral_parenchyma_connected_region(\n # os.path.join(data_root, '3.sorted_mask')\n # )\n\n # extract_cta_cerebral_parenchyma_zlayers(\n # os.path.join(data_root, '3.sorted_mask'), \n # os.path.join(data_root, '3.sorted_mask'), \n # os.path.join(data_root, '4.cropped_nii')\n # )\n\n # generate_dwi_bbox_mask(\n # os.path.join(data_root, '3.sorted_nii'),\n # os.path.join(data_root, '4.cropped_nii')\n # )\n\n # registration : run data_preprocessing_registration_dwi2cta.py\n\n # extract_dwi_cerebral_parenchyma(\n # os.path.join(data_root, '4.registration_batch'), \n # os.path.join(data_root, '4.cropped_nii'), \n # os.path.join(data_root, '4.registration_batch')\n # )\n\n # merge_cerebral_parenchyma_mask_and_dwi_bbox(\n # os.path.join(data_root, '4.cropped_nii'), \n # os.path.join(data_root, '4.registration_batch'), \n # os.path.join(data_root, '4.registration_batch')\n # )\n\n copy_train_data(\n os.path.join(data_root, '4.registration_batch'), \n os.path.join(data_root, '5.train_batch')\n )\n\n\ndef convert_dcm_to_nii_history(in_root, out_root):\n pids = os.listdir(in_root)\n for pid in tqdm(pids):\n try:\n out_sub_root = os.path.join(out_root, pid)\n os.makedirs(out_sub_root, exist_ok=True)\n\n patient_path = os.path.join(in_root, pid, 'NCCT')\n suid = os.listdir(patient_path)[0]\n cta_path = os.path.join(patient_path, suid)\n cta_image = DataIO.load_dicom_series(cta_path)\n out_cta_file = os.path.join(out_sub_root, 'CTA.nii.gz')\n sitk.WriteImage(cta_image['sitk_image'], out_cta_file)\n\n patient_path = os.path.join(in_root, pid, 'DWI')\n suid = os.listdir(patient_path)[0]\n dwi_path = os.path.join(patient_path, suid, 'bxxx')\n dwi_image = DataIO.load_dicom_series(dwi_path)\n out_dwi_file = os.path.join(out_sub_root, 'DWI.nii.gz')\n sitk.WriteImage(dwi_image['sitk_image'], out_dwi_file) \n except Exception as e:\n print('====> Error case:\\t', pid)\n continue \n\n\n# def data_preprocessing_batch1():\n# data_root = '/data/medical/brain/gan/cta2dwi_history_pos'\n# # data_root = '/data/medical/brain/gan/cta2dwi_history_neg'\n\n# # convert_dcm_to_nii_history(os.path.join(data_root, '0.raw_dcm'), \n# # os.path.join(data_root, '3.sorted_nii'))\n# # convert_dcm_to_nii_history(os.path.join(data_root, '0.raw_dcm_neg'), \n# # os.path.join(data_root, '3.sorted_nii'))\n\n# # '''\n# # deleted algo\n# # '''\n# # # step 3 
cerebral parenchyma segmentation\n# # # cerebral_parenchyma_segmentation_new_algo(\n# # # os.path.join(data_root, '3.sorted_nii'), \n# # # os.path.join(data_root, '3.sorted_mask')\n# # # )\n# # # step_3_3_segment_cerebral_parenchyma_connected_region(\n# # # os.path.join(data_root, '3.sorted_mask')\n# # # )\n\n# # # extract_cta_cerebral_parenchyma_zlayers(\n# # # os.path.join(data_root, '3.sorted_mask'), \n# # # os.path.join(data_root, '3.sorted_mask'), \n# # # os.path.join(data_root, '4.cropped_nii')\n# # # )\n\n# # # generate_dwi_bbox_mask(\n# # # os.path.join(data_root, '3.sorted_nii'),\n# # # os.path.join(data_root, '4.cropped_nii')\n# # # )\n \n \n# # generate_dwi_bbox_mask(\n# # os.path.join(data_root, '3.sorted_nii'),\n# # os.path.join(data_root, '3.sorted_mask')\n# # )\n \n\n# # extract_cta_cerebral_parenchyma(\n# # os.path.join(data_root, '4.cropped_nii'), \n# # os.path.join(data_root, '4.cropped_nii'), \n# # os.path.join(data_root, '4.cropped_nii'), \n# # )\n\n# # registration : run data_preprocessing_registration_dwi2cta.py\n\n# # extract_dwi_cerebral_parenchyma(\n# # os.path.join(data_root, '4.registration_batch'), \n# # os.path.join(data_root, '4.cropped_nii'), \n# # os.path.join(data_root, '4.registration_batch')\n# # )\n\n\n# extract_dwi_cerebral_parenchyma(\n# os.path.join(data_root, '4.registration_batch_2d'), \n# os.path.join(data_root, '3.sorted_mask'), \n# os.path.join(data_root, '4.registration_batch_2d'), \n# mask_pattern='_brain_mask.nii.gz'\n# )\n\n# # merge_cerebral_parenchyma_mask_and_dwi_bbox(\n# # os.path.join(data_root, '4.cropped_nii'), \n# # os.path.join(data_root, '4.registration_batch'), \n# # os.path.join(data_root, '4.registration_batch')\n# # )\n\n# # copy_train_data(\n# # os.path.join(data_root, '4.registration_batch'), \n# # os.path.join(data_root, '5.train_batch')\n# # )\n\n# # genereate_mpr_slice(\n# # os.path.join(data_root, '5.train_batch'), \n# # os.path.join(data_root, '6.mpr')\n# # )\n\n\ndef data_preprocessing_batch1():\n data_root = '/data/medical/brain/gan/cta2dwi_history_pos'\n # data_root = '/data/medical/brain/gan/cta2dwi_history_neg'\n\n # step 1\n # convert_dcm_to_nii_history(os.path.join(data_root, '0.raw_dcm'), \n # os.path.join(data_root, '3.sorted_nii'))\n # convert_dcm_to_nii_history(os.path.join(data_root, '0.raw_dcm_neg'), \n # os.path.join(data_root, '3.sorted_nii'))\n \n # step 2\n # segment by 2d algorithm\n \n # step 3\n # generate_dwi_bbox_mask(\n # os.path.join(data_root, '3.sorted_nii'),\n # os.path.join(data_root, '3.sorted_mask')\n # )\n \n\n # step 4\n # registration : run data_preprocessing_registration_dwi2cta.py\n\n # step 5\n # extract_dwi_cerebral_parenchyma(\n # os.path.join(data_root, '4.registration_batch_2d'), \n # os.path.join(data_root, '3.sorted_mask'), \n # os.path.join(data_root, '4.registration_batch_2d'), \n # mask_pattern='_brain_mask.nii.gz'\n # )\n\n # step 6\n # merge_cerebral_parenchyma_mask_and_dwi_bbox(\n # os.path.join(data_root, '3.sorted_mask'), \n # os.path.join(data_root, '4.registration_batch_2d'), \n # os.path.join(data_root, '4.registration_batch_2d'), \n # parenchyma_mask_pattern='_brain_mask.nii.gz'\n # )\n\n # step 7\n # copy_train_data(\n # os.path.join(data_root, '4.registration_batch_2d'), \n # os.path.join(data_root, '5.train_batch_2d')\n # )\n\n # step 8\n # genereate_mpr_slice(\n # os.path.join(data_root, '5.train_batch_2d'), \n # os.path.join(data_root, '6.mpr_2d')\n # )\n\n # step 5\n extract_dwi_cerebral_parenchyma(\n os.path.join(data_root, '4.registration_batch_2d_parenchyma'), 
\n        os.path.join(data_root, '3.sorted_mask'), \n        os.path.join(data_root, '4.registration_batch_2d_parenchyma'), \n        mask_pattern='_brain_mask.nii.gz'\n    )\n\n    # step 6\n    merge_cerebral_parenchyma_mask_and_dwi_bbox(\n        os.path.join(data_root, '3.sorted_mask'), \n        os.path.join(data_root, '4.registration_batch_2d_parenchyma'), \n        os.path.join(data_root, '4.registration_batch_2d_parenchyma'), \n        parenchyma_mask_pattern='_brain_mask.nii.gz'\n    )\n\n    # step 7\n    copy_train_data(\n        os.path.join(data_root, '4.registration_batch_2d_parenchyma'), \n        os.path.join(data_root, '5.train_batch_2d_parenchyma')\n    )\n\n    # step 8\n    genereate_mpr_slice(\n        os.path.join(data_root, '5.train_batch_2d_parenchyma'), \n        os.path.join(data_root, '6.mpr_2d_parenchyma')\n    )\n\n\ndef remove_error_pairs(data_root):\n    for pid in tqdm(os.listdir(data_root)):\n        patient_path = os.path.join(data_root, pid)\n        if pid not in error_list:\n            continue\n        print('remove\\t', pid)\n        shutil.rmtree(patient_path)\n\n\nif __name__ == '__main__':\n    # step_1_check_folder_format('/data/medical/brain/gan/hospital_6_multi_classified/CTA2DWI-多中心-20201102', \n    #                            '/data/medical/brain/gan/cta2dwi_multi_classified')\n\n    # data_preprocessing()\n    # data_preprocessing_batch1()\n    # remove_error_pairs('/data/medical/brain/gan/cta2dwi_all_2d_parenchyma/5.train_batch_2d_parenchyma')\n    remove_error_pairs('/ssd/zhangwd/cta2mbf/cta2dwi_all_2d_parenchyma/5.train_batch_2d_parenchyma')","sub_path":"data_preprocessing/data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":24992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"50370398","text":"def sort_words(string):\n    words = string.split()\n    \"\"\"\n    append lowercase and copy using list comprehension \n    \"\"\"\n    words = [w.lower() + w for w in words]\n    \"\"\"\n    sort string using sort function\n    \"\"\"\n    words.sort()\n    \"\"\"\n    assemble the rest of list by string separated by space \n    \"\"\"\n    words = [w[len(w)//2:] for w in words]\n    # without this return the function yielded None; join the words back into a string\n    return ' '.join(words)\n\nstring = \"banana ORANGE apple\"\nprint(sort_words(string))\n\n","sub_path":"chanllenges/sort_string.py","file_name":"sort_string.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"291361349","text":"'''\n    Holds utility functions needed across the different components\n'''\nimport os\n\ndef calculate_file_uid(filename):\n    \"\"\"This is a basic function. Right now only returns the file size. For logs they are very unlikely to \n    be the same if a log has been rotated. A better function would be an md5sum
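; a hypothetical\n    sketch (not part of this module) could be:\n\n        import hashlib\n        with open(filename, 'rb') as fh:\n            uid = hashlib.md5(fh.read()).hexdigest()\n\n    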
\"\"\"\n    uid = None\n    if filename:\n        try:\n            uid = str(os.stat(filename).st_size)\n        except OSError:\n            uid = None\n    return uid\n    \n","sub_path":"utility_functions.py","file_name":"utility_functions.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"626731922","text":"\n\nimport os\nimport platform\nfrom collections import defaultdict\nglobal api_call\nglobal myNet\nglobal pathDict\nglobal fileNames\n\n\n##################### Empty Global variables declaration\nweight_seed_idx = 0\npreprocess_seed_idx = 0\n\n\n\n\n##################### External Data Fetch API keys\n\napi_call ={}\nmyNet = {}\nnetParams=defaultdict(lambda: defaultdict())\n\napi_call['zillow_zid'] = 'xyz'\napi_call['bing_key'] = 'xyz'\napi_call['google_streetside_key'] = 'xyz'\napi_call['google_aerial_key'] = 'xyz'\napi_call['google_meta_key'] = 'xyz'\n\n\napi_call['google_aerial_key'] = str(api_call['google_aerial_key'][2])\napi_call['google_meta_key'] = str(api_call['google_meta_key'][2])\n\n\n\n\n#################### Seed Arrays\nseed_arr = [553, 292, 394, 874, 445, 191, 161, 141, 213,436,754,991,302,992,223,645,724,944,\n            232,123,321, 909,784,239,337,888,666, 400,912,255,983,902,846,345,\n            854,989,291,486,444,101,202,304,505,607,707,808,905, 900, 774,272]\n\n#################### PREPROCESSING PARAMETERS\npp_vars = {}\npp_vars['standardise'] = True\npp_vars['rand_brightness'] = False\npp_vars['rand_contrast'] = False\npp_vars['rand_rotate'] = False\npp_vars['rand_flip'] = True\npp_vars['rand_crop'] = False\npp_vars['central_crop'] = True\n\n\n\n##################### NET PARAMETERS\nmyNet['num_labels'] = 2\nmyNet['optimizer'] = 'ADAM'\nmyNet['learning_rate'] = 0.0005\nmyNet['momentum'] = 0.9\nmyNet['learning_rate_decay_rate'] = 0.95\nmyNet['batch_norm_decay'] = 0.9\nmyNet['batch_size'] = 128\nmyNet['lr_decay_steps'] = 3000 # how many examples to see before making a decay\n# If you are learning a very complex function then setting lr_decay_steps = train_size, makes sense. But if the\n# function is not very complex and you feel that the function can be marginally learned in 1-3 steps then set it to\n# train_size/5 or something like that. 
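\n# A hypothetical sketch of the resulting schedule (plain exponential decay, as in\n# tf.train.exponential_decay; illustration only, not used directly by this config):\n#   lr = learning_rate * learning_rate_decay_rate ** (global_step / lr_decay_steps)\n# 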
This would ensure that the high learning rate doesn't make the optimization jump\n# away from minima.\n\n##################### BATCH PARAMETERS\n\n\n\n#################### CONV parameters\nnetParams['conv1']['conv_shape'] = [3,3,3,64]\nnetParams['conv1']['conv_stride'] = 1\nnetParams['conv1']['conv_pad'] = 'SAME'\nnetParams['conv1']['pool_size'] = 2\nnetParams['conv1']['pool_stride'] = 2\nnetParams['conv1']['pool_pad'] = 'SAME'\nnetParams['conv1']['keep_prob'] = 0.5\n\nnetParams['conv2']['conv_shape'] = [3,3,64,128]\nnetParams['conv2']['conv_stride'] = 1\nnetParams['conv2']['conv_pad'] = 'SAME'\nnetParams['conv2']['pool_size'] = 2\nnetParams['conv2']['pool_stride'] = 2\nnetParams['conv2']['pool_pad'] = 'SAME'\nnetParams['conv2']['keep_prob'] = 0.5\n\nnetParams['conv3']['conv_shape'] = [3,3,128,256]\nnetParams['conv3']['conv_stride'] = 1\nnetParams['conv3']['conv_pad'] = 'SAME'\nnetParams['conv3']['pool_size'] = 2\nnetParams['conv3']['pool_stride'] = 2\nnetParams['conv3']['pool_pad'] = 'SAME'\nnetParams['conv3']['keep_prob'] = 0.5\n\nnetParams['conv4']['conv_shape'] = [3,3,256,256]\nnetParams['conv4']['conv_stride'] = 1\nnetParams['conv4']['conv_pad'] = 'SAME'\nnetParams['conv4']['pool_size'] = 2\nnetParams['conv4']['pool_stride'] = 2\nnetParams['conv4']['pool_pad'] = 'SAME'\nnetParams['conv4']['keep_prob'] = 0.5\n\nnetParams['fc1']['shape'] = [None, 1280]\nnetParams['fc1']['keep_prob'] = 0.5\nnetParams['fc2']['shape'] = [1280, 1280]\nnetParams['fc2']['keep_prob'] = 0.5\nnetParams['fc3']['shape'] = [1280, 1000]\nnetParams['fc3']['keep_prob'] = 0.8\n\nnetParams['softmax']['shape'] = [1000, 2]\n\n\n\n##################### Other important\n\n\n\n##################### IMAGE/MODEL PATH\nfileNames = {}\nfileNames['rsized_img_file'] = 'resized_image_arr.pickle'\nfileNames['batch_img_file'] = 'batch_img_arr.pickle'\n\n\n##################### IMAGE PATHS\npathDict = {}\n\nif platform.platform().split('-')[0] == 'Darwin':\n    pathDict['parent_path'] = \"/Users/sam/All-Program/App-DataSet/HouseClassification/\"\nelse:\n    pathDict['parent_path'] = r\"C:\\Users\\newline\\Documents\\ImageClassification\\data\"\npathDict['statistics_path'] = os.path.join(pathDict['parent_path'], \"statistics\")\npathDict['data_model_path'] = os.path.join(pathDict['parent_path'], 'data_models')\npathDict['pin_batch_row_meta_path'] = os.path.join(pathDict['statistics_path'], 'pin_batch_row_meta')\n\n##### Aerial Images from Google\npathDict['google_aerial_image_path'] = os.path.join(pathDict['parent_path'], \"input_images\", \"aerial_images\", \"google\")\npathDict['google_streetside_image_path'] = os.path.join(pathDict['parent_path'], \"input_images\", \"streetside_images\", \"google\")\npathDict['google_overlayed_image_path'] = os.path.join(pathDict['parent_path'],\"input_images\",\"overlayed_images\",\"google\")\npathDict['google_aerial_stats_path'] = os.path.join(pathDict['statistics_path'], \"aerial_images\", \"google\")\n# pathDict['aerial_rsized_path'] = os.path.join(pathDict['data_model_path'], \"aerial_images\")\npathDict['google_aerial_batch_path'] = os.path.join(pathDict['data_model_path'], \"aerial_images\", \"google\",'batch_data')\npathDict['google_overlayed_batch_path'] = os.path.join(pathDict['data_model_path'],\"overlayed_images\",\"google\",'batch_data')\npathDict['google_aerial_ckpt_path'] = os.path.join(pathDict['data_model_path'], \"aerial_images\", \"google\", 'checkpoint')\npathDict['google_overlayed_ckpt_path'] = os.path.join(pathDict['data_model_path'], \"overlayed_images\", \"google\", 
'checkpoint')\npathDict['google_aerial_smry_path'] = os.path.join(pathDict['data_model_path'], \"aerial_images\", \"google\", 'summary')\npathDict['google_overlayed_smry_path'] = os.path.join(pathDict['data_model_path'], \"overlayed_images\", \"google\", 'summary')\n\n##### Aerial Images from Bing\npathDict['bing_aerial_image_path'] = os.path.join(pathDict['parent_path'], \"input_images\", \"aerial_images\", \"bing\")\npathDict['bing_streetside_image_path'] = os.path.join(pathDict['parent_path'], \"input_images\", \"streetside_images\", \"bing\")\npathDict['bing_overlayed_image_path'] = os.path.join(pathDict['parent_path'],\"input_images\",\"overlayed_images\",\"bing\")\npathDict['bing_aerial_stats_path'] = os.path.join(pathDict['statistics_path'], \"aerial_images\", \"bing\")\n# pathDict['aerial_rsized_path'] = os.path.join(pathDict['data_model_path'], \"aerial_images\")\npathDict['bing_aerial_batch_path'] = os.path.join(pathDict['data_model_path'], \"aerial_images\", \"bing\", 'batch_data')\npathDict['bing_aerial_ckpt_path'] = os.path.join(pathDict['data_model_path'], \"aerial_images\", \"bing\", 'checkpoint')\npathDict['bing_aerial_smry_path'] = os.path.join(pathDict['data_model_path'], \"aerial_images\", \"bing\", 'summary')\n\n\n\n##### Assessor Images\n\npathDict['assessor_image_path'] = os.path.join(pathDict['parent_path'], \"input_images\", \"assessor_images\")\npathDict['assessor_dl_stats_path'] = os.path.join(pathDict['statistics_path'], \"assessor_images\", 'data_loader')\npathDict['assessor_dp_stats_path'] = os.path.join(pathDict['statistics_path'], \"assessor_images\", 'data_prep')\npathDict['assessor_rsized_path'] = os.path.join(pathDict['data_model_path'], \"assessor_images\")\npathDict['assessor_batch_path'] = os.path.join(pathDict['data_model_path'], \"assessor_images\", 'batch_data')\npathDict['assessor_ckpt_path'] = os.path.join(pathDict['data_model_path'], \"assessor_images\", 'checkpoint')\npathDict['assessor_smry_path'] = os.path.join(pathDict['data_model_path'], \"assessor_images\", 'summary')\n\n\n\n##### Streetside Images\npathDict['streetside_image_path'] = os.path.join(pathDict['parent_path'], \"input_images\", \"streetside_images\")\npathDict['streetside_dl_stats_path'] = os.path.join(pathDict['statistics_path'], \"streetside_images\", 'data_loader')\npathDict['streetside_dp_stats_path'] = os.path.join(pathDict['statistics_path'], \"streetside_images\", 'data_prep')\npathDict['streetside_rsized_path'] = os.path.join(pathDict['data_model_path'], \"streetside_images\")\npathDict['streetside_batch_path'] = os.path.join(pathDict['data_model_path'], \"streetside_images\", 'batch_data')\npathDict['streetside_ckpt_path'] = os.path.join(pathDict['data_model_path'], \"streetside_images\", 'checkpoint')\npathDict['streetside_smry_path'] = os.path.join(pathDict['data_model_path'], \"streetside_images\", 'summary')\n\n\n\n##### Other Stats directory\npathDict['assessor_pred_stats'] = os.path.join(pathDict['statistics_path'], 'assessor_images')\npathDict['google_aerial_pred_stats'] = os.path.join(pathDict['statistics_path'], 'aerial_images')\npathDict['google_overlayed_pred_stats'] = os.path.join(pathDict['statistics_path'], 'overlayed_images')\n\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":8348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"49231069","text":"import json\nimport math\nimport os\nimport random\n\nfrom glob import glob\nfrom os.path import join as pj\n\ndef 
add_item(in_list, out_dict):\n    for item in in_list:\n        try:\n            out_dict[os.path.basename(item).split('.')[0]].append(item)\n        except KeyError:\n            # first file seen for this key\n            out_dict[os.path.basename(item).split('.')[0]] = [item]\n\ndef parse_folder_ft_det(input_path, subfolders, valid_ratio):\n    \"\"\"\n    The point of having this as a separate interface is that\n    we can easily deal with changes of the file structure.\n\n    Args:\n\n\n    Returns:\n        res_dict: {data_key: [abs_img_path, abs_label_path], ...}\n\n    \"\"\"\n    if subfolders:\n        folder_list = [item for item in os.listdir(input_path) if os.path.isdir(pj(input_path, item))]\n        img_list = []\n        anno_list = []\n        for folder in folder_list:\n            img_list += glob(pj(input_path, folder, 'images', '*')) # TODO: file type check\n            anno_list += glob(pj(input_path, folder, 'labels', '*'))\n    else:\n        img_list = glob(pj(input_path, 'images', '*'))\n        anno_list = glob(pj(input_path, 'labels', '*'))\n    img_list = [im for im in img_list if im.endswith('.jpg')]\n    anno_list = [ann for ann in anno_list if ann.endswith('.json')]\n    # returns a dict where each entry is a list: [abspath_to_img, abspath_to_anno]\n    res_dict = {}\n    add_item(img_list, res_dict)\n    add_item(anno_list, res_dict)\n    # iterate over a copy so entries can be removed while looping (deleting from\n    # a dict during items() iteration raises a RuntimeError on Python 3)\n    for k, v in list(res_dict.items()):\n        # remove the entry from the dict if one file (img/ann) is missing\n        if len(v) == 1:\n            del res_dict[k]\n\n    shuffled_list = list(res_dict.keys())\n    random.shuffle(shuffled_list)\n    train_size = int(math.floor(len(shuffled_list) * (1 - float(valid_ratio))))\n\n    return res_dict, shuffled_list, train_size\n\ndef find_det_parent_class(in_cls, finegrained_cls):\n    if finegrained_cls:\n        if in_cls.startswith('pl'):\n            return 'pl'\n        elif in_cls.startswith('pm'):\n            return 'pm'\n        elif in_cls.startswith('ph'):\n            return 'ph'\n        else:\n            return in_cls\n    else:\n        if in_cls.startswith('p'):\n            return 'p'\n        if in_cls.startswith('i'):\n            return 'i'\n        return in_cls\n\ndef ft_mask_conversion(width, height, anno):\n    xmin = width\n    ymin = height\n    xmax = 0\n    ymax = 0\n    point_set = []\n    # if anno is a dictionary\n    for i in range(int(len(anno['polygon']) / 2)):\n        x = anno['polygon']['x' + str(i + 1)]\n        y = anno['polygon']['y' + str(i + 1)]\n        if(x < 0):\n            x = 0\n        if(x > width):\n            x = width\n        if(y < 0):\n            y = 0\n        if (y > height):\n            y = height\n        xmin = min(x, xmin)\n        ymin = min(y, ymin)\n        xmax = max(x, xmax)\n        ymax = max(y, ymax)\n        point_set.append(x)\n        point_set.append(y)\n\n    # A simple check to detect an illegal segmentation annotation\n    if len(point_set) <= 4:\n        print('Illegal segmentation annotation!')\n        return\n\n    return [xmin, ymin, xmax, ymax], point_set\n\n# PREDEFINED_CLASSES_GENERIC = ['i','p', 'wo', 'rn', 'lo', 'tl', 'ro']\nPRE_DEFINE_CATEGORIES_GENERIC = {'i': 1, 'p': 2, 'wo': 3, 'rn': 4, 'lo': 5, \n                                 'tl': 6, 'ro': 7}\nPREDEFINED_CLASSES = ['io', 'wo', 'ors', 'p10', 'p11', \n                      'p26', 'p20', 'p23', 'p19', 'pne',\n                      'rn', 'ps', 'p5', 'lo', 'tl',\n                      'pg', 'sc1','sc0', 'ro', 'pn',\n                      'po', 'pl', 'pm']\n\ndef merge_cls(inFile, outFile):\n    with open(inFile) as f:\n        json_list = json.load(f)\n    out_json_list = []\n    for ann in json_list:\n        if ann['category_name'] in ['ors', 'sc0', 'sc1']:\n            continue\n        elif ann['category_name'].startswith('p'):\n            ann['category_name'] = 'p'\n            ann['category_id'] = PRE_DEFINE_CATEGORIES_GENERIC['p']\n        elif ann['category_name'] == 'io':\n            ann['category_name'] = 'i'\n            ann['category_id'] = PRE_DEFINE_CATEGORIES_GENERIC['i']\n        else:\n            ann['category_id'] = PRE_DEFINE_CATEGORIES_GENERIC[ann['category_name']]\n        out_json_list.append(ann)\n    with open(outFile, 'w') as f:\n        f.write(json.dumps(out_json_list))\n\nif __name__ 
== '__main__':\n merge_cls('/media/yingges/Data/201910/FT/FTData/ft_det_cleanedup/ignore_toosmall/11_30/og_files/generic_valid_sizethr625_fpn.json',\n '/media/yingges/Data/201910/FT/FTData/ft_det_cleanedup/ignore_toosmall/11_30/og_files/merged_generic_valid_sizethr625_fpn.json')","sub_path":"datasets/yx_toolset/python/utils/data_conversion.py","file_name":"data_conversion.py","file_ext":"py","file_size_in_byte":4553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"548874195","text":"#usr/bin/env python3\n\nimport socket\nimport sys\nimport os\nimport pickle\nimport datetime\nimport time \n\n#Note: \n#\n#\n#\n# For more info: github.com/davidcawork\n\n#Global Vars\nMSG_PROXPY_HI = '[ProxPy] Logger activated!'\nMSG_PROXPY_LOG_DATA = '[ProxPy] Log data'\nMSG_PROXPY_LOG_BYE = '[ProxPy] Bye!'\nMSG_PROXPY_LOG_REQ = '[ProxPy] Log data: Request'\nMSG_PROXPY_LOG_RPLY = '[ProxPy] Log data: Reply'\nMSG_PROXPY_BYE = '[ProxPy] Turning off ProxPy Logger ....'\nBUFFER_SIZE = 1024*5\n\n\n#To get our socket UDP, where we will hear logs from ProxPy\ndef get_our_socket(port = '8010'):\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.bind(('',port))\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n return s\n except:\n new_port = randint(8000, 9000)\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.bind(('',new_port))\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n return s\n\n#To create log dir and get fd \ndef create_logs(name,current_time):\n isLogDirCreate = False\n path = os.getcwd()\n list_dir = os.listdir(path)\n LogDir = 'logs'\n for files in list_dir:\n if files == LogDir:\n try:\n isLogDirCreate = True\n log_file=open(path +'/'+LogDir+'/log_'+name+'_'+current_time.strftime('%Y-%m-%d')+'.log','a+')\n except:\n print('Error: cannot create log files: '+path +'/'+LogDir+'/log_'+\n name+'_'+current_time.strftime('%Y-%m-%d')+'.log')\n \n if not isLogDirCreate:\n os.mkdir(path+'/'+LogDir)\n try:\n log_file=open(path +'/'+LogDir+'/log_'+name+'_'+current_time.strftime('%Y-%m-%d')+'.log','a+')\n except:\n print('Error: cannot create log files: '+path +'/'+LogDir+'/log_'\n +name+'_'+current_time.strftime('%Y-%m-%d')+'.log')\n \n return log_file\n\n\n#To get ProxPy str time format\ndef get_str_time_ProxPy():\n return ('['+(datetime.datetime.now()).strftime('%H:%M:%S')+']')\n\n#To say welcome\ndef welcome():\n print(get_str_time_ProxPy() + MSG_PROXPY_HI)\n#To log incoming data\ndef logger(file_to_log, data):\n \n try:\n if data[1] == MSG_PROXPY_LOG_REQ:\n file_to_log.write(get_str_time_ProxPy() +'(REQUEST) Method: ' +data[2][0]+' | Version: '+data[2][1]\n +' | IP_server: '+data[2][3]+' | IP_client: '+data[2][4]+' | Port_client: '+str(data[2][5])+' | URL: '+data[2][2]+'\\n' )\n elif data[1] == MSG_PROXPY_LOG_RPLY:\n file_to_log.write(get_str_time_ProxPy() +'(Reply) State: ' +data[2][1]+' | Version: '+data[2][0]\n +' | IP_server: '+data[2][3]+' | IP_client: '+data[2][4]+' | Port_client: '+str(data[2][5])+' | URL: '+data[2][2]+'\\n' )\n \n except:\n file_to_log.close()\n exit(-1)\n\n\n\nif __name__ == \"__main__\":\n #Check argv's\n if len(sys.argv) != 2:\n print('Error: Usage: pyhton3 ' + sys.argv[0] + ' ')\n exit(0)\n else:\n\n #To say welcome \n welcome()\n\n\n #Just create a socket, and bind it\n our_port = int(sys.argv[1])\n name = 'ProxPy' \n s = get_our_socket(our_port)\n\n #To create log dir and get fd \n current_time = datetime.datetime.now()\n logs = create_logs(name,current_time)\n\n\n try: \n while 
True:\n #Wait for logs :))\n data_b,addr = s.recvfrom(BUFFER_SIZE)\n\n #Recover the list with pickle\n data = pickle.loads(data_b)\n\n if data:\n\n if data[0] == MSG_PROXPY_LOG_DATA:\n logger(logs, data)\n\n elif data[0] == MSG_PROXPY_LOG_BYE:\n #Only for SSOO releases the bind made to the port\n s.close()\n break\n else:\n break\n \n\n except KeyboardInterrupt:\n #Only for SSOO releases the bind made to the port\n s.close()\n\n print('\\n\\n'+get_str_time_ProxPy() + MSG_PROXPY_BYE)\n","sub_path":"ProxPy/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":4109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"445616895","text":"import sys\nimport math\n\nif __name__ == \"__main__\":\n n = int(sys.stdin.readline().strip())\n ps = []\n for i in range(n):\n ps.append([int(x) for x in sys.stdin.readline().strip().split(\" \")])\n\n\n def dot(v1, v2):\n return v1[0] * v2[0] + v1[1] * v2[1]\n\n def vector(p1, p2):\n return [p2[0] - p1[0], p2[1] - p1[1]]\n\n def point_distance(p1, p2):\n v = vector(p1, p2)\n return math.sqrt(dot(v, v))\n\n min_dis = 1e9 \n min_center = 0\n centers = []\n \n for i in range(0, 1 << (n - 1)):\n clusters1 = [ps[0]]\n clusters2 = [ps[-1]]\n flag = [False] * n\n for j in range(1, n - 1):\n if (i & 1 << j):\n clusters1.append(ps[j])\n flag[j] = True\n for j in range(1, n - 1):\n if not flag[j]:\n clusters2.append(ps[j])\n\n len1 = len(clusters1)\n len2 = len(clusters2)\n\n center1 = [0, 0]\n for p in clusters1:\n center1[0] += p[0] / len1\n center1[1] += p[1] / len1\n\n center2 = [0, 0]\n for p in clusters2:\n center2[0] += p[0] / len2\n center2[1] += p[1] / len2\n\n\n dis = 0\n for p in clusters1:\n dis += point_distance(p, center1)\n for p in clusters2:\n dis += point_distance(p, center2)\n\n if dis < min_dis:\n min_dis = dis\n min_center = [center1, center2]\n\n for center in min_center:\n sys.stdout.write(\"%.3f %.3f\"% (center[0], center[1]) + '\\n')\n\n","sub_path":"written_exam/gelinshentong6.py","file_name":"gelinshentong6.py","file_ext":"py","file_size_in_byte":1543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"610207442","text":"path = \"C:\\\\Users\\\\t8709339\\\\Desktop\\\\talpiot\\\\procton\\\\Board_detection_manager\\\\Soldier_Detection\\\\stds\"\nimport os\n\n\ndef frame_movie(filename):\n import cv2\n vidcap = cv2.VideoCapture(filename)\n success, image = vidcap.read()\n count = 0\n while success:\n cv2.imwrite(filename + \" \" + \"frame%d.png\" % count, image)\n success, image = vidcap.read()\n print('Read a new frame: ', success)\n count += 1\n\n\nfor filename in os.listdir(path):\n if filename.endswith(\".mp4\"):\n frame_movie(path + \"\\\\\" + filename)\n continue\n else:\n continue\n","sub_path":"Board_detection_manager/Soldier_Detection/mp4_to_png.py","file_name":"mp4_to_png.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"214680119","text":"#!/usr/bin/env python2\n# -*- coding: UTF-8 -*-\n# File: param.py\n# Author: Yuxin Wu \n\nimport tensorflow as tf\nfrom abc import abstractmethod, ABCMeta\nimport operator\n\nfrom .base import Callback\nfrom ..utils import logger\nfrom ..tfutils import get_op_var_name\n\n__all__ = ['HyperParamSetter', 'HumanHyperParamSetter',\n 'ScheduledHyperParamSetter']\n\nclass HyperParamSetter(Callback):\n \"\"\"\n Base class to set hyperparameters after every epoch.\n \"\"\"\n __metaclass__ = 
ABCMeta\n\n TF_VAR = 0\n OBJ_ATTR = 1\n\n def __init__(self, param, shape=[]):\n \"\"\"\n :param param: either a name of the variable in the graph, or a (object, attribute) tuple\n :param shape: shape of the param\n \"\"\"\n if isinstance(param, tuple):\n self.param_type = HyperParamSetter.OBJ_ATTR\n self.obj_attr = param\n self.readable_name = param[1]\n else:\n self.param_type = HyperParamSetter.TF_VAR\n self.readable_name, self.var_name = get_op_var_name(param)\n self.shape = shape\n self.last_value = None\n\n def _setup_graph(self):\n if self.param_type == HyperParamSetter.TF_VAR:\n all_vars = tf.all_variables()\n for v in all_vars:\n if v.name == self.var_name:\n self.var = v\n break\n else:\n raise ValueError(\"{} is not a VARIABLE in the graph!\".format(self.var_name))\n\n self.val_holder = tf.placeholder(tf.float32, shape=self.shape,\n name=self.readable_name + '_feed')\n self.assign_op = self.var.assign(self.val_holder)\n\n def get_current_value(self):\n \"\"\"\n :returns: the value to assign to the variable now.\n \"\"\"\n ret = self._get_current_value()\n if ret is not None and ret != self.last_value:\n logger.info(\"{} at epoch {} will change to {}\".format(\n self.readable_name, self.epoch_num + 1, ret))\n self.last_value = ret\n return ret\n\n @abstractmethod\n def _get_current_value(self):\n pass\n\n def _trigger_epoch(self):\n self._set_param()\n\n def _before_train(self):\n self._set_param()\n\n def _set_param(self):\n v = self.get_current_value()\n if v is not None:\n if self.param_type == HyperParamSetter.TF_VAR:\n self.assign_op.eval(feed_dict={self.val_holder:v})\n else:\n setattr(self.obj_attr[0], self.obj_attr[1], v)\n\nclass HumanHyperParamSetter(HyperParamSetter):\n \"\"\"\n Set hyperparameters manually by modifying a file.\n \"\"\"\n def __init__(self, param, file_name):\n \"\"\"\n :param file_name: a file containing the value of the variable. Each line in the file is a k:v pair\n \"\"\"\n self.file_name = file_name\n super(HumanHyperParamSetter, self).__init__(param)\n\n def _get_current_value(self):\n try:\n with open(self.file_name) as f:\n lines = f.readlines()\n lines = [s.strip().split(':') for s in lines]\n dic = {str(k):float(v) for k, v in lines}\n ret = dic[self.readable_name]\n return ret\n except:\n logger.warn(\n \"Failed to parse {} in {}\".format(\n self.readable_name, self.file_name))\n return None\n\nclass ScheduledHyperParamSetter(HyperParamSetter):\n \"\"\"\n Set hyperparameters by a predefined schedule.\n \"\"\"\n def __init__(self, param, schedule):\n \"\"\"\n :param schedule: [(epoch1, val1), (epoch2, val2), (epoch3, val3), ...]\n \"\"\"\n schedule = [(int(a), float(b)) for a, b in schedule]\n self.schedule = sorted(schedule, key=operator.itemgetter(0))\n super(ScheduledHyperParamSetter, self).__init__(param)\n\n def _get_current_value(self):\n for e, v in self.schedule:\n if e == self.epoch_num:\n return v\n return None\n\n\n\n","sub_path":"tensorpack/callbacks/param.py","file_name":"param.py","file_ext":"py","file_size_in_byte":4046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"553711662","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns=[\n path('',views.home,name=\"home\"),\n path('contactdatas',views.contactdatas,name=\"contactdatas\"),\n path('orderdatas',views.orderdatas,name=\"orderdatas\"),\n path('forms',views.forms,name=\"forms\"),\n path('imageforms',views.imageforms,name=\"imageforms\"),\n path('postimage',views.postimage,name=\"postimage\"),\n path('images',views.images,name='images'),\n path('slideimage',views.slideimage,name=\"slideimage\"),\n path('slideimages',views.slideimages,name=\"slideimages\"),\n path('slidepostimage',views.slidepostimage,name=\"slidepostimage\"),\n path('secondaryimages',views.secondaryimages,name=\"secondaryimages\"),\n path('primaryimages',views.primaryimages,name=\"primaryimages\"),\n\n]\n\n","sub_path":"ShopWebpage/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"423810116","text":"import random\n\nclass Lattice():\n\n\tdef __init__(self,N,r,g,beta,I0,V0,c,R0):\n\t\tself.lattice = [[' ' for i in range(N)] for j in range(N)]\n\t\tself.I0 = I0\n\t\tself.V0 = V0\n\t\tself.N = N\n\t\tself.r = r\n\t\tself.g = g\n\t\tself.beta = beta\n\t\tself.c = c\n\t\tself.R0 = R0\n\t\tself.distribute()\n\n\tdef distribute(self):\n\n\t\tif self.N == 0:\n\t\t\treturn \n\n\t\tlista = [i for i in range(self.N*self.N)]\n\t\trandom.shuffle(lista)\n\n\t\tfor i in range(self.I0):\n\t\t\tel = lista.pop()\n\t\t\tcolumn = el % self.N\n\t\t\tline = int(el / self.N)\n\t\t\tself.lattice[line][column] = 'I'\n\n\t\tfor i in range(self.V0):\n\t\t\tel = lista.pop()\n\t\t\tcolumn = el % self.N\n\t\t\tline = int(el / self.N)\n\t\t\tself.lattice[line][column] = 'V'\n\n\t\tfor el in lista:\n\t\t\tcolumn = el % self.N\n\t\t\tline = int(el / self.N)\n\t\t\tself.lattice[line][column] = 'S'\n\n\n\tdef listOfClass(self,c):\n\t\tlista = []\n\t\tfor i in range(self.N):\n\t\t\tfor j in range(self.N):\n\t\t\t\tif self.lattice[i][j] == c:\n\t\t\t\t\tlista.append((i,j))\n\n\t\treturn lista\n\n\tdef statistics(self):\n\n\t\tinfected, susceptible = 0,0\n\t\trecovered, vaccinated = 0,0\n\n\t\tfor i in range(self.N):\n\t\t\tfor j in range(self.N):\n\t\t\t\t# Vaccinated\n\t\t\t\tif self.lattice[i][j] == 'V':\n\t\t\t\t\tvaccinated += 1\n\t\t\t\t# Susceptible\n\t\t\t\telif self.lattice[i][j] == 'S':\n\t\t\t\t\tsusceptible += 1\n\t\t\t\t# Infected\n\t\t\t\telif self.lattice[i][j] == 'I':\n\t\t\t\t\tinfected += 1\n\t\t\t\t# Recovered\n\t\t\t\telif self.lattice[i][j] == 'R':\n\t\t\t\t\trecovered += 1\n\n\t\treturn (susceptible, infected, recovered, vaccinated)\n\n\tdef neighbors(self,a,b):\n\t\tlista = []\n\t\tif b != 0:\n\t\t\tlista.append((a,b-1))\n\t\tif b != self.N-1:\n\t\t\tlista.append((a,b+1))\n\t\tif a != 0:\n\t\t\tlista.append((a-1,b))\n\t\tif a != self.N-1:\n\t\t\tlista.append((a+1,b))\n\t\treturn lista\n\n\n\tdef transit(self):\n\t\tlistaI = []\n\t\tlistaR = []\n\t\tinfected = self.listOfClass('I')\n\t\tflag = False\n\t\tp = self.r\n\t\tfor el in infected:\n\t\t\tflag = True\n\t\t\tel1, el2 = el\n\t\t\tneighbors = self.neighbors(el1,el2)\n\t\t\tfor neighbor in neighbors:\n\t\t\t\tr = random.random()\n\t\t\t\tline, column = neighbor\n\t\t\t\tif self.lattice[line][column] == 'S':\n\t\t\t\t\tif r < p:\n\t\t\t\t\t\tlistaI.append((line,column))\n\n\t\tp = self.g\n\t\tfor el in infected:\n\t\t\tr = random.random()\n\t\t\tline, column = el\n\t\t\tif r < p:\n\t\t\t\tlistaR.append((line,column))\n\n\t\tfor i in listaI:\n\t\t\tline, column = i\n\t\t\tself.lattice[line][column] = 
'I'\n\t\tfor r in listaR:\n\t\t\tline, column = r\n\t\t\tself.lattice[line][column] = 'R' \n\n\t\treturn flag\n\n\tdef copy(self):\n\t\tcopy_lat = Lattice(0,self.r,self.g,self.beta,self.I0,self.V0,self.c,self.R0)\n\t\tcopy_lat.N = self.N\n\t\tcopy_lat.lattice = [[self.lattice[j][i] for i in range(self.N)] for j in range(self.N)]\n\t\treturn copy_lat\n","sub_path":"lattice/lattice.py","file_name":"lattice.py","file_ext":"py","file_size_in_byte":2549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"245236712","text":"import datetime\nfrom collections import Counter\n\nimport pygame\nfrom pygame.sprite import DirtySprite\n\nimport config\nfrom utils import wrapline\n\nclass Hud(DirtySprite):\n\n def __init__(self,x, y, w, h, viewport):\n DirtySprite.__init__(self)\n\n self.viewport = viewport\n self.enterprise = None\n self.dispatcher = None\n\n self.rect = pygame.Rect(x, y, w, h)\n\n self.font = pygame.font.Font(\"assets/bitwise/bitwise.ttf\", 25)\n self.small_font = pygame.font.Font(\"assets/bitwise/bitwise.ttf\", 15)\n\n self.hovered_unit = None\n self.hovered_stock_pile = None\n\n self.dirty = 2\n\n self.alerts = []\n\n\n def draw(self, surf, force=False):\n if self.dirty == 1 or self.dirty ==2 or force:\n if self.dirty == 1: self.dirty = 0\n\n units = self.viewport.unit_layer\n\n # clear hud\n surf.fill(pygame.Color(\"#000000\"))\n\n fully_functioning_units = len(list(filter(lambda x: x.cooperation_rating >= 5, units)))\n unit_count = self.font.render(\"Functioning Units: {0}\".format(fully_functioning_units), True, pygame.Color(\"#00AD03\"))\n surf.blit(unit_count, (10, 10))\n\n malfunctioning_units = len(list(filter(lambda x: 0 < x.cooperation_rating < 5, units)))\n unit_count = self.font.render(\"Malfunctioning Units: {0}\".format(malfunctioning_units), True, pygame.Color(\"#00AD03\"))\n surf.blit(unit_count, (10, 35))\n\n rogue_units = len(list(filter(lambda x: x.cooperation_rating <= 0, units)))\n unit_count = self.font.render(\"Rogue Units: {0}\".format(rogue_units), True, pygame.Color(\"#00AD03\"))\n surf.blit(unit_count, (10, 60))\n\n idle_units = len(list(filter(lambda x: not x.task, units)))\n unit_count = self.font.render(\"Idle Units: {0}\".format(rogue_units), True, pygame.Color(\"#00AD03\"))\n surf.blit(unit_count, (300, 10))\n\n # alert rendering\n pygame.draw.line(surf, pygame.Color(\"#00AD03\"), (445, 5), (445, config.hud_size - 5))\n\n pygame.draw.line(surf, pygame.Color(\"#00AD03\"), (675, 5), (675, config.hud_size - 5))\n\n if True or len(self.alerts) > 0: #TODO remove true\n unit_count = self.font.render(\"Alerts: \", True, pygame.Color(\"#00AD03\"))\n surf.blit(unit_count, (455, 10))\n\n y_offset = 1\n now = datetime.datetime.now()\n\n for alert in self.alerts:\n if alert[\"expiration\"] > now:\n text = wrapline(\"<\" + alert[\"message\"] + \">\", self.small_font, 500)\n for line in text:\n rendered_text = self.small_font.render(line, True, pygame.Color(\"#00AD03\"))\n surf.blit(rendered_text, (445, 20 + y_offset * 15))\n y_offset += 1\n else:\n self.alerts.remove(alert)\n\n\n # render unit info on hover\n if self.hovered_unit:\n unit_name = self.font.render(self.hovered_unit.name, True, pygame.Color(\"#00AD03\"))\n surf.blit(unit_name, (700, 10))\n\n unit_name = self.small_font.render(\n \"Inventory: {0}/{1}\".format(\n len(self.hovered_unit.inventory),\n self.hovered_unit.inventory_size),\n True, pygame.Color(\"#00AD03\"))\n surf.blit(unit_name, (700, 35))\n\n simple_inv = [ item.name for item in 
self.hovered_unit.inventory ]\n unique = set(simple_inv)\n counter = Counter(simple_inv)\n\n y_offset = 0\n\n for item in unique:\n unit_name = self.small_font.render(\"- {0}x{1}\".format(counter[item], item), True, pygame.Color(\"#00AD03\"))\n surf.blit(unit_name, (700, 50 + y_offset * 15))\n y_offset += 1\n\n\n elif self.hovered_stock_pile:\n stock_pile = self.font.render(\"Stockpile\", True, pygame.Color(\"#00AD03\"))\n surf.blit(stock_pile, (700, 10))\n\n contents = self.small_font.render(\n \"Contents: {0}/{1}\".format(\n len(self.hovered_stock_pile.items),\n self.hovered_stock_pile.capacity),\n True, pygame.Color(\"#00AD03\"))\n surf.blit(contents, (700, 35))\n\n simple_contents = [ item.name for item in self.hovered_stock_pile.items ]\n unique = set(simple_contents)\n counter = Counter(simple_contents)\n\n y_offset = 0\n\n for item in unique:\n contents = self.small_font.render(\"- {0}x{1}\".format(counter[item], item), True, pygame.Color(\"#00AD03\"))\n surf.blit(contents, (700, 50 + y_offset * 15))\n y_offset += 1\n\n else: # render stats\n\n if self.enterprise is not None:\n month_day = self.font.render(\"Month {0}, Day {1} \".format(\n self.enterprise.current_month,\n self.enterprise.current_day),\n True, pygame.Color(\"#00AD03\"))\n surf.blit(month_day, (700, 10))\n\n profit = self.font.render(\"Export Quota: ${0}/${1}\".format(\n self.enterprise.funds,\n self.enterprise.monthly_quota),\n True, pygame.Color(\"#00AD03\"))\n surf.blit(profit, (700, 35))\n\n render = self.small_font.render(\"Availiable Resources:\".format(\n self.enterprise.funds,\n self.enterprise.monthly_quota),\n True, pygame.Color(\"#00AD03\"))\n surf.blit(render, (700, 60))\n\n items = []\n for stock_pile in self.dispatcher.stock_piles:\n items.extend( [ item.name for item in stock_pile.items ] )\n\n unique = set(items)\n counter = Counter(items)\n\n y_offset = 0\n\n for item in unique:\n contents = self.small_font.render(\"- {0}x{1}\".format(counter[item], item), True, pygame.Color(\"#00AD03\"))\n surf.blit(contents, (700, 75 + y_offset * 12))\n y_offset += 1\n\n\n\n\n def add_alert(self, msg):\n if len(self.alerts) == 2: # remove alert before adding another\n to_remove = min(self.alerts, key=lambda x: x[\"expiration\"])\n self.alerts.remove(to_remove)\n\n self.alerts.append({\n \"expiration\": datetime.datetime.now() + datetime.timedelta(seconds=15),\n \"message\": msg})\n\n\n","sub_path":"hud.py","file_name":"hud.py","file_ext":"py","file_size_in_byte":6914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"244401807","text":"'''\nA script to reduce DIS data in the blue\n'''\nfrom astropy.io import fits\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport pdb\nimport pyds9\nfrom pyraf import iraf\n\n#1. Read in a file\ndef read(filename):\n directory = '/home/holtz/raw/apo/mar16/UT160328/'\n image = fits.open(directory+filename)[0].data\n return image\n\n\n#2. Subtract overscan\ndef biassub(im):\n return im-im[35:823,2059:2090].mean()\n\n#3. Take out spectral signature\ndef specsig(im):\n shape = im[200:900,:].sum(axis=0)\n for i in range(im.shape[0]):\n im[i,:]/=shape\n return im\n\n#4. Extract the spectrum\n#def specextract(im):\n #Can use IRAF function apall, in noao->twodspec->apextract->apall\n\n#5. Wavelength calibration\n #using IRAF 'identify' task, looked at wavecal\n #Find HeNeAr plot for 3.5 m @ http://www.apo.nmsu.edu/arc35m/Instruments/DIS/images/henearb400w4400.gif \n #Couldn't get identify to work... 
If I could though, I would have used the plots from the APO website to identify a few lines, let the IRAF task do the rest, and then use the output file to define a functino of angstrom/pixel.\n\n#running everything:\na = read('BrQrtz.0016b.fits') #read in a flat\nimage = read('160328.0037b.fits') #read in a science image\nb = biassub(a) #subtract the bias from the flat\ne = biassub(image) #subtract the bias from the science image\nc = specsig(b) #take spectral signature out of the flat\nfinal = image/c #divide the bias-subtracted image by the non-spectral, bias-subtracted flat\n\nd = pyds9.DS9() #opening DS9 \nd.set_np2arr(final) #putting the image into DS9 \n\n'''\nFinal images look pretty bad, I'd say since the raw data images have very low SNR. \n'''\n","sub_path":"DIS_reduc.py","file_name":"DIS_reduc.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"7305825","text":"from urllib.request import urlopen\nfrom bs4 import BeautifulSoup\n\nweb = urlopen('http://s.cafef.vn/bao-cao-tai-chinh/VNM/IncSta/2017/3/0/0/ket-qua-hoat-dong-kinh-doanh-cong-ty-co-phan-sua-viet-nam.chn')\nhtml_content = web.read().decode('utf8')\nweb.close()\n\nsoup = BeautifulSoup(html_content, 'html.parser')\ntable = soup.find('table', id= 'tableContent')\ntd_list = table.find_all('td')\nfor td in td_list:\n print(td.get_text())\n","sub_path":"Lab2/Homework/scafef.py","file_name":"scafef.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"70714738","text":"#===============================================================================\n# Imports\n#===============================================================================\n\nimport copy\nimport logging\n\nimport numpy as np\nimport scipy.stats\n\nfrom utils import ShallowCopyProxy\nfrom NestedList import NestedList, NestedListManager\n\n\n#===============================================================================\n# Classes\n#===============================================================================\n\nclass FeedForwardProcess(object):\n \n def __init__(self, network, input_struct=None):\n \n # Deep-copying keras models is very expensive. 
We avoid this by specifying that network \n # attribute should always be shallow copied.\n self._network = ShallowCopyProxy(network)\n \n if input_struct == None:\n input_struct = NestedList.generate_random_instance()\n self.input_struct = input_struct\n \n self.tokens = list(input_struct.get_tokens())\n \n self.output_struct = NestedListManager()\n self.execution_history = []\n \n @property\n def network(self):\n return self._network.referent\n \n def do_next_step(self):\n \n token = self.tokens[len(self.execution_history)]\n \n output_layer, action = self.network.feed_forward(token)\n \n step = FeedForwardStep(token, copy.deepcopy(self.output_struct), output_layer, action)\n self.execution_history.append(step)\n self.output_struct.do_action(action, token)\n \n def run_to_end(self):\n \n while len(self.execution_history) < len(self.tokens):\n self.do_next_step()\n \n def calc_total_score(self):\n \n set(self.input_struct.get_descendents())\n \n self.output_struct.root.get_descendents()\n \n \n def calc_score(self):\n # Wrap the input_struct in an additional NestedList because the NestedListManager for output_strcut \n # automatically initializes input_struct.root to be a NestedList and does everything inside that root.\n return self.output_struct.root.calc_score(NestedList(self.input_struct))\n \n def fork(self, step_num):\n new_instance = copy.deepcopy(self)\n new_instance.output_struct = new_instance.execution_history[step_num].struct\n new_instance.execution_history = new_instance.execution_history[:step_num]\n return new_instance\n \n def get_random_fork(self):\n \n logging.log(logging.NOTSET, \"Choosing at which step to fork\")\n entropies = [step.entropy for step in self.execution_history]\n total_entropy = sum(entropies)\n probabilities = [e/total_entropy for e in entropies]\n step_index = np.random.choice(len(self.execution_history), p=probabilities)\n step = self.execution_history[step_index]\n \n logging.log(logging.NOTSET, \"Choosing how to modify this step\")\n probabilities = step.network_output\n probabilities[step.action] = 0\n probabilities = probabilities * (1/sum(probabilities))\n \n num_possible_actions = len(step.network_output)\n \n new_action = np.random.choice(num_possible_actions, p=probabilities)\n \n new_network_output = self.network.encode_action(new_action)\n \n logging.log(logging.NOTSET, \"Forking\")\n fork = self.fork(step_index)\n \n logging.log(logging.NOTSET, \"Applying modification\")\n new_step = FeedForwardStep(step.token, copy.deepcopy(step.struct), new_network_output, new_action)\n fork.execution_history.append(new_step)\n fork.output_struct = copy.deepcopy(step.struct)\n fork.output_struct.do_action(new_action, step.token)\n \n return fork, step_index\n\n\nclass FeedForwardStep(object):\n def __init__(self, token, struct, network_output, action):\n self.token = token\n self.struct = struct\n self.network_output = network_output \n self.action = action\n self.entropy = scipy.stats.entropy(network_output) \n","sub_path":"experiment1/FeedForwardProcess.py","file_name":"FeedForwardProcess.py","file_ext":"py","file_size_in_byte":4105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"323013001","text":"from fastapi import FastAPI, File, Form, UploadFile\nfrom fastapi.middleware.cors import CORSMiddleware\nimport uvicorn\nimport tensorflow as tf\n\n\nfrom core.db.utils import init_mongodb\nfrom core.impl import (\n predict as predict_impl,\n load_sample as load_sample_impl,\n load_specified_sample as 
load_specified_sample_impl,\n    get_patients as get_patients_impl,\n    get_patient as get_patient_impl,\n    import_patient as import_patient_impl,\n    import_ner as import_ner_impl,\n    login as login_impl,\n    register as register_impl,\n    get_favorites as get_favorites_impl,\n    add_favorite as add_favorite_impl,\n    modify_favorite as modify_favorite_impl,\n    delete_favorites as delete_favorites_impl,\n    get_users as get_users_impl,\n    update_user as update_user_impl,\n    similarity as similarity_impl\n)\nfrom core.models import PredictRequest\nfrom core.utils import (\n    init_grpc_client,\n    init_norm_params\n)\n\nimport os\n\nvars = {}\napp = FastAPI()\n\n# configure CORS middleware if origins env var is set\n# cors_origins = '*'\n\ncors_origins = os.getenv('CORS_ORIGINS')\nif cors_origins is not None:\n    app.add_middleware(\n        CORSMiddleware,\n        allow_origins=cors_origins.split(','),\n        allow_credentials=True,\n        allow_methods=['*'],\n        allow_headers=['*'],\n    )\n\n# Start up the backend\n\n\n@app.on_event('startup')\ndef startup_event():\n    vars['grpc_client'] = init_grpc_client()\n    vars['norm_params'] = init_norm_params()\n    vars['db_client'], vars['db'] = init_mongodb()\n\n# Shut down the backend\n\n\n@app.on_event('shutdown')\ndef shutdown_event():\n    if vars['db_client'] is not None:\n        vars['db_client'].close()\n\n# The endpoint handlers below define the frontend/backend data exchange; their implementations live in core.impl\n# Receive data sent by the frontend and run a prediction\n\n\n@app.post('/api/predict')\nasync def predict(payload: PredictRequest):\n    return predict_impl(\n        payload=payload,\n        grpc_client=vars['grpc_client'],\n        norm_params=vars['norm_params'],\n    )\n\n# Send sample data to the frontend\n\n\n@app.get('/api/load-sample')\nasync def load_sample(target: str):\n    return await load_sample_impl(\n        target=target,\n        db=vars['db'],\n    )\n\n\n@app.post('/api/load-specified-sample')\nasync def load_specified_sample(\n    target: str = Form(...),\n    objectid: str = Form(...),\n):\n    print(\"target:\"+target)\n    print(\"objectid:\"+objectid)\n    return await load_specified_sample_impl(\n        target=target,\n        objectid=objectid,\n        db=vars['db'],\n    )\n\n# Send patient-case data to the frontend\n\n\n@app.get('/api/patients')\nasync def get_patients():\n    return await get_patients_impl(db=vars['db'])\n\n# Receive patient-case data from the frontend\n\n\n@app.post('/api/patients')\nasync def import_patient(\n    id: str = Form(...),\n    name: str = Form(...),\n    age: int = Form(...),\n    gender: str = Form(...),\n    ethnicity: str = Form(...),\n    importfile: UploadFile = File(...)\n):\n    print(name)\n    return await import_patient_impl(\n        db=vars['db'],\n        id=id,\n        name=name,\n        age=age,\n        gender=gender,\n        ethnicity=ethnicity,\n        weight=0,\n        height=0,\n        import_file=importfile,\n    )\n\n# Send the patient-case data for a specific id to the frontend\n\n\n@app.get('/api/patients/{patient_id}')\nasync def get_patient(patient_id: str):\n    return await get_patient_impl(\n        patient_id=patient_id,\n        db=vars['db'],\n        grpc_client=vars['grpc_client'],\n        norm_params=vars['norm_params'],\n    )\n\n# Receive text for named-entity recognition from the frontend\n\n\n@app.post('/api/ner/txt')\nasync def import_ner(\n    sequence: str = Form(...),\n    # file_import: bool = Form(...),\n    # importfile: UploadFile = File(...)\n):\n    print(sequence)\n    return await import_ner_impl(\n        db=vars['db'],\n        sequence=sequence,\n        # file_import=file_import,\n        # importfile=importfile,\n    )\n\n\n@app.post('/api/ner/file')\nasync def import_ner2(\n    file_sequence: str = Form(...)\n    # file_import: bool = Form(...),\n    # importfile: UploadFile = File(...)\n):\n    print(\"flagflagflagflagflagflagflagflagflagflagflagflagflag\")\n    for sequence in file_sequence:\n        print(sequence)\n\n    # return await import_ner_impl(\n    #     db=vars['db'],\n    #     sequence=sequence,\n    #     file_import=file_import,\n    #     importfile=importfile,\n    #     )\n# 
TODO:\n\n\n@app.post('/api/login')\nasync def login(\n userName: str = Form(...),\n password: str = Form(...),\n # remember: bool = Form(...)\n):\n print(\"userName:\" + userName)\n print(\"password:\" + password)\n return await login_impl(\n db=vars['db'],\n userName=userName,\n password=password,\n # remember=remember\n )\n\n\n@app.post('/api/register')\nasync def register(\n username: str = Form(...),\n password: str = Form(...),\n email: str = Form(...),\n phone: str = Form(...),\n):\n print(\"username:\" + username)\n print(\"password:\" + password)\n print(\"email:\" + email)\n print(\"phone:\" + phone)\n return await register_impl(\n db=vars['db'],\n username=username,\n password=password,\n email=email,\n phone=phone)\n\n\n@app.get('/api/favorites/{username}')\nasync def get_favorites(username: str):\n return await get_favorites_impl(db=vars['db'], username=username)\n\n\n@app.post('/api/favorites/add')\nasync def add_favorite(\n username: str = Form(...),\n id: str = Form(...),\n fav_type: str = Form(...),\n remark: str = Form(...),\n value: str = Form(...),\n):\n # print(\"username:\" + username)\n # print(\"id:\" + id)\n # print(\"fav_type:\" + fav_type)\n # print(\"remark:\" + remark)\n # print(\"value:\" + value)\n return await add_favorite_impl(\n db=vars['db'],\n username=username,\n id=id,\n fav_type=fav_type,\n remark=remark,\n value=value\n )\n\n\n@app.post('/api/favorites/modify')\nasync def modify_favorite(\n username: str = Form(...),\n id: str = Form(...),\n fav_type: str = Form(...),\n remark: str = Form(...),\n value: str = Form(...),\n):\n # print(\"username:\" + username)\n # print(\"id:\" + id)\n # print(\"fav_type:\" + fav_type)\n # print(\"remark:\" + remark)\n # print(\"value:\" + value)\n return await modify_favorite_impl(\n db=vars['db'],\n username=username,\n id=id,\n fav_type=fav_type,\n remark=remark,\n value=value\n )\n\n\n@app.post('/api/favorites/delete')\nasync def delete_favorites(\n username: str = Form(...),\n id: str = Form(...),\n):\n print('username' + username)\n print('id' + id)\n return await delete_favorites_impl(\n db=vars['db'],\n username=username,\n id=id)\n\n\n@app.get('/api/users')\nasync def get_users():\n return await get_users_impl(db=vars['db'])\n\n\n@app.post('/api/users/update')\nasync def update_user(\n username: str = Form(...),\n modified_level: str = Form(...),\n):\n return await update_user_impl(\n db=vars['db'], \n username=username, \n modified_level=modified_level)\n\n\n@app.get('/api/similarity/{current_id}')\nasync def similarity(current_id: str):\n print('similarity_begin')\n return await similarity_impl(\n db=vars['db'],\n current_id=current_id,\n )\n","sub_path":"app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"463288541","text":"def isleap(y): return (y%400==0) or (y%4==0 and y%100!=0)\ndef isvaliddate(d,m,y,l,ml):\n if m<1 or m>12: return False \n if d>ml[m-1]:\n if not (d==29 and l and m==2):\n return False\n if y>2014 or y<1812:return False\n return True\nday,month,year=[1,1,1812]\nmonth_len=[31,28,31,30,31,30,31,31,30,31,30,31]\nleap=True\nwhile True:\n try: day,month,year=map(int,input(\"Enter date as dd-mm-yyyy: \").split('-'))\n except:\n print(\"Invalid input!\")\n continue\n leap=isleap(year)\n if isvaliddate(day,month,year,leap,month_len): break\n print(\"Invalid Date!\")\nif day{point.name}: {point.percentage:.3f} 
%'})\n\nchart.save_file('religion_dist')\n\t\t\t\n","sub_path":"data_visualization/religion_normal.py","file_name":"religion_normal.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"413356020","text":"from typing import Optional\n\nfrom alice.onboarding.models.report.checks.check import Check\n\n\nclass FaceInFrontSideDocumentCheck(Check):\n def __init__(self, value: Optional[float] = None):\n super().__init__(\n key=\"face_in_front_side\",\n detail=\"The doc contains a face in its front side\",\n value=value,\n )\n","sub_path":"alice/onboarding/models/report/checks/document/face_in_document_check.py","file_name":"face_in_document_check.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"110866870","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# \n# # Ideas for Generating Image Features and Measuring Image Quality\n# \n#
\n# \n# ![](https://i.imgur.com/84TEdoa.png)\n# \n#
\n# \n# [Avito](https://www.kaggle.com/c/avito-demand-prediction) is Russia's largest advertisement platform. The quality of the advertisement image significantly affects the demand volume for an item. For both advertisers and Avito, it is important to use authentic, high-quality images. In this kernel, I have implemented some ideas which can be used to create new image-related features. These features are indicative factors of the image quality. Following is the list of feature ideas: \n# \n# \n# ### 1. Dullness : Is the Image Very Dull ? \n# \n# 1.1 Image Dullness Score\n# \n# ### 2. Whiteness : Is the Image Very White ? \n# 2.1 Image Whiteness Score \n# \n# ### 3. Uniformity : Is the Image too Uniform ?\n# 3.1 Average Pixel Width\n# \n# ### 4. Colors : What are the top colors used in the Image ? \n# 4.1 Dominant Color of the Image \n# 4.2 Average Color of the Image\n# \n# ### 5. Dimensions : Is the Image too Large or too Small ? \n# 5.1 Width of the Image \n# 5.2 Height of the Image \n# 5.3 Size of the Image \n# \n# ### 6. Blurrness : Is the Image Too Blurry ? \n# 6.1 Image Blurrness Score \n# \n#
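\n# \n# Editorial note (not part of the original kernel): every feature computed below is keyed by the image file name, so the finished features table can be merged back onto the ads data before modelling. Here is a minimal, self-contained sketch of that join; the 'deal_probability' target column is an assumption based on the Avito competition data.\n\n\n# Hypothetical usage sketch with dummy rows; column names are assumptions.\nimport pandas as pd\n\ndemo_features = pd.DataFrame({'image': ['a.png', 'b.png'], 'dullness': [12.5, 0.0]})\ndemo_ads = pd.DataFrame({'image': ['a.png', 'b.png'], 'deal_probability': [0.1, 0.7]})\n# left-join the per-image features onto the ads table by file name\ndemo_ads = demo_ads.merge(demo_features, on='image', how='left')\nprint(demo_ads.head())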
\n# \n\n# In[1]:\n\n\nfrom collections import defaultdict\nfrom scipy.stats import itemfreq\nfrom scipy import ndimage as ndi\nimport matplotlib.pyplot as plt\nfrom skimage import feature\nfrom PIL import Image as IMG\nimport numpy as np\nimport pandas as pd \nimport operator\nimport cv2\nimport os \n\nfrom IPython.core.display import HTML \nfrom IPython.display import Image\n\nimages_path = '../input/sampleavitoimages/sample_avito_images/'\nimgs = os.listdir(images_path)\n\nfeatures = pd.DataFrame()\nfeatures['image'] = imgs\n\n\n# ## 1. Is the image Very Dull \n# \n# ### Feature 1 : Dullness\n# \n# Dull Images may not be good for the advirtisment purposes. The analysis of prominent colors present in the images can indicate a lot about if the image is dull or not. In the following cell, I have added a code to measure the dullness score of the image which can be used as one of the feature in the model. \n# \n# \n\n# In[2]:\n\n\ndef color_analysis(img):\n # obtain the color palatte of the image \n palatte = defaultdict(int)\n for pixel in img.getdata():\n palatte[pixel] += 1\n \n # sort the colors present in the image \n sorted_x = sorted(palatte.items(), key=operator.itemgetter(1), reverse = True)\n light_shade, dark_shade, shade_count, pixel_limit = 0, 0, 0, 25\n for i, x in enumerate(sorted_x[:pixel_limit]):\n if all(xx <= 20 for xx in x[0][:3]): ## dull : too much darkness \n dark_shade += x[1]\n if all(xx >= 240 for xx in x[0][:3]): ## bright : too much whiteness \n light_shade += x[1]\n shade_count += x[1]\n \n light_percent = round((float(light_shade)/shade_count)*100, 2)\n dark_percent = round((float(dark_shade)/shade_count)*100, 2)\n return light_percent, dark_percent\n\n\n# Lets compute the dull score for the sample images from Avito's dataset \n\n# In[3]:\n\n\ndef perform_color_analysis(img, flag):\n path = images_path + img \n im = IMG.open(path) #.convert(\"RGB\")\n \n # cut the images into two halves as complete average may give bias results\n size = im.size\n halves = (size[0]/2, size[1]/2)\n im1 = im.crop((0, 0, size[0], halves[1]))\n im2 = im.crop((0, halves[1], size[0], size[1]))\n\n try:\n light_percent1, dark_percent1 = color_analysis(im1)\n light_percent2, dark_percent2 = color_analysis(im2)\n except Exception as e:\n return None\n\n light_percent = (light_percent1 + light_percent2)/2 \n dark_percent = (dark_percent1 + dark_percent2)/2 \n \n if flag == 'black':\n return dark_percent\n elif flag == 'white':\n return light_percent\n else:\n return None\n\n\n# In[4]:\n\n\nfeatures['dullness'] = features['image'].apply(lambda x : perform_color_analysis(x, 'black'))\ntopdull = features.sort_values('dullness', ascending = False)\ntopdull.head(5)\n\n\n# Lets plot some of the images with very high dullness\n\n# In[5]:\n\n\nfor j,x in topdull.head(2).iterrows():\n path = images_path + x['image']\n html = \"
Image : \"+x['image']+\"     (Dullness : \" + str(x['dullness']) +\")
\"\n    display(HTML(html))\n    display(IMG.open(path).resize((300,300), IMG.ANTIALIAS))\n\n\n# ## 2. Is the Image too bright or white \n# \n# ### Feature 2 : Image Whiteness\n# \n# Some images can be too white or too bright, which might not be good for advertisement purposes. Using the same type of color analysis, we can check if the images are too white. \n\n# In[6]:\n\n\nfeatures['whiteness'] = features['image'].apply(lambda x : perform_color_analysis(x, 'white'))\ntopdull = features.sort_values('whiteness', ascending = False)\ntopdull.head(5)\n\n\n# Let's plot some of the images having a high whiteness score\n\n# In[7]:\n\n\nfor j,x in topdull.head(2).iterrows():\n    path = images_path + x['image']\n    html = \"
Image : \"+x['image']+\"     (Whiteness : \" + str(x['whiteness']) +\")
\"\n display(HTML(html))\n display(IMG.open(path).resize((300,300), IMG.ANTIALIAS))\n\n\n# ## 3. Uniform Images (with no pixel variations)\n# \n# ### Feature 3 - Average Pixel Width (using edge detection)\n# \n# Some images may contain no pixel variation and are entirely uniform. Average Pixel Width is a measure which indicates the amount of edges present in the image. If this number comes out to be very low, then the image is most likely a uniform image and may not represent right content. \n# \n# To compute this measure, I am using skimage's Canny Detection\n\n# In[8]:\n\n\nim1 = IMG.open(images_path+'59.png')\nim2 = im1.convert(mode='L')\nim = np.asarray(im2)\n\nedges1 = feature.canny(im, sigma=1)\nedges2 = feature.canny(im, sigma=3)\n\n# display results\nfig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(8, 3), sharex=True, sharey=True)\n\nax1.imshow(im, cmap=plt.cm.gray)\nax1.axis('off')\nax1.set_title('noisy image', fontsize=20)\n\nax2.imshow(edges1, cmap=plt.cm.gray)\nax2.axis('off')\nax2.set_title('Canny filter, $\\sigma=1$', fontsize=20)\n\nax3.imshow(edges2, cmap=plt.cm.gray)\nax3.axis('off')\nax3.set_title('Canny filter, $\\sigma=3$', fontsize=20)\n\nfig.tight_layout()\n\nplt.show()\n\n\n# In[9]:\n\n\ndef average_pixel_width(img):\n path = images_path + img \n im = IMG.open(path) \n im_array = np.asarray(im.convert(mode='L'))\n edges_sigma1 = feature.canny(im_array, sigma=3)\n apw = (float(np.sum(edges_sigma1)) / (im.size[0]*im.size[1]))\n return apw*100\n\n\n# In[10]:\n\n\nfeatures['average_pixel_width'] = features['image'].apply(average_pixel_width)\ntempdf = features.sort_values('average_pixel_width').head()\ntempdf \n\n\n# Lets plot some images having very low average pixel width\n\n# In[11]:\n\n\nfor j,x in tempdf.head(6).iterrows():\n path = images_path + x['image']\n html = \"
Image : \"+x['image']+\"     (Average Pixel Width : \" + str(x['average_pixel_width']) +\")
\"\n display(HTML(html))\n display(IMG.open(path).resize((300,300), IMG.ANTIALIAS))\n\n\n# Above images are most likely nosie and have low average pixel width values.\n# \n# ## 4. What are the key colors used in the image ?\n# \n# Colors used in the images play a significant role in garnering the attraction from users. Additional features related to colors such as Dominant and Average colors can be created. \n# \n# ### Feature 4.1 - Dominant Color\n\n# In[12]:\n\n\ndef get_dominant_color(img):\n path = images_path + img \n img = cv2.imread(path)\n arr = np.float32(img)\n pixels = arr.reshape((-1, 3))\n\n n_colors = 5\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 200, .1)\n flags = cv2.KMEANS_RANDOM_CENTERS\n _, labels, centroids = cv2.kmeans(pixels, n_colors, None, criteria, 10, flags)\n\n palette = np.uint8(centroids)\n quantized = palette[labels.flatten()]\n quantized = quantized.reshape(img.shape)\n\n dominant_color = palette[np.argmax(itemfreq(labels)[:, -1])]\n return dominant_color\n\nfeatures['dominant_color'] = features['image'].apply(get_dominant_color)\nfeatures.head(10)\n\n\n# Lets split the dominant color's RGB values to separate features \n# \n# - Feature 4.1.1 dominant_red value\n# - Feature 4.1.2 dominant_green value\n# - Feature 4.1.3 dominant_blue value\n\n# In[13]:\n\n\nfeatures['dominant_red'] = features['dominant_color'].apply(lambda x: x[0]) / 255\nfeatures['dominant_green'] = features['dominant_color'].apply(lambda x: x[1]) / 255\nfeatures['dominant_blue'] = features['dominant_color'].apply(lambda x: x[2]) / 255\nfeatures[['dominant_red', 'dominant_green', 'dominant_blue']].head(5)\n\n\n# ### Feature 4.2 Average Color\n\n# In[14]:\n\n\ndef get_average_color(img):\n path = images_path + img \n img = cv2.imread(path)\n average_color = [img[:, :, i].mean() for i in range(img.shape[-1])]\n return average_color\n\nfeatures['average_color'] = features['image'].apply(get_average_color)\nfeatures.head(10)\n\n\n# In[15]:\n\n\nfeatures['average_red'] = features['average_color'].apply(lambda x: x[0]) / 255\nfeatures['average_green'] = features['average_color'].apply(lambda x: x[1]) / 255\nfeatures['average_blue'] = features['average_color'].apply(lambda x: x[2]) / 255\nfeatures[['average_red', 'average_green', 'average_blue']].head(5)\n\n\n# ## 5. Dimensions of the Image \n# \n# Too Big Images or Too Small Images might not be very good for generating good attraction. Users may skip viewing a very large or very small sized image. Hence for advertisers it is important to set precise dimensions and size of the image. Hence we can create additional features. \n# \n# - Image width\n# - Image height\n# - Image size\n\n# In[16]:\n\n\ndef getSize(filename):\n filename = images_path + filename\n st = os.stat(filename)\n return st.st_size\n\ndef getDimensions(filename):\n filename = images_path + filename\n img_size = IMG.open(filename).size\n return img_size \n\n\n# In[17]:\n\n\nfeatures['image_size'] = features['image'].apply(getSize)\nfeatures['temp_size'] = features['image'].apply(getDimensions)\nfeatures['width'] = features['temp_size'].apply(lambda x : x[0])\nfeatures['height'] = features['temp_size'].apply(lambda x : x[1])\nfeatures = features.drop(['temp_size', 'average_color', 'dominant_color'], axis=1)\nfeatures.head()\n\n\n# ## 6. Is the image too Blurry \n# \n# ### Feature 6 - Image Blurrness\n# \n# To measure the image blurrness, I refered to the following paper: \"Diatom Autofocusing in Brightfield Microscopy: A Comparative Study\". 
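\n# \n# Before going into the paper's details, here is a minimal sketch of the decision rule this section builds toward (an editorial addition, not part of the original kernel; the threshold of 100 follows the pyimagesearch reference cited below and is an assumption, not a tuned value):\n\n\n# Hypothetical helper: flag an image as blurry when the variance of its\n# Laplacian response falls below a chosen threshold.\ndef is_blurry(laplacian_variance, threshold=100.0):\n    return laplacian_variance < threshold\n\n# e.g. is_blurry(45.3) returns True (likely blurry); is_blurry(850.0) returns False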
\n# \n# In this paper, the author Pech-Pacheco et al. proposed using the variance of the Laplacian filter to measure the image blurriness score.\n# \n# In this technique, a single channel of the image is convolved with the Laplacian filter. If the variance of the response is less than a threshold value, then the image is blurry; otherwise it is not. \n# \n# ![](https://www.pyimagesearch.com/wp-content/uploads/2015/09/detecting_blur_laplacian.png)\n# \n# - Paper Link : http://optica.csic.es/papers/icpr2k.pdf \n# - Reference : https://www.pyimagesearch.com/2015/09/07/blur-detection-with-opencv/\n# \n\n# In[18]:\n\n\ndef get_blurrness_score(image):\n    path = images_path + image \n    image = cv2.imread(path)\n    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n    fm = cv2.Laplacian(image, cv2.CV_64F).var()\n    return fm\n\n\n# In[19]:\n\n\nfeatures['blurrness'] = features['image'].apply(get_blurrness_score)\nfeatures[['image','blurrness']].head(5)\n\n\n# In[20]:\n\n\ntempdf = features.sort_values('blurrness')\nfor y,x in tempdf.head(5).iterrows():\n    path = images_path + x['image']\n    html = \"
Image : \"+x['image']+\"     (Blurrness : \" + str(x['blurrness']) +\")
\"\n display(HTML(html))\n display(IMG.open(path).resize((300,300), IMG.ANTIALIAS))\n\n\n# ### Other Ideas about features from Image\n# \n# - No of objects detected \n# - Total Number of Color Present \n# - No. of shapes detected \n# - Amount of Text Present in the image \n# \n# Other great kernels on Image Feature Extraction:\n# \n# 1. https://www.kaggle.com/wesamelshamy/ad-image-recognition-and-quality-scoring by wesamelshamy \n# 2. https://www.kaggle.com/peterhurford/image-feature-engineering by peterhurford \n","sub_path":"NLP/16/ideas-for-image-features-and-image-quality.py","file_name":"ideas-for-image-features-and-image-quality.py","file_ext":"py","file_size_in_byte":12244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"567333276","text":"from ...base import *\n\n\nclass GeomSelectionBase(BaseObject):\n\n def __init__(self):\n\n self._subobj_sel_state = subobj_sel_state = {}\n\n for subobj_type in (\"vert\", \"edge\", \"poly\"):\n subobj_sel_state[subobj_type] = {\"selected\": [], \"unselected\": []}\n\n self._selected_subobj_ids = {\"vert\": [], \"edge\": [], \"poly\": []}\n\n def set_selected(self, subobj, is_selected=True, update_verts_to_transf=True):\n\n subobj_type = subobj.get_type()\n subobj_id = subobj.get_id()\n sel_state = self._subobj_sel_state[subobj_type][\"selected\"]\n unsel_state = self._subobj_sel_state[subobj_type][\"unselected\"]\n selected_subobj_ids = self._selected_subobj_ids[subobj_type]\n geom_selected = self._geoms[subobj_type][\"selected\"]\n geom_unselected = self._geoms[subobj_type][\"unselected\"]\n\n if is_selected:\n\n if subobj_id in selected_subobj_ids:\n return False\n\n if subobj_type == \"vert\":\n\n merged_vert = self._merged_verts[subobj_id]\n selected_subobj_ids.extend(merged_vert[:])\n row_indices = merged_vert.get_row_indices()\n\n prim = geom_unselected.node().modify_geom(0).modify_primitive(0)\n array = prim.modify_vertices()\n stride = array.get_array_format().get_stride()\n handle = array.modify_handle()\n data_rows = dict((unsel_state.index(i), i)\n for i in row_indices)\n data = \"\"\n\n for start in sorted(data_rows.iterkeys(), reverse=True):\n row_index = data_rows[start]\n unsel_state.remove(row_index)\n sel_state.append(row_index)\n data += handle.get_subdata(start * stride, stride)\n handle.set_subdata(start * stride, stride, \"\")\n\n elif subobj_type == \"edge\":\n\n merged_edge = self._merged_edges[subobj_id]\n selected_subobj_ids.extend(merged_edge[:])\n row_indices = merged_edge.get_start_row_indices()\n\n prim = geom_unselected.node().modify_geom(0).modify_primitive(0)\n array = prim.modify_vertices()\n stride = array.get_array_format().get_stride()\n handle = array.modify_handle()\n data_rows = dict((unsel_state.index(i) * 2, i)\n for i in row_indices)\n data = \"\"\n\n for start in sorted(data_rows.iterkeys(), reverse=True):\n row_index = data_rows[start]\n unsel_state.remove(row_index)\n sel_state.append(row_index)\n data += handle.get_subdata(start * stride, stride * 2)\n handle.set_subdata(start * stride, stride * 2, \"\")\n\n elif subobj_type == \"poly\":\n\n selected_subobj_ids.append(subobj_id)\n\n start = unsel_state.index(subobj[0]) * 3\n size = len(subobj)\n\n for vert_ids in subobj:\n unsel_state.remove(vert_ids)\n\n sel_state.extend(subobj[:])\n prim = geom_unselected.node().modify_geom(0).modify_primitive(0)\n array = prim.modify_vertices()\n stride = array.get_array_format().get_stride()\n handle = array.modify_handle()\n data = handle.get_subdata(start 
* stride, size * stride)\n handle.set_subdata(start * stride, size * stride, \"\")\n\n prim = geom_selected.node().modify_geom(0).modify_primitive(0)\n handle = prim.modify_vertices().modify_handle()\n handle.set_data(handle.get_data() + data)\n\n else:\n\n if subobj_id not in selected_subobj_ids:\n return False\n\n if subobj_type == \"vert\":\n\n merged_vert = self._merged_verts[subobj_id]\n for v_id in merged_vert:\n selected_subobj_ids.remove(v_id)\n row_indices = merged_vert.get_row_indices()\n\n prim = geom_selected.node().modify_geom(0).modify_primitive(0)\n array = prim.modify_vertices()\n stride = array.get_array_format().get_stride()\n handle = array.modify_handle()\n data_rows = dict((sel_state.index(i), i) for i in row_indices)\n data = \"\"\n\n for start in sorted(data_rows.iterkeys(), reverse=True):\n row_index = data_rows[start]\n unsel_state.append(row_index)\n sel_state.remove(row_index)\n data += handle.get_subdata(start * stride, stride)\n handle.set_subdata(start * stride, stride, \"\")\n\n elif subobj_type == \"edge\":\n\n merged_edge = self._merged_edges[subobj_id]\n for e_id in merged_edge:\n selected_subobj_ids.remove(e_id)\n row_indices = merged_edge.get_start_row_indices()\n\n prim = geom_selected.node().modify_geom(0).modify_primitive(0)\n array = prim.modify_vertices()\n stride = array.get_array_format().get_stride()\n handle = array.modify_handle()\n data_rows = dict((sel_state.index(i) * 2, i)\n for i in row_indices)\n data = \"\"\n\n for start in sorted(data_rows.iterkeys(), reverse=True):\n row_index = data_rows[start]\n unsel_state.append(row_index)\n sel_state.remove(row_index)\n data += handle.get_subdata(start * stride, stride * 2)\n handle.set_subdata(start * stride, stride * 2, \"\")\n\n elif subobj_type == \"poly\":\n\n selected_subobj_ids.remove(subobj_id)\n\n start = sel_state.index(subobj[0]) * 3\n size = len(subobj)\n\n for vert_ids in subobj:\n sel_state.remove(vert_ids)\n\n unsel_state.extend(subobj[:])\n prim = geom_selected.node().modify_geom(0).modify_primitive(0)\n array = prim.modify_vertices()\n stride = array.get_array_format().get_stride()\n handle = array.modify_handle()\n data = handle.get_subdata(start * stride, size * stride)\n handle.set_subdata(start * stride, size * stride, \"\")\n\n prim = geom_unselected.node().modify_geom(0).modify_primitive(0)\n handle = prim.modify_vertices().modify_handle()\n handle.set_data(handle.get_data() + data)\n\n if update_verts_to_transf:\n self._update_verts_to_transform(subobj_type)\n\n return True\n\n def is_selected(self, subobj):\n\n subobj_type = subobj.get_type()\n subobj_id = subobj.get_id()\n selected_subobj_ids = self._selected_subobj_ids[subobj_type]\n\n return subobj_id in selected_subobj_ids\n\n def get_selection(self, subobj_lvl):\n\n subobjs = self._subobjs[subobj_lvl]\n selected_subobj_ids = self._selected_subobj_ids[subobj_lvl]\n\n if subobj_lvl == \"vert\":\n\n merged_verts = self._merged_verts\n verts = set(merged_verts[vert_id]\n for vert_id in selected_subobj_ids)\n selection = list(verts)\n\n elif subobj_lvl == \"edge\":\n\n merged_edges = self._merged_edges\n edges = set(merged_edges[edge_id]\n for edge_id in selected_subobj_ids)\n selection = list(edges)\n\n elif subobj_lvl == \"poly\":\n\n selection = [subobjs[i] for i in selected_subobj_ids]\n\n return selection\n\n def clear_selection(self, subobj_lvl, update_verts_to_transf=True):\n\n sel_state = self._subobj_sel_state[subobj_lvl]\n sel_state[\"unselected\"].extend(sel_state[\"selected\"])\n sel_state[\"selected\"] = []\n 
self._selected_subobj_ids[subobj_lvl] = []\n geom_selected = self._geoms[subobj_lvl][\"selected\"]\n geom_unselected = self._geoms[subobj_lvl][\"unselected\"]\n handle = geom_selected.node().modify_geom(\n 0).modify_primitive(0).modify_vertices().modify_handle()\n data = handle.get_data()\n handle.set_data(\"\")\n handle = geom_unselected.node().modify_geom(\n 0).modify_primitive(0).modify_vertices().modify_handle()\n handle.set_data(handle.get_data() + data)\n\n if update_verts_to_transf:\n self._verts_to_transf[subobj_lvl] = {}\n\n def delete_selection(self, subobj_lvl):\n\n subobjs = self._subobjs\n verts = subobjs[\"vert\"]\n edges = subobjs[\"edge\"]\n polys = subobjs[\"poly\"]\n ordered_polys = self._ordered_polys\n sel_state = self._subobj_sel_state\n\n for subobj_type in (\"vert\", \"edge\", \"poly\"):\n for state in (\"selected\", \"unselected\"):\n sel_state[subobj_type][state] = []\n geom_node = self._geoms[subobj_type][state].node()\n geom_node.modify_geom(0).modify_primitive(\n 0).modify_vertices().modify_handle().set_data(\"\")\n # NOTE: do *NOT* call geom_node.modify_geom(0).modify_primitive(0).clearVertices(),\n # as this will explicitly remove all data from the primitive, and adding new\n # data thru ...modify_primitive(0).modify_vertices().modify_handle().set_data(data)\n # will not internally notify Panda3D that the primitive has now been\n # updated to contain new data! This will result in an assertion error\n # later on.\n\n selected_subobj_ids = self._selected_subobj_ids\n selected_vert_ids = selected_subobj_ids[\"vert\"]\n selected_edge_ids = selected_subobj_ids[\"edge\"]\n selected_poly_ids = selected_subobj_ids[\"poly\"]\n self._verts_to_transf[\"vert\"] = {}\n self._verts_to_transf[\"edge\"] = {}\n self._verts_to_transf[\"poly\"] = {}\n verts_to_delete = []\n edges_to_delete = []\n\n if subobj_lvl == \"vert\":\n\n polys_to_delete = set()\n\n for vert in (verts[v_id] for v_id in selected_vert_ids):\n polys_to_delete.add(polys[vert.get_polygon_id()])\n\n elif subobj_lvl == \"edge\":\n\n polys_to_delete = set()\n\n for edge in (edges[e_id] for e_id in selected_edge_ids):\n polys_to_delete.add(polys[edge.get_polygon_id()])\n\n elif subobj_lvl == \"poly\":\n\n polys_to_delete = [polys[poly_id] for poly_id in selected_poly_ids]\n\n poly_index = min(ordered_polys.index(poly) for poly in polys_to_delete)\n polys_to_offset = ordered_polys[poly_index:]\n\n row_ranges_to_delete = []\n merged_verts = self._merged_verts\n merged_edges = self._merged_edges\n\n subobjs_to_unreg = self._subobjs_to_unreg\n\n subobj_change = self._subobj_change\n subobj_change[\"vert\"][\"deleted\"] = vert_change = {}\n subobj_change[\"edge\"][\"deleted\"] = edge_change = {}\n subobj_change[\"poly\"][\"deleted\"] = poly_change = {}\n subobj_change[\"selection\"] = [\"vert\", \"edge\", \"poly\"]\n\n for poly in polys_to_delete:\n\n poly_verts = poly.get_vertices()\n vert = poly_verts[0]\n row = vert.get_row_index()\n row_ranges_to_delete.append((row, len(poly_verts)))\n\n verts_to_delete.extend(poly_verts)\n edges_to_delete.extend(poly.get_edges())\n\n ordered_polys.remove(poly)\n poly_id = poly.get_id()\n subobjs_to_unreg[\"poly\"][poly_id] = poly\n poly_change[poly] = poly.get_creation_time()\n\n if poly_id in selected_poly_ids:\n selected_poly_ids.remove(poly_id)\n\n merged_verts_to_smooth = set()\n\n for vert in verts_to_delete:\n\n vert_id = vert.get_id()\n subobjs_to_unreg[\"vert\"][vert_id] = vert\n vert_change[vert] = vert.get_creation_time()\n\n if vert_id in selected_vert_ids:\n 
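# a deleted vertex must also leave the current selection\n                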
selected_vert_ids.remove(vert_id)\n\n if vert_id in merged_verts:\n merged_vert = merged_verts[vert_id]\n merged_vert.remove(vert_id)\n del merged_verts[vert_id]\n merged_verts_to_smooth.add(merged_vert)\n\n for edge in edges_to_delete:\n\n edge_id = edge.get_id()\n subobjs_to_unreg[\"edge\"][edge_id] = edge\n edge_change[edge] = edge.get_creation_time()\n\n if edge_id in selected_edge_ids:\n selected_edge_ids.remove(edge_id)\n\n if edge_id in merged_edges:\n merged_edge = merged_edges[edge_id]\n merged_edge.remove(edge_id)\n del merged_edges[edge_id]\n\n self.unregister_subobjects(locally=True)\n\n row_index_offset = 0\n\n for poly in polys_to_offset:\n\n if poly in polys_to_delete:\n row_index_offset -= poly.get_vertex_count()\n continue\n\n poly_verts = poly.get_vertices()\n\n for vert in poly_verts:\n vert.offset_row_index(row_index_offset)\n\n row_ranges_to_delete.sort(reverse=True)\n\n vertex_data_vert = self._vertex_data[\"vert\"]\n vertex_data_edge = self._vertex_data[\"edge\"]\n vertex_data_poly = self._vertex_data[\"poly\"]\n\n vert_array = vertex_data_vert.modify_array(1)\n vert_handle = vert_array.modify_handle()\n vert_stride = vert_array.get_array_format().get_stride()\n edge_array = vertex_data_edge.modify_array(1)\n edge_handle = edge_array.modify_handle()\n edge_stride = edge_array.get_array_format().get_stride()\n\n poly_arrays = []\n poly_handles = []\n poly_strides = []\n\n for i in xrange(vertex_data_poly.get_num_arrays()):\n poly_array = vertex_data_poly.modify_array(i)\n poly_arrays.append(poly_array)\n poly_handles.append(poly_array.modify_handle())\n poly_strides.append(poly_array.get_array_format().get_stride())\n\n pos_array = poly_arrays[0]\n\n count = self._data_row_count\n\n for start, size in row_ranges_to_delete:\n\n vert_handle.set_subdata(\n start * vert_stride, size * vert_stride, \"\")\n edge_handle.set_subdata(\n (start + count) * edge_stride, size * edge_stride, \"\")\n edge_handle.set_subdata(\n start * edge_stride, size * edge_stride, \"\")\n\n for poly_handle, poly_stride in zip(poly_handles, poly_strides):\n poly_handle.set_subdata(\n start * poly_stride, size * poly_stride, \"\")\n\n count -= size\n\n self._data_row_count = count = len(verts)\n\n vertex_data_vert.set_array(0, GeomVertexArrayData(pos_array))\n tmp_array = GeomVertexArrayData(pos_array)\n handle = tmp_array.modify_handle()\n handle.set_data(handle.get_data() * 2)\n vertex_data_edge.set_array(0, tmp_array)\n\n sel_state[\"vert\"][\"unselected\"] = range(count)\n sel_state_edge = sel_state[\"edge\"][\"unselected\"]\n sel_state_poly = sel_state[\"poly\"][\"unselected\"]\n\n for poly in ordered_polys:\n sel_state_poly.extend(poly[:])\n\n geoms = self._geoms\n\n points_prim = GeomPoints(Geom.UH_static)\n points_prim.reserve_num_vertices(count)\n points_prim.add_next_vertices(count)\n geom_node = geoms[\"vert\"][\"unselected\"].node()\n geom_node.modify_geom(0).set_primitive(0, points_prim)\n\n lines_prim = GeomLines(Geom.UH_static)\n lines_prim.reserve_num_vertices(count * 2)\n\n tris_prim = GeomTriangles(Geom.UH_static)\n\n for poly in ordered_polys:\n\n for edge in poly.get_edges():\n row1, row2 = [verts[v_id].get_row_index() for v_id in edge]\n lines_prim.add_vertices(row1, row2 + count)\n sel_state_edge.append(row1)\n\n for vert_ids in poly:\n tris_prim.add_vertices(\n *[verts[v_id].get_row_index() for v_id in vert_ids])\n\n geom_node = geoms[\"top\"][\"wire\"].node()\n geom_node.modify_geom(0).set_primitive(0, lines_prim)\n geom_node = geoms[\"edge\"][\"unselected\"].node()\n 
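# pass a copy so this geom owns its own primitive instead of sharing the\n            # one assigned to the wireframe geom above\n            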
geom_node.modify_geom(0).set_primitive(0, GeomLines(lines_prim))\n\n geom_node_top = geoms[\"top\"][\"shaded\"].node()\n geom_node_top.modify_geom(0).set_primitive(0, tris_prim)\n\n geom_node = geoms[\"poly\"][\"unselected\"].node()\n geom_node.modify_geom(0).set_primitive(0, GeomTriangles(tris_prim))\n\n vertex_data_top = geom_node_top.modify_geom(0).modify_vertex_data()\n\n for i, poly_array in enumerate(poly_arrays):\n vertex_data_top.set_array(i, poly_array)\n\n geom_node.modify_geom(0).set_primitive(0, GeomTriangles(tris_prim))\n\n selected_subobj_ids[\"vert\"] = []\n selected_subobj_ids[\"edge\"] = []\n selected_subobj_ids[\"poly\"] = []\n\n for vert_id in selected_vert_ids:\n self.set_selected(verts[vert_id], True, False)\n\n if selected_vert_ids:\n self._update_verts_to_transform(\"vert\")\n\n for edge_id in selected_edge_ids:\n self.set_selected(edges[edge_id], True, False)\n\n if selected_edge_ids:\n self._update_verts_to_transform(\"edge\")\n\n for poly_id in selected_poly_ids:\n self.set_selected(polys[poly_id], True, False)\n\n if selected_poly_ids:\n self._update_verts_to_transform(\"poly\")\n\n self._update_vertex_normals(merged_verts_to_smooth)\n self.get_toplevel_object().get_bbox().update(*self._origin.get_tight_bounds())\n\n def _restore_subobj_selection(self, time_id):\n\n obj_id = self.get_toplevel_object().get_id()\n prop_id = \"subobj_selection\"\n data = Mgr.do(\"load_last_from_history\", obj_id, prop_id, time_id)\n\n for subobj_type in (\"vert\", \"edge\", \"poly\"):\n\n subobj_ids = data[subobj_type]\n old_sel_subobj_ids = set(self._selected_subobj_ids[subobj_type])\n new_sel_subobj_ids = set(subobj_ids)\n sel_subobj_ids = new_sel_subobj_ids - old_sel_subobj_ids\n unsel_subobj_ids = old_sel_subobj_ids - new_sel_subobj_ids\n\n subobjs = self._subobjs[subobj_type]\n\n unsel_subobjs = [subobjs[i]\n for i in unsel_subobj_ids if i in subobjs]\n sel_subobjs = [subobjs[i] for i in sel_subobj_ids]\n\n if subobj_type in (\"vert\", \"edge\"):\n\n merged_subobjs = self._merged_verts if subobj_type == \"vert\" else self._merged_edges\n original_merged_subobjs = {}\n\n if unsel_subobjs:\n tmp_merged_subobj = Mgr.do(\n \"create_merged_%s\" % subobj_type, self)\n for subobj_id in unsel_subobj_ids:\n tmp_merged_subobj.append(subobj_id)\n unsel_id = unsel_subobj_ids.pop()\n original_merged_subobjs[\n unsel_id] = merged_subobjs[unsel_id]\n merged_subobjs[unsel_id] = tmp_merged_subobj\n unsel_subobjs = [subobjs[unsel_id]]\n\n if sel_subobjs:\n tmp_merged_subobj = Mgr.do(\n \"create_merged_%s\" % subobj_type, self)\n for subobj_id in sel_subobj_ids:\n tmp_merged_subobj.append(subobj_id)\n sel_id = sel_subobj_ids.pop()\n original_merged_subobjs[sel_id] = merged_subobjs[sel_id]\n merged_subobjs[sel_id] = tmp_merged_subobj\n sel_subobjs = [subobjs[sel_id]]\n\n for subobj in unsel_subobjs:\n self.set_selected(subobj, False, False)\n\n for subobj in sel_subobjs:\n self.set_selected(subobj, True, False)\n\n if subobj_type in (\"vert\", \"edge\"):\n if unsel_subobjs:\n merged_subobjs[\n unsel_id] = original_merged_subobjs[unsel_id]\n if sel_subobjs:\n merged_subobjs[sel_id] = original_merged_subobjs[sel_id]\n\n self._update_verts_to_transform(subobj_type)\n\n if self._tmp_geom:\n self.clear_triangulation_data()\n self.create_triangulation_data()\n","sub_path":"src/core/geom/data/select.py","file_name":"select.py","file_ext":"py","file_size_in_byte":20555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"192430473","text":"#!/usr/bin/env 
python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Mar 29 16:44:22 2020\n\n@author: Wenqing Hu (Missouri S&T)\n\"\"\"\n\n#SGD, SVRG and SARAH for quadratic loss and Gaussian input data\n#tensorflow version=1.14.0\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import animation\nfrom random import sample\n\nimport tensorflow as tf\ntf.enable_eager_execution()\n\n#set the parameters A, B for the loss function\nA=1\nB=1\n#set the training sample size and the batchsize\ntraining_sample_size=100\nbatchsize=10\n#set number of iteration steps\nnum_steps=1000\n#set the learning rate\nlr=0.01\n\n\"\"\"\nLoss Function L(w_1, w_2; (x_1, x_2, y)) = 0.5(Aw_1x_1+Bw_2x_2-y)^2 for A, B>0\nand its gradients with respect to the weight parameters w_1 and w_2\nthe gradients are calculated via the tf.GradientTape() mode\n\"\"\"\nclass LossFunction(object):\n def __init__(self,\n axA=A,\n axB=B):\n self.axA=axA\n self.axB=axB\n \n #value of the loss function \n def value(self, w, x, y):\n return 0.5*(self.axA*w[0]*x[0]+self.axB*w[1]*x[1]-y)**2\n \n #gradient of the loss function with respect to the weights (w_1, w_2)\n def grad(self, w, x, y):\n tfw_1=tf.Variable(initial_value=w[0], dtype='float')\n tfw_2=tf.Variable(initial_value=w[1], dtype='float')\n with tf.GradientTape() as tape:\n loss=0.5*tf.math.square(tf.subtract(tf.add(tf.multiply(tf.multiply(self.axA, tfw_1), x[0]), tf.multiply(tf.multiply(self.axB, tfw_2), x[1])), y))\n grad_w_1, grad_w_2=tape.gradient(loss, [tfw_1, tfw_2])\n return np.array([grad_w_1.numpy(), grad_w_2.numpy()])\n \n #average of a sequence of loss functions for a given list of training samples (x_i, y_i)\n def average(self, w, training_sample_x, training_sample_y, function):\n average=function(w, training_sample_x[0], training_sample_y[0])\n size=len(training_sample_y)\n for i in range(size-1):\n average+=function(w, training_sample_x[i+1], training_sample_y[i+1])\n average=average/size\n return average\n \n \n\"\"\"\nThe stochastic optimizer update for: SGD, SVRG, SARAH\n\"\"\"\nclass stochastic_optimizer(object):\n def __init__(self, \n function=LossFunction()):\n self.function=function\n \n def SGD(self, w, training_sample_x, training_sample_y, lr, batchsize):\n #detect the size of the training set\n trainingsize=len(training_sample_y)\n #randomly choose the index set that forms the mini-batch\n batch_index=sample(list(range(0,trainingsize)), batchsize)\n #from the mini-batch index set select the corresponding training samples (x, y)\n batch_x=[]\n batch_y=[]\n for i in range(batchsize):\n batch_x.append(training_sample_x[batch_index[i]])\n batch_y.append(training_sample_y[batch_index[i]])\n batch_x=np.array(batch_x)\n batch_y=np.array(batch_y)\n #calculate the stochastic gradient updates \n grad=self.function.average(w, batch_x, batch_y, self.function.grad)\n update=-lr*grad\n return update\n \n def update(self, w, training_sample_x, training_sample_y, lr, batchsize, optname):\n update=np.array([0,0])\n if optname==\"SGD\":\n update=self.SGD(w, training_sample_x, training_sample_y, lr, batchsize)\n else:\n update=np.array([0,0])\n return update\n \n \n\"\"\"\nrunning the code, plot \n(1) the trajectory animation; \n(2) the evolution of the training error; \n(3) the evolution of generalization error.\n\"\"\"\nif __name__ == \"__main__\":\n #generate the training samples (x_i, y_i)\n training_sample_x=np.random.normal(0,1,size=(training_sample_size, 2))\n 
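#the targets y_i are likewise drawn i.i.d. from N(0,1), independently of the inputs\n    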
training_sample_y=np.random.normal(0,1,size=training_sample_size)\n    #initialize the initial weights\n    w_init=[1, 1]\n    #pick a particular pair of test sample (x, y) from the given distribution\n    test_sample_x=np.random.normal(0,1,size=2)\n    test_sample_y=np.random.normal(0,1,size=1)\n    #optimization step obtain a sequence of losses and weights trajectory\n    for optname in {\"SGD\"}:\n        w_current=w_init\n        w_current_minus1=w_current\n        trajectory_w_1=[]\n        trajectory_w_2=[]\n        loss_list=[]\n        generalization_error_list=[]\n        function=LossFunction()\n        for i in range(num_steps):\n            #record the current model weights w \n            trajectory_w_1.append(w_current[0]) \n            trajectory_w_2.append(w_current[1])\n            #calculate the generalization error for the current model weights w\n            generalization_error=function.value(w_current, test_sample_x, test_sample_y)\n            generalization_error_list.append(generalization_error)\n            #calculate the training error (loss) for the current model weights w\n            loss_list.append(function.average(w_current, training_sample_x, training_sample_y, function.value))\n            #update w via stochastic optimization\n            stochastic_optimization=stochastic_optimizer(function=function)\n            w=w_current+stochastic_optimization.update(w_current, training_sample_x, training_sample_y, lr, batchsize, optname)\n            w_current_minus1=w_current\n            w_current=w\n\n        #plot the trajectory as an animation\n        fig = plt.figure()\n        ax=Axes3D(fig)\n        line=ax.plot([],[],'b:')\n        point=ax.plot([],[],'bo',markersize=10)\n        images=[]\n        def init():\n            line=ax.plot([],[],'b:',markersize=8)\n            point=ax.plot([],[],'bo',markersize=10)\n            return line,point\n        def anmi(i):\n            ax.clear()\n            line =ax.plot(trajectory_w_1[0:i],trajectory_w_2[0:i],loss_list[0:i],'b:', markersize=8)\n            point = ax.plot(trajectory_w_1[i-1:i],trajectory_w_2[i-1:i],loss_list[i-1:i],'bo', markersize=10)\n            return line,point\n        anim = animation.FuncAnimation(fig, anmi, init_func=init,\n                                       frames=num_steps, interval=10, blit=False,repeat=False)\n        anim.save(optname+'_A='+str(A)+'_B='+str(B)+'_trainingsize='+str(training_sample_size)+'_batchsize='+str(batchsize)+'_learningrate='+str(lr)+'_steps='+str(num_steps)+'.gif', writer='imagemagick')\n\n\n\n        #plot the training error (loss) and the generalization error\n        fig = plt.figure()\n        mpl.rcParams['legend.fontsize'] = 10\n        plt.plot(loss_list)\n        plt.xlabel('iteration')\n        plt.ylabel('loss')\n        plt.title(optname)\n        plt.savefig('Loss_'+optname+'_A='+str(A)+'_B='+str(B)+'_trainingsize='+str(training_sample_size)+'_batchsize='+str(batchsize)+'_learningrate='+str(lr)+'_steps='+str(num_steps)+'.jpg')\n        plt.show()\n\n        plt.plot(generalization_error_list)\n        plt.xlabel('iteration')\n        plt.ylabel('generalization error')\n        plt.title(optname)\n        plt.savefig('Generalization_'+optname+'_A='+str(A)+'_B='+str(B)+'_trainingsize='+str(training_sample_size)+'_batchsize='+str(batchsize)+'_learningrate='+str(lr)+'_steps='+str(num_steps)+'.jpg')\n        plt.show()\n","sub_path":"5-SGD-VarianceReduction/stochastic_optimizers.py","file_name":"stochastic_optimizers.py","file_ext":"py","file_size_in_byte":7174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"40966347","text":"\ndef get_migratory_birds(arr):\n    max_type_bird = 0\n    birds_count = [0]*(len(arr)+1)\n    for i in arr:\n        birds_count[i] += 1\n\n    for i in range(len(birds_count)):\n        if birds_count[i] > birds_count[max_type_bird]:\n            max_type_bird = i\n    return max_type_bird\n\n\nif __name__ == \"__main__\":\n    \"\"\"\n    
https://www.hackerrank.com/challenges/migratory-birds/problem\n \"\"\"\n n = int(input().strip())\n arr = list(map(int, input().strip().split()))\n print(get_migratory_birds(arr))","sub_path":"Algorithm/problem_solving/Migratory_Bird.py","file_name":"Migratory_Bird.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"441901952","text":"## Context Manager Examples - Advanced Usecase. \n## yield without argument is semantically equivalent to yield None\n\nfrom contextlib import contextmanager\nimport sys\n\n@contextmanager\ndef redirected(**kwds):\n stream_names = [\"stdin\", \"stdout\", \"stderr\"]\n old_streams = {}\n try:\n for sname in stream_names:\n stream = kwds.get(sname, None)\n if stream is not None and stream != getattr(sys, sname):\n old_streams[sname] = getattr(sys, sname)\n setattr(sys, sname, stream)\n yield\n finally:\n for sname, stream in old_streams.items():\n setattr(sys, sname, stream)\n\nwith redirected(stdout=open(\"/tmp/uw-py220-log-context-mgr.txt\", \"w\")):\n # these print statements will go to /tmp/log.txt\n print (\"Test entry 1\")\n print (\"Test entry 2\")\n# back to the normal stdout\nprint (\"Back to normal stdout again\")","sub_path":"Student/tammyd_Py220/Py220_lesson03/lesson03_context_manager.py","file_name":"lesson03_context_manager.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"109507332","text":"# Bitcasa Python Class v2 (Still Unofficial) #\n# 2013 Michael Thomas (Biscuit Labs) #\n\n# System Imports\nimport os, sys, json, time\n# Requests Imports\nimport requests\n# Multipart Form Encoding\nimport codecs, mimetypes, sys, uuid\n# Multithreading (planned for Uploads, then slowly roll out to other ops - need to discuss)\n# http://code.google.com/p/pyloadtools/wiki/CodeTutorialMultiThreading\n# from threading import Thread\n# Watchdog (for monitoring mirrored folders)\n# from watchdog.observers import Observer\n# from watchdog.events import LoggingEventHandler\n\n# Bitcasa Uploader Class\n# Works great for now, but Upload API changes soon.\nclass BitcasaUploader(object):\n\tdef __init__(self, file_obj, filename, chunksize=1 << 13):\n\t\tself.file_obj = file_obj\n\t\tself.filename = filename\n\t\tself.chunksize = chunksize\n\t\tself.totalsize = os.path.getsize(filename)\n\t\tself.readsofar = 0\n\t\t# Form Multipart\n\t\tself.encoder = codecs.getencoder('utf-8')\n\t\tself.boundary = uuid.uuid4().hex\n\t\tself.content_type = 'multipart/form-data; boundary={}'.format(self.boundary)\n\t\t# Print Debug\n\t\tself.debug = True\n\n\tdef __iter__(self):\n\t\t# Start Multipart\n\t\tform_header = str('--'+self.boundary+'\\r\\nContent-Disposition: form-data; name=\"file\"; filename=\"'+self.filename+'\"\\r\\nContent-Type: '+'application/octet-stream'+'\\r\\n\\r\\n').encode('utf-8')\n\t\tyield form_header\n\t\tself.time = time.time()\n\t\twhile True:\n\t\t\tdata = self.file_obj.read(self.chunksize)\n\t\t\tif not data:\n\t\t\t\ttotal_time = time.time() - self.time\n\t\t\t\tif(self.debug):\n\t\t\t\t\tsys.stderr.write(\"\\r[Bitcasa] Finished uploading file: \" + self.filename + \" (took \"+ str(round(total_time,2)) +\" seconds)\\n\")\n\t\t\t\t# Finish Multipart\n\t\t\t\tform_footer = str('\\r\\n--'+self.boundary+'--\\r\\n').encode('utf-8')\n\t\t\t\tyield form_footer\n\t\t\t\tbreak\n\t\t\tself.readsofar += len(data)\n\t\t\tpercent = self.readsofar * 1e2 / 
self.totalsize\n\t\t\tif(self.debug):\n\t\t\t\tsys.stderr.write(\"\\r[Bitcasa] Uploading file: \" + self.filename + \" {percent:3.0f}%\".format(percent=percent))\n\t\t\tyield data\n\t\n\tdef __len__(self):\n\t\treturn self.totalsize\n\nclass BitcasaUploaderFileAdapter(object):\n\tdef __init__(self, iterable):\n\t\tself.iterator = iter(iterable)\n\t\tself.length = len(iterable)\n\n\tdef read(self, size=-1): # TBD: add buffer for `len(data) > size` case\n\t\treturn next(self.iterator, b'')\n\n\tdef __len__(self):\n\t\treturn self.length\n\t\t\n# Bitcasa Main Class\nclass Bitcasa:\n\t# URL \"Constants\"\n\tbitcasa_api_url = \"https://developer.api.bitcasa.com/v1\"\t\n\tbitcasa_file_api_url = \"https://files.api.bitcasa.com/v1\"\n\toauth_redirect_url = \"\"\n\t# API Auth/Access\n\tapi_oauth_token = \"\"\n\tapi_access_token = \"\"\n\n\t# Setup our class\n\tdef __init__ (self, app_client_id, app_client_secret, debug=False, auth_token=None, access_token=None):\n\t\t# Make sure we atleast have a Client ID and/or Client Secret\n\t\tself.app_client_id = app_client_id\n\t\tself.app_client_secret = app_client_secret\n\t\tif(self.app_client_id == None):\n\t\t\traise Exception(\"You must have a Bitcasa App Client ID to use this module.\")\n\t\tif(self.app_client_secret == None):\n\t\t\traise Exception(\"You must have a Bitcasa App Secret to use this module.\")\n\t\t# Set Auth Token and/or Access Token\n\t\tself.api_oauth_token = auth_token\n\t\tself.api_access_token = access_token\n\t\t# Print out lots of useful info\n\t\tself.debug = debug\n\n\t# Get OAuth URL to get Token\n\tdef oauth_url (self):\n\t\treturn self.bitcasa_api_url + \"/oauth2/authenticate?client_id=\" + self.app_client_id + \"&redirect=\" + self.oauth_redirect_url\n\n\t# Authenticate & Get Access Token\n\tdef authenticate (self, oauth=None):\n\t\t# Set OAuth Token\t\t\n\t\tif(oauth != None):\n\t\t\tself.oauth_token = oauth\n\t\tif(self.debug):\n\t\t\tprint(\"[Bitcasa] Authenticate OAuth Token: \" + self.oauth_token)\n\n\t\t# Make Request for Access Token\n\t\tr = requests.get(self.bitcasa_api_url + \"/oauth2/access_token?secret=\" + self.app_client_secret + \"&code=\" + self.oauth_token)\n\t\tif(self.debug):\n\t\t\tprint(\"[Network] Request: \" + self.bitcasa_api_url + \"/oauth2/access_token?secret=\" + self.app_client_secret + \"&code=\" + self.oauth_token)\t\t\n\t\tif(r.status_code == 200):\n\t\t\t# Success, set in instance & return\n\t\t\tself.api_access_token = r.json()['result']['access_token']\n\t\t\treturn self.api_access_token\n\t\telse:\n\t\t\t# Error\n\t\t\t# @todo - Better HTTP/Requests Error Handling\n\t\t\traise Exception(r.json()['error']['code'], r.json()['error']['message'])\n\t\n\t# Get User Profile\n\tdef user_profile(self):\n\t\tif(self.debug):\n\t\t\tprint(\"[Bitcasa] Fetch User Information\")\n\t\t# Make Request for User Profile\n\t\tr = requests.get(self.bitcasa_api_url + \"/user/profile?access_token=\" + self.api_access_token)\n\t\tif(self.debug):\n\t\t\tprint(\"[Network] Request: \" + self.bitcasa_api_url + \"/user/profile?access_token=\" + self.api_access_token)\t\t\n\t\tif(r.status_code == 200):\n\t\t\t# Success, return profile\n\t\t\treturn r.json()['result']\n\t\telse:\n\t\t\t# Error\n\t\t\t# @todo - Better HTTP/Requests Error Handling\n\t\t\traise Exception(r.json()['error']['code'], r.json()['error']['message'])\n\n\t### Folder Methods ###\n\n\t## List Directory Contents\n\tdef dir (self, path = \"\"):\n\t\tif(self.debug):\n\t\t\tprint(\"[Bitcasa] Listing Directory Contents: \" + path)\n\t\tr = 
requests.get(self.bitcasa_api_url + \"/folders/\" + path + \"?access_token=\" + self.api_access_token)\n\t\tif(self.debug):\n\t\t\tprint(\"[Network] Request: \" + self.bitcasa_api_url + \"/folders/\" + path + \"?access_token=\" + self.api_access_token)\n\t\tif(r.status_code == 200):\n\t\t\t# Success\n\t\t\tcontents = r.json()['result']['items']\n\t\t\treturn contents\n\t\telse:\n\t\t\t# Error\n\t\t\t# @todo - Better HTTP/Requests Error Handling\n\t\t\traise Exception(r.json()['error']['code'], r.json()['error']['message'])\n\n\t## Add Folder\n\tdef mkdir (self, path, folder_name):\n\t\tif(self.debug):\n\t\t\tprint(\"[Bitcasa] Creating Directory Named: \" + folder_name + \" in Path: \" + path)\n\t\tpayload = {'folder_name' : folder_name}\n\t\tif(self.debug):\n\t\t\tprint(\"[Network] Request: \" + self.bitcasa_api_url + \"/folders/\" + path + \"?access_token=\" + self.api_access_token)\n\t\tr = requests.post(self.bitcasa_api_url + \"/folders/\" + path + \"?access_token=\" + self.api_access_token, data=payload)\n\t\tif(r.status_code == 200):\n\t\t\t# Make Sure Errors aren't here\n\t\t\tif(r.json()['error'] == None):\n\t\t\t\treturn r.json()['result']['items']\n\t\t\telse:\n\t\t\t\traise Exception(r.json()['error']['code'], r.json()['error']['message'])\n\t\telse:\n\t\t\traise Exception(r.json()['error']['code'], r.json()['error']['message'])\n\n\t## Remove Folder\n\tdef rmdir(self, path):\n\t\tif(self.debug):\n\t\t\tprint(\"[Bitcasa] Removing Directory at Path: \" + path)\n\t\tpayload = {'path' : path}\n\t\tif(self.debug):\n\t\t\tprint(\"[Network] Request: \" + self.bitcasa_api_url + \"/folders/?access_token=\" + self.api_access_token)\n\t\tr = requests.delete(self.bitcasa_api_url + \"/folders/?access_token=\" + self.api_access_token, data=payload)\n\t\tif(r.status_code == 200):\n\t\t\tif(r.json()['error'] == None):\n\t\t\t\t# Success\n\t\t\t\t# @todo - If it doesn't delete anything (if not found) it will still return a success.\n\t\t\t\treturn r.json()['result']\n\t\t\telse:\n\t\t\t\traise Exception(r.json()['error']['code'], r.json()['error']['message'])\n\t\telse:\n\t\t\traise Exception(r.json()['error']['code'], r.json()['error']['message'])\n\n\t## Rename Folder\n\tdef renamedir(self, path, new_name):\n\t\tif(self.debug):\n\t\t\tprint(\"[Bitcasa] Renaming Directory With Path: \" + path + \" To: \" + new_name)\n\t\tpayload = {'from' : path, 'filename': new_name}\n\t\tif(self.debug):\n\t\t\tprint(\"[Network] Request: \" + self.bitcasa_api_url + \"/folders?operation=rename&access_token=\" + self.api_access_token)\n\t\tr = requests.post(self.bitcasa_api_url + \"/folders?operation=rename&access_token=\" + self.api_access_token, data=payload)\n\t\tif(r.status_code == 200):\n\t\t\tif(r.json()['error'] == None):\n\t\t\t\t# Success\n\t\t\t\t# @todo - If it doesn't delete anything (if not found) it will still return a success.\n\t\t\t\treturn r.json()['result']\n\t\t\telse:\n\t\t\t\traise Exception(r.json()['error']['code'], r.json()['error']['message'])\n\t\telse:\n\t\t\traise Exception(r.json()['error']['code'], r.json()['error']['message'])\n\n\t## Move Folder\n\tdef mvdir(self, path, new_path):\n\t\tpayload = {'from' : path, 'to': new_path}\n\t\tr = requests.post(self.bitcasa_api_url + \"/folders?operation=move&access_token=\" + self.api_access_token, data=payload)\n\t\tif(r.status_code == 200):\n\t\t\tif(r.json()['error'] == None):\n\t\t\t\t# Success\n\t\t\t\t# @todo - If it doesn't delete anything (if not found) it will still return a success.\n\t\t\t\treturn 
True\n\t\t\telse:\n\t\t\t\tif(r.json()['error']['code'] == 2022):\n\t\t\t\t\traise Exception(2022, r.json()['error']['message'])\n\t\t\t\telif(r.json()['error']['code'] == 2023):\n\t\t\t\t\traise Exception(2023, r.json()['error']['message'])\n\t\t\t\telse:\n\t\t\t\t\t# Other Error\n\t\t\t\t\traise Exception(\"A strange error has occurred. Derp.\")\n\t\telse:\n\t\t\tif(r.json()['error'] != None):\n\t\t\t\traise Exception(r.json()['error']['code'], r.json()['error']['message'])\n\t\t\telse:\n\t\t\t\t# Other Error\n\t\t\t\traise Exception(\"A strange error has occurred. Derp.\")\n\n\t## Copy Folder\n\tdef cpdir(self, path, new_path):\n\t\tpayload = {'from' : path, 'to': new_path}\n\t\tr = requests.post(self.bitcasa_api_url + \"/folders?operation=copy&access_token=\" + self.api_access_token, data=payload)\n\t\tif(r.status_code == 200):\n\t\t\tif(r.json()['error'] == None):\n\t\t\t\t# Success\n\t\t\t\t# @todo - If it doesn't delete anything (if not found) it will still return a success.\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tif(r.json()['error']['code'] == 2022):\n\t\t\t\t\traise Exception(2022, r.json()['error']['message'])\n\t\t\t\telif(r.json()['error']['code'] == 2023):\n\t\t\t\t\traise Exception(2023, r.json()['error']['message'])\n\t\t\t\telse:\n\t\t\t\t\t# Other Error\n\t\t\t\t\traise Exception(\"A strange error has occurred. Derp.\")\n\t\telse:\n\t\t\tif(r.json()['error'] != None):\n\t\t\t\traise Exception(r.json()['error']['code'], r.json()['error']['message'])\n\t\t\telse:\n\t\t\t\t# Other Error\n\t\t\t\traise Exception(\"A strange error has occurred. Derp.\")\n\t\n\t### File Methods ###\n\n\t## Download File\n\tdef download(self, file_path, file_id, file_name):\n\t\tif(file_path != None):\n\t\t\tif(self.debug):\n\t\t\t\tprint(\"[Bitcasa] Downloading file from path: \" + file_path + \" To Filename: \" + file_name)\n\t\t\t\tprint(\"[Network] Request: \" + self.bitcasa_api_url + \"/files/\" + file_name + \"?access_token=\" + self.api_access_token)\n\t\t\tr = requests.get(self.bitcasa_api_url + \"/files/\" + file_name + \"?access_token=\" + self.api_access_token + \"&path=\" + file_path)\n\t\telif(file_id != None):\n\t\t\tif(self.debug):\n\t\t\t\tprint(\"[Bitcasa] Downloading file with ID: \" + file_id + \" To Filename: \" + file_name)\n\t\t\t\tprint(\"[Network] Request: \" + self.bitcasa_api_url + \"/files/\" + file_id + \"/\" + file_name + \"?access_token=\" + self.api_access_token)\n\t\t\tr = requests.get(self.bitcasa_api_url + \"/files/\" + file_id + \"/\" + file_name + \"?access_token=\" + self.api_access_token)\n\t\telse:\n\t\t\traise Exception(\"You must specify a file path or a file ID.\")\t\t\n\n\t\tif(r.status_code == 200):\n\t\t\t# Make Sure Errors aren't here\n\t\t\ttry:\n\t\t\t\tr.json()\n\t\t\texcept:\t\t\t\n\t\t\t\treturn r.content\n\t\t\telse:\n\t\t\t\traise Exception(r.json()['error']['code'], r.json()['error']['message'])\n\t\telse:\n\t\t\traise Exception(r.json()['error']['code'], r.json()['error']['message'])\n\t\n\t## Upload File\n\t# Please Bitcasa, make Uploads better (eh I suppose it works, but it'd be nice to have pause/resume support via chunked requests)\n\t# Below is a memory efficient, multipart encoding beast.\n\n\tdef upload(self, file_obj, filename, file_path):\n\t\tif(self.debug):\n\t\t\tprint(\"[Bitcasa]: Uploading file: \" + filename + \" to path:\" + file_path)\n\t\t# Request Payload (Multipart Encoding)\n\t\tpayload = BitcasaUploader(file_obj, filename, 8192)\n\t\t# Request Headers\n\t\theaders = {'Content-Type': payload.content_type, 'Content-Length': 
str(payload.totalsize)}\n\t\tprint(headers)\n\t\t# Setup & Do Request\n\t\tprint(\"[Network] Request: \" + self.bitcasa_api_url + \"/files\" + file_path + \"?access_token=\" + self.api_access_token)\t\t\n\t\tr = requests.post(self.bitcasa_file_api_url + \"/files\" + file_path + \"?access_token=\" + self.api_access_token, data=payload, headers=headers);\n\t\tif(r.status_code == 200):\n\t\t\t# Make Sure Errors aren't here\n\t\t\tif(r.json()['error'] == None):\n\t\t\t\treturn r.json()['result']['items']\n\t\t\telse:\n\t\t\t\traise Exception(r.json()['error']['code'], r.json()['error']['message'])\n\t\telse:\n\t\t\traise Exception(r.json()['error']['code'], r.json()['error']['message'])\n\n\t\t\n","sub_path":"bitcasa.py","file_name":"bitcasa.py","file_ext":"py","file_size_in_byte":12140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"480762297","text":"import os\n\nREDIS_URL = os.getenv(\"REDIS_URL\", \"redis://localhost:6379/0\")\n\nDOCKER_REGISTRY_API = os.getenv(\"DOCKER_REGISTRY_API\", \"https://registry-1.docker.io/v2\")\n# default dockerhub registry url\n\nTARGET_REGISTRY_API = os.getenv(\"TARGET_REGISTRY_API\", \"http://localhost:5000/v2\")\n\n# to do support authorize registry\n\nTARGET_REGISTRY_ENDPOINT = os.getenv(\"TARGET_REGISTRY\", \"localhost:5000\")\n\nIMAGES_DIR = os.getenv(\"IMAGES_DIR\", os.path.join(os.path.dirname(os.path.dirname(__file__)), \"images\"))\n\nLIBRARY_IMAGE_LIST_PATH = os.path.join(IMAGES_DIR, 'image.list')\nTHIRD_PARTY_LIST_PATH = os.path.join(IMAGES_DIR, 'third_party.list')\nIMAGE_NAME_CONVERT_LIST_PATH = os.path.join(IMAGES_DIR, 'image_name_convert.list')\n\nPROD = str(os.getenv(\"PROD\")) in ('yes', '1', 'true', 'True')\n\nTARGET_THIRD_PARTY_NAMESPACE = os.getenv(\"TARGET_THIRD_PARTY_NAMESPACE\", \"daocloud\")\n","sub_path":"sync_docker/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"180788609","text":"# Time Complexity :average O(logn)\n# Space Complexity :O(1) \n# Did this code successfully run on Leetcode : yes\n# Any problem you faced while coding this : no\nclass Solution(object):\n    def hIndex(self, citations):\n        \"\"\"\n        :type citations: List[int]\n        :rtype: int\n        \"\"\"\n        #low and high of binary search\n        low = 0\n        high = len(citations) -1\n        #total number of papers\n        n = len(citations)\n        while low <=high:\n            #get mid\n            mid = low + (high-low)//2\n            #check if the citation count at mid equals the number of papers from mid onwards; if yes\n            if citations[mid] == n-mid:\n                #return the number of papers from mid onwards\n                return n-mid\n            # if the citation count is more than the number of papers from mid onwards, search the left subarray\n            elif citations[mid] > n-mid:\n                high = mid -1\n            # else search right subarray\n            else:\n                low = mid+1\n        #no exact match found inside the loop: n-low papers have at least n-low citations\n        return n-low\n    ","sub_path":"Problem-1.py","file_name":"Problem-1.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"550247567","text":"import os\nimport spiceypy as sp\n\nfrom .io import _downloadFile\nfrom .io import _readFileLog\n\n__all__ = [\n    \"getSPICEKernels\",\n    \"setupSPICE\"\n]\n\nKERNELS = {\n    # Internal Name : [File, URL]\n    \"LSK - Latest\" : [\n        \"latest_leapseconds.tls\", \n        \"https://naif.jpl.nasa.gov/pub/naif/generic_kernels/lsk/latest_leapseconds.tls\"\n    ],\n    \"Planetary Constants\" : [\n        
\"pck00010.tpc\",\n \"https://naif.jpl.nasa.gov/pub/naif/generic_kernels/pck/pck00010.tpc\"\n ],\n \"Earth PCK - Latest High Accuracy\" : [\n \"earth_latest_high_prec.bpc\",\n \"https://naif.jpl.nasa.gov/pub/naif/generic_kernels/pck/earth_latest_high_prec.bpc\"\n ],\n \"Earth PCK - Historical High Accuracy\" : [\n \"earth_720101_070426.bpc\",\n \"https://naif.jpl.nasa.gov/pub/naif/generic_kernels/pck/earth_720101_070426.bpc\"\n ],\n \"Earth PCK - Long Term Predict Low Accuracy\" : [\n \"earth_200101_990628_predict.bpc\",\n \"https://naif.jpl.nasa.gov/pub/naif/generic_kernels/pck/earth_200101_990628_predict.bpc\"\n ],\n \"Earth FK\" : [\n \"earth_assoc_itrf93.tf\",\n \"https://naif.jpl.nasa.gov/pub/naif/generic_kernels/fk/planets/earth_assoc_itrf93.tf\"\n ],\n \"Planetary SPK\" : [\n \"de430.bsp\", \n \"https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/de430.bsp\" \n ]\n }\n\ndef getSPICEKernels(kernels=[\"LSK - Latest\", \n \"Planetary Constants\", \n \"Earth PCK - Long Term Predict Low Accuracy\",\n \"Earth PCK - Historical High Accuracy\", \n \"Earth PCK - Latest High Accuracy\", \n \"Planetary SPK\"]):\n \"\"\"\n Download SPICE kernels. If any already exist, check if they have been updated. If so, replace the \n outdated file with the latest version. \n \n SPICE kernels used by THOR: \n \"LSK - Latest\": latest_leapseconds.tls downloaded from https://naif.jpl.nasa.gov/pub/naif/generic_kernels/lsk,\n \"Earth PCK - Latest High Accuracy\": earth_latest_high_prec.bpc downloaded from https://naif.jpl.nasa.gov/pub/naif/generic_kernels/pck,\n \"Earth PCK - Historical High Accuracy\": earth_720101_070426.bpc downloaded from https://naif.jpl.nasa.gov/pub/naif/generic_kernels/pck,\n \"Earth PCK - Long Term Predict Low Accuracy\": earth_070425_370426_predict.bpc downloaded from https://naif.jpl.nasa.gov/pub/naif/generic_kernels/pck/\n \"Planetary SPK\": de430.bsp downloaded from https://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/planets/\n \n Only the leapsecond and Earth planetary constants kernels are checked for updates since these files are rather small (< 10 MB). The \n planetary ephemerides file is over 1.5 GB and is not checked for an update (these files are not updated regularly and are often released as \n different version with different physical assumptions)\n \n Parameters\n ----------\n kernels : list, optional\n Names of the kernels to download. By default, all kernels required by THOR are downloaded. \n Possible options are:\n \"Planetary Constants\" \n \"Earth PCK - Latest High Accuracy\"\n \"Earth PCK - Historical High Accuracy\"\n \"Earth PCK - Long Term Predict Low Accuracy\"\n \"Planetary SPK\"\n \n Returns\n -------\n None\n \"\"\"\n for kernel in kernels:\n print(\"Checking for {} kernel...\".format(kernel))\n _downloadFile(os.path.join(os.path.dirname(__file__), \"..\", \"data\"), KERNELS[kernel][1])\n print(\"\")\n return\n\ndef setupSPICE(kernels=[\"LSK - Latest\", \n \"Planetary Constants\", \n \"Earth PCK - Long Term Predict Low Accuracy\",\n \"Earth PCK - Historical High Accuracy\", \n \"Earth PCK - Latest High Accuracy\", \n \"Planetary SPK\"],\n verbose=True):\n \"\"\"\n Loads the leapsecond, the Earth planetary constants and the planetary ephemerides kernels into SPICE. \n \n Parameters\n ----------\n kernels : list, optional\n Names of the kernels to load. By default, all kernels required by THOR are loaded. 
\n    Possible options are:\n        \"Planetary Constants\" \n        \"Earth PCK - Latest High Accuracy\"\n        \"Earth PCK - Historical High Accuracy\"\n        \"Earth PCK - Long Term Predict Low Accuracy\"\n        \"Planetary SPK\"\n    verbose : bool, optional\n        Print progress statements.\n    \n    Returns\n    -------\n    None\n    \"\"\"\n    if \"THOR_SPICE\" in os.environ.keys() and os.environ[\"THOR_SPICE\"] == \"True\":\n        if verbose:\n            print(\"SPICE is already enabled.\")\n    else:\n        if verbose:\n            print(\"Enabling SPICE...\")\n        log = _readFileLog(os.path.join(os.path.dirname(__file__), \"..\", \"data/log.yaml\"))\n        for kernel in kernels:\n            file_name = KERNELS[kernel][0]\n            if file_name not in log.keys():\n                err = (\"{} not found. Please run thor.utils.getSPICEKernels to download SPICE kernels.\")\n                raise FileNotFoundError(err.format(file_name))\n            sp.furnsh(log[file_name][\"location\"])\n        os.environ[\"THOR_SPICE\"] = \"True\"\n        if verbose:\n            print(\"Done.\")\n    return","sub_path":"thor/utils/spice.py","file_name":"spice.py","file_ext":"py","file_size_in_byte":5402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"530694840","text":"import pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.metrics import accuracy_score\nimport pickle\n\nbase = pd.read_csv('insurance.csv')\nbase.Age.unique()\nbase.RiskAversion.unique()\nbase.MakeModel.unique()\nbase.Accident.unique()\n\nX = base.iloc[:, [2, 4, 9]].values\ny = base.iloc[:, 8].values\n\n#the LabelEncoder transforms the string values into integers, since this naive Bayes implementation works on numeric values\nlabelencoder = LabelEncoder()\nX[:,0] = labelencoder.fit_transform(X[:, 0])\nX[:,1] = labelencoder.fit_transform(X[:, 1])\nX[:,2] = labelencoder.fit_transform(X[:, 2])\n\nmodelo = GaussianNB()\nmodelo.fit(X, y)\n\nprevisoes = modelo.predict(X)\naccuracy_score(y, previsoes)\n\npickle.dump(modelo, open('naivebayes_finalizado.sav', 'wb'))\n","sub_path":"Algoritimos/insurance_naivebayes_salvar.py","file_name":"insurance_naivebayes_salvar.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"351488878","text":"\nfrom stompest.sync import Stomp\nfrom stompest.config import StompConfig\nimport ConfigParser\nimport logging\n\nclass QueueConnector():\n    '''\n    '''\n\n    def __init__(self):\n        \"\"\"Receive configuration data for the instance\"\"\"\n        logging.getLogger(\"django.request\").info( \"Get queue \") \n\n        INSTANCE = self\n\n        logging.getLogger(\"django.request\").info(\n            'Setting module configuration'\n        )\n        app_config_path = './app.config'\n        try:\n            config = ConfigParser.ConfigParser()\n            config.read(app_config_path)\n            \n        except:\n            logging.getLogger('django.request').error(\n                \"Error processing %s\" % app_config_path\n            )\n            raise\n        # Notice the /queue/ in front of the name;\n        # it will be ripped out by activemq on delivery to non-stomp\n        INSTANCE.queuename = \"/queue/\" + config.get('receiver', 'queuebasename') + \"_from_mobile\"\n        INSTANCE.brokerconnection = config.get('receiver', 'brokerconnection')\n        INSTANCE.STOMPCONFIG = StompConfig(INSTANCE.brokerconnection)\n\n\n\nINSTANCE = QueueConnector()\n","sub_path":"djangothuraya/thuraya/queueConnector.py","file_name":"queueConnector.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"592533047","text":"from vanilla.utility.debugger import 
Debugger\nfrom tabulate import tabulate\nimport random\nimport functools\n\nclass Simulator:\n    @classmethod\n    def run(cls, nodes, packet_loss_chance, round_count, debug=True):\n        Debugger.set_enabled(debug)\n        Debugger.log(\"Initial states:\")\n        cls.__print_node_states(nodes)\n        current_round = 1\n        while current_round <= round_count:\n            Debugger.log(\"Round: \" + str(current_round))\n            messages = cls.__collect_messages(nodes)\n            cls.__publish_messages(messages, nodes, packet_loss_chance)\n            cls.__act_all(nodes)\n            cls.__print_node_states(nodes)\n            current_round += 1\n            Debugger.log(\"\")\n        Debugger.log(\"Simulation Complete\")\n\n    @classmethod\n    def __collect_messages(cls, nodes):\n        messages = []\n        for node in nodes:\n            message = node.send()\n            if message is not None:\n                messages.append(message)\n        return messages\n\n    @classmethod\n    def __publish_messages(cls, messages, nodes, packet_loss_chance):\n        for message in messages:\n            content = message.content\n            for node in nodes:\n                if content.sender_id == node.get_id():\n                    continue\n                if random.random() <= packet_loss_chance:\n                    # print(\"Message from {0} to {1} was lost due to packet loss.\".\n                    #       format(message.sender_id, vehicle.id))\n                    continue\n                if message.type == \"VIEW_BROADCAST\" or \\\n                        content.receiver_id == node.get_id():\n                    node.receive(message)\n\n    @classmethod\n    def __act_all(cls, nodes):\n        for vehicle in nodes:\n            vehicle.act()\n\n    @classmethod\n    def __print_node_states(cls, nodes):\n        states = []\n        for vehicle in nodes:\n            states.append(vehicle.as_displayable())\n        states = sorted(states, key=functools.cmp_to_key(cls.__cmp))\n        Debugger.log(tabulate(states, headers=\"keys\", tablefmt=\"grid\"))\n\n    @classmethod\n    def __cmp(cls, a, b):\n        if a[\"context\"] > b[\"context\"]:\n            return 1\n        return -1\n\n","sub_path":"vanilla/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"206576176","text":"import numpy as np\nfrom setuptools import setup\nfrom setuptools import find_packages\n\nVERSION = '0.4.0'\n\nAUTHORS = 'Matthew Bourque, Sara Ogaz, Joe Filippazzo, Bryan Hilbert, Misty Cracraft, Graham Kanarek'\nAUTHORS += ', Johannes Sahlmann, Lauren Chambers, Catherine Martlin'\n\nREQUIRES = ['astropy', 'astroquery', 'bokeh==0.12.5', 'django==2.0.5', 'matplotlib', 'numpy', 'python-dateutil', 'sphinx', 'sphinx-automodapi', 'sqlalchemy']\n\nsetup(\n    name='jwql',\n    version=VERSION,\n    description='The JWST Quicklook Project',\n    url='https://github.com/spacetelescope/jwql.git',\n    author=AUTHORS,\n    author_email='jwql@stsci.edu',\n    license='BSD',\n    keywords=['astronomy', 'python'],\n    classifiers=['Programming Language :: Python'],\n    packages=find_packages(),\n    install_requires=REQUIRES,\n    include_package_data=True,\n    include_dirs=[np.get_include()],\n    )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"93"} +{"seq_id":"482423830","text":"def has_negatives(a):\n    \"\"\"\n    Return the positive values in a whose negative counterparts also appear in a.\n    \"\"\"\n    numbers = dict()\n    negatives = []\n    for number in a:\n        if number not in numbers and number < 0:\n            numbers[abs(number)] = number\n    for num in a:\n        if num in numbers:\n            negatives.append(num)\n\n    return negatives\n    \n\n\nif __name__ == \"__main__\":\n    print(has_negatives([-1, -2, 1, 2, 3, 4, 
-4]))\n","sub_path":"hashtables/ex4/ex4.py","file_name":"ex4.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"327975492","text":"import argparse\nimport os\n\nimport torch\nfrom boilr import VIExperimentManager\nfrom boilr.nn_init import data_dependent_init\nfrom boilr.utils import linear_anneal, img_grid_pad_value\nfrom torch import optim\nfrom torchvision.utils import save_image\n\nfrom models.lvae import LadderVAE\nfrom .data import DatasetLoader\n\n\nclass LVAEExperiment(VIExperimentManager):\n \"\"\"\n Experiment manager.\n\n Data attributes:\n - 'args': argparse.Namespace containing all config parameters. When\n initializing this object, if 'args' is not given, all config\n parameters are set based on experiment defaults and user input, using\n argparse.\n - 'run_description': string description of the run that includes a timestamp\n and can be used e.g. as folder name for logging.\n - 'model': the model.\n - 'device': torch.device that is being used\n - 'dataloaders': DataLoaders, with attributes 'train' and 'test'\n - 'optimizer': the optimizer\n \"\"\"\n\n\n def make_datamanager(self):\n cuda = self.device.type == 'cuda'\n return DatasetLoader(self.args, cuda)\n\n def make_model(self):\n args = self.args\n model = LadderVAE(\n self.dataloaders.color_ch,\n z_dims=args.z_dims,\n blocks_per_layer=args.blocks_per_layer,\n downsample=args.downsample,\n merge_type=args.merge_layers,\n batchnorm=args.batch_norm,\n nonlin=args.nonlin,\n stochastic_skip=args.skip_connections,\n n_filters=args.n_filters,\n dropout=args.dropout,\n res_block_type=args.residual_type,\n free_bits=args.free_bits,\n learn_top_prior=args.learn_top_prior,\n img_shape=self.dataloaders.img_size,\n likelihood_form=args.likelihood,\n gated=args.gated,\n no_initial_downscaling=args.no_initial_downscaling,\n analytical_kl=args.analytical_kl,\n ).to(self.device)\n\n # Weight initialization\n if args.simple_data_dependent_init:\n\n # Get batch\n t = [self.dataloaders.train.dataset[i] for i in range(args.batch_size)]\n t = torch.stack(tuple(t[i][0] for i in range(len(t))))\n\n # Use batch for data dependent init\n data_dependent_init(model, {'x': t.to(self.device)})\n\n return model\n\n def make_optimizer(self):\n args = self.args\n optimizer = optim.Adamax(\n self.model.parameters(),\n lr=args.lr,\n weight_decay=args.weight_decay\n )\n return optimizer\n\n\n def _parse_args(self):\n \"\"\"\n Parse command-line arguments defining experiment settings.\n\n :return: args: argparse.Namespace with experiment settings\n \"\"\"\n\n def list_options(lst):\n if lst:\n return \"'\" + \"' | '\".join(lst) + \"'\"\n return \"\"\n\n\n legal_merge_layers = ['linear', 'residual']\n legal_nonlin = ['relu', 'leakyrelu', 'elu', 'selu']\n legal_resblock = ['cabdcabd', 'bacdbac', 'bacdbacd']\n legal_datasets = ['static_mnist', 'cifar10', 'celeba',\n 'svhn', 'multi_dsprites_binary_rgb',\n 'multi_mnist_binary']\n legal_likelihoods = ['bernoulli', 'gaussian',\n 'discr_log', 'discr_log_mix']\n\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n allow_abbrev=False)\n\n self.add_required_args(parser,\n\n # General\n batch_size=64,\n test_batch_size=1000,\n lr=3e-4,\n log_interval=10000,\n test_log_interval=10000,\n checkpoint_interval=100000,\n resume=\"\",\n\n # VI-specific\n ll_every=50000,\n loglik_samples=100,)\n\n parser.add_argument('-d', '--dataset',\n type=str,\n choices=legal_datasets,\n 
default='static_mnist',\n metavar='NAME',\n dest='dataset_name',\n help=\"dataset: \" + list_options(legal_datasets))\n\n parser.add_argument('--likelihood',\n type=str,\n choices=legal_likelihoods,\n metavar='NAME',\n dest='likelihood',\n help=\"likelihood: {}; default depends on dataset\".format(\n list_options(legal_likelihoods)))\n\n parser.add_argument('--zdims',\n nargs='+',\n type=int,\n default=[32, 32, 32],\n metavar='DIM',\n dest='z_dims',\n help='list of dimensions (number of channels) for '\n 'each stochastic layer')\n\n parser.add_argument('--blocks-per-layer',\n type=int,\n default=2,\n metavar='N',\n help='residual blocks between stochastic layers')\n\n parser.add_argument('--nfilters',\n type=int,\n default=64,\n metavar='N',\n dest='n_filters',\n help='number of channels in all residual blocks')\n\n parser.add_argument('--no-bn',\n action='store_true',\n dest='no_batch_norm',\n help='do not use batch normalization')\n\n parser.add_argument('--skip',\n action='store_true',\n dest='skip_connections',\n help='skip connections in generative model')\n\n parser.add_argument('--gated',\n action='store_true',\n dest='gated',\n help='use gated layers in residual blocks')\n\n parser.add_argument('--downsample',\n nargs='+',\n type=int,\n default=[1, 1, 1],\n metavar='N',\n help='list of integers, each int is the number of downsampling'\n ' steps (by a factor of 2) before each stochastic layer')\n\n parser.add_argument('--learn-top-prior',\n action='store_true',\n help=\"learn the top-layer prior\")\n\n parser.add_argument('--residual-type',\n type=str,\n choices=legal_resblock,\n default='bacdbacd',\n metavar='TYPE',\n help=\"type of residual blocks: \" +\n list_options(legal_resblock))\n\n parser.add_argument('--merge-layers',\n type=str,\n choices=legal_merge_layers,\n default='residual',\n metavar='TYPE',\n help=\"type of merge layers: \" +\n list_options(legal_merge_layers))\n\n parser.add_argument('--beta-anneal',\n type=int,\n default=0,\n metavar='B',\n help='steps for annealing beta from 0 to 1')\n\n parser.add_argument('--data-dep-init',\n action='store_true',\n dest='simple_data_dependent_init',\n help='use simple data-dependent initialization to '\n 'normalize outputs of affine layers')\n\n parser.add_argument('--wd',\n type=float,\n default=0.0,\n dest='weight_decay',\n help='weight decay')\n\n parser.add_argument('--nonlin',\n type=str,\n choices=legal_nonlin,\n default='elu',\n metavar='F',\n help=\"nonlinear activation: \" +\n list_options(legal_nonlin))\n\n parser.add_argument('--dropout',\n type=float,\n default=0.2,\n metavar='D',\n help='dropout probability (in deterministic layers)')\n\n parser.add_argument('--freebits',\n type=float,\n default=0.0,\n metavar='N',\n dest='free_bits',\n help='free bits (nats)')\n\n parser.add_argument('--analytical-kl',\n action='store_true',\n dest='analytical_kl',\n help='use analytical KL')\n\n parser.add_argument('--no-initial-downscaling',\n action='store_true',\n dest='no_initial_downscaling',\n help='do not downscale as first inference step (and'\n 'upscale as last generation step)')\n\n args = parser.parse_args()\n\n if len(args.z_dims) != len(args.downsample):\n msg = (\n \"length of list of latent dimensions ({}) does not match \"\n \"length of list of downsampling factors ({})\").format(\n len(args.z_dims), len(args.downsample))\n raise RuntimeError(msg)\n\n assert args.weight_decay >= 0.0\n assert 0.0 <= args.dropout <= 1.0\n if args.dropout < 1e-5:\n args.dropout = None\n assert args.free_bits >= 0.0\n assert 
args.loglik_interval % args.test_log_interval == 0\n args.batch_norm = not args.no_batch_norm\n\n likelihood_map = {\n 'static_mnist': 'bernoulli',\n 'multi_dsprites_binary_rgb': 'bernoulli',\n 'multi_mnist_binary': 'bernoulli',\n 'cifar10': 'discr_log_mix',\n 'celeba': 'discr_log_mix',\n 'svhn': 'discr_log_mix',\n }\n if args.likelihood is None: # default\n args.likelihood = likelihood_map[args.dataset_name]\n\n return args\n\n\n @staticmethod\n def _make_run_description(args):\n \"\"\"\n Create a string description of the run. It is used in the names of the\n logging folders.\n\n :param args: experiment config\n :return: the run description\n \"\"\"\n s = ''\n s += args.dataset_name\n s += ',{}ly'.format(len(args.z_dims))\n # s += ',z=' + str(args.z_dims).replace(\" \", \"\")\n # s += ',dwn=' + str(args.downsample).replace(\" \", \"\")\n s += ',{}bpl'.format(args.blocks_per_layer)\n s += ',{}ch'.format(args.n_filters)\n if args.skip_connections:\n s += ',skip'\n if args.gated:\n s += ',gate'\n s += ',block=' + args.residual_type\n if args.beta_anneal != 0:\n s += ',b{}'.format(args.beta_anneal)\n s += ',{}'.format(args.nonlin)\n if args.free_bits > 0:\n s += ',freeb={}'.format(args.free_bits)\n if args.dropout is not None:\n s += ',drop={}'.format(args.dropout)\n if args.learn_top_prior:\n s += ',learnp'\n if args.weight_decay > 0.0:\n s += ',wd={}'.format(args.weight_decay)\n s += ',seed{}'.format(args.seed)\n if len(args.additional_descr) > 0:\n s += ',' + args.additional_descr\n return s\n\n\n\n def forward_pass(self, model, x, y=None):\n \"\"\"\n Simple single-pass model evaluation. It consists of a forward pass\n and computation of all necessary losses and metrics.\n \"\"\"\n\n # Forward pass\n x = x.to(self.device, non_blocking=True)\n model_out = model(x)\n recons_sep = -model_out['ll']\n kl_sep = model_out['kl_sep']\n kl = model_out['kl']\n kl_loss = model_out['kl_loss']\n\n # ELBO\n elbo_sep = - (recons_sep + kl_sep)\n elbo = elbo_sep.mean()\n\n # Loss with beta\n beta = 1.\n if self.args.beta_anneal != 0:\n beta = linear_anneal(model.global_step, 0.0, 1.0, self.args.beta_anneal)\n recons = recons_sep.mean()\n loss = recons + kl_loss * beta\n\n # L2\n l2 = 0.0\n for p in model.parameters():\n l2 = l2 + torch.sum(p ** 2)\n l2 = l2.sqrt()\n\n output = {\n 'loss': loss,\n 'elbo': elbo,\n 'elbo_sep': elbo_sep,\n 'kl': kl,\n 'l2': l2,\n 'recons': recons,\n 'out_mean': model_out['out_mean'],\n 'out_mode': model_out['out_mode'],\n 'out_sample': model_out['out_sample'],\n 'likelihood_params': model_out['likelihood_params'],\n }\n if 'kl_avg_layerwise' in model_out:\n output['kl_avg_layerwise'] = model_out['kl_avg_layerwise']\n\n return output\n\n\n @staticmethod\n def print_train_log(step, epoch, summaries):\n s = \" [step {}] loss: {:.5g} ELBO: {:.5g} recons: {:.3g} KL: {:.3g}\"\n s = s.format(\n step,\n summaries['loss/loss'],\n summaries['elbo/elbo'],\n summaries['elbo/recons'],\n summaries['elbo/kl'])\n print(s)\n\n\n @staticmethod\n def print_test_log(summaries, step=None, epoch=None):\n log_string = \" \"\n if epoch is not None:\n log_string += \"[step {}, epoch {}] \".format(step, epoch)\n log_string += \"ELBO {:.5g} recons: {:.3g} KL: {:.3g}\".format(\n summaries['elbo/elbo'], summaries['elbo/recons'], summaries['elbo/kl'])\n ll_key = None\n for k in summaries.keys():\n if k.find('elbo_IW') > -1:\n ll_key = k\n iw_samples = k.split('_')[-1]\n break\n if ll_key is not None:\n log_string += \" marginal log-likelihood ({}) {:.5g}\".format(\n iw_samples, summaries[ll_key])\n\n 
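# everything above is assembled into one consolidated line and printed once\n        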
print(log_string)\n\n\n    @staticmethod\n    def get_metrics_dict(results):\n        metrics_dict = {\n            'loss/loss': results['loss'].item(),\n            'elbo/elbo': results['elbo'].item(),\n            'elbo/recons': results['recons'].item(),\n            'elbo/kl': results['kl'].item(),\n            'l2/l2': results['l2'].item(),\n        }\n        try:\n            for i in range(len(results['kl_avg_layerwise'])):\n                key = 'kl_layers/kl_layer_{}'.format(i)\n                metrics_dict[key] = results['kl_avg_layerwise'][i].item()\n        except (AttributeError, KeyError):\n            pass\n        return metrics_dict\n\n\n\n    def additional_testing(self, img_folder):\n        \"\"\"\n        Perform additional testing, including possibly generating images.\n\n        In this case, save samples from the generative model, and pairs\n        input/reconstruction from the test set.\n\n        :param img_folder: folder to store images\n        \"\"\"\n\n        step = self.model.global_step\n\n        if not self.args.dry_run:\n\n            # Saved images will have n**2 sub-images\n            n = 8\n\n            # Save model samples\n            sample = self.model.sample_prior(n ** 2)\n            pad_value = img_grid_pad_value(sample)\n            fname = os.path.join(img_folder, 'sample_' + str(step) + '.png')\n            save_image(sample, fname, nrow=n, pad_value=pad_value)\n\n            # Get first test batch\n            (x, _) = next(iter(self.dataloaders.test))\n            fname = os.path.join(img_folder, 'reconstruction_' + str(step) + '.png')\n\n            # Save model original/reconstructions\n            self.save_input_and_recons(x, fname, n)\n\n\n    def save_input_and_recons(self, x, fname, n):\n        n_img = n ** 2 // 2\n        if x.shape[0] < n_img:\n            msg = (\"{} data points required, but given batch has size {}. \"\n                   \"Please use a larger batch.\".format(n_img, x.shape[0]))\n            raise RuntimeError(msg)\n        outputs = self.forward_pass(self.model, x)\n        x = x.to(self.device)\n        imgs = torch.stack([\n            x[:n_img],\n            outputs['out_sample'][:n_img]])\n        imgs = imgs.permute(1, 0, 2, 3, 4)\n        imgs = imgs.reshape(n ** 2, x.size(1), x.size(2), x.size(3))\n        pad_value = img_grid_pad_value(imgs)\n        save_image(imgs.cpu(), fname, nrow=n, pad_value=pad_value)\n","sub_path":"experiment/experiment_manager.py","file_name":"experiment_manager.py","file_ext":"py","file_size_in_byte":17036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"190111562","text":"# Import modules\nfrom psychopy import visual, monitors\nfrom psychopy import core, event, gui\nimport numpy as np\nimport os, sys\nimport tobii_research\n\n# Insert the parent directory (where Titta is) to path\ncurdir = os.path.dirname(os.path.abspath(__file__))\nos.chdir(curdir)\nsys.path.insert(0,os.path.dirname(curdir)) \nfrom Titta import Titta, helpers_tobii as helpers\n\n\n# Parameters\net_name = 'Spectrum'\ndummy_mode = False\n \n# Change any of the default settings?\nsettings = Titta.get_defaults(et_name)\nsettings.FILENAME = 'testfile.tsv'\n\n# Connect to eye tracker\ntracker = Titta.Connect(settings)\nif dummy_mode:\n    tracker.set_dummy_mode()\ntracker.init()\n\n# Window set-up (this color will be used for calibration)\nwin = visual.Window(monitor = settings.mon, fullscr = settings.FULLSCREEN,\n                    screen=1, size=settings.SCREEN_RES, units = 'deg')\ntext = visual.TextStim(win, text='') \n\n \n\n\n\n\n# Calibrate \ntracker.calibrate(win)\n\n# Start recording\ntracker.start_recording(gaze_data=True, store_data=True)\n\n# Present something\ntext.text = 'Recording. 
Press space to stop'\ntext.draw()\nwin.flip()\ntracker.send_message('recording started')\n \n# waitKeys takes maxWait as its first positional argument, so the key list must be passed by keyword\nevent.waitKeys(keyList=['space'])\ntracker.send_message('recording stopped')\ntracker.stop_recording(gaze_data=True)\n\n# Close window and save data\nwin.close()\ntracker.save_data() \ncore.quit()\n","sub_path":"examples/read_me.py","file_name":"read_me.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"25918457","text":"import json\n\ndef view_contacts():\n print('Your contacts:')\n i = 0\n for contact in book:\n i += 1\n print(i,\" \",contact['name'])\n\n num = int(input('Enter contact number or 0 to exit to the main menu: '))\n if num == 0: return\n #for cont in book[num-1]:\n # print(cont, \": \", book[num-1][cont])\n\n #Bob Jones\n print(book[num-1]['name'])\n\n #Primary phone: +4915123123151\n print(\"Primary phone: \", book[num-1]['primary_phone'])\n\n #Phone 2: +1142312312312\n #Phone 3: +1312312312312\n i = 2\n for ph in book[num-1]['additional_phones']:\n print(\"Phone \", i , \": \", ph )\n i+=1\n\n #E-mail: b.jones@hotmail.com\n #Telegram: @bob123\n for data in book[num-1]:\n if data in ['e-mail', 'vk', 'instagram', 'linkedin', 'telegram']:\n print(data, \": \", book[num-1][data])\n\n print('Choose an option:')\n\n\n print('1 - Delete contact')\n print('2 - Go back to the contacts list')\n ch2 = int(input('Your choice: '))\n\n if ch2 ==2 : \n view_contacts()\n\n if ch2 ==1 : \n # Use the selected contact's name instead of a hard-coded one\n name = book[num-1]['name']\n ch3 = input('Are you sure you want to delete %s from your contact list (y/n)? ' % name)\n if ch3 == 'y':\n book.pop(num-1)\n print('%s successfully deleted from the contact book.' % name)\n input('Press Enter to return to the contact list')\n view_contacts()\n\ndef add_new_contact():\n contact = {}\n contact['name'] = input('Enter name: ')\n contact['primary_phone'] = input('Enter primary phone: ')\n contact['additional_phones'] = []\n\n while True:\n aph = input('Enter additional phone or empty: ')\n if aph == '': break\n contact['additional_phones'].append(aph)\n \n while True:\n evit = input('Select one of the following additional fields (e-mail, vk, instagram, linkedin, telegram) or empty: ')\n if evit == '': break\n if evit not in ['e-mail', 'vk', 'instagram', 'linkedin', 'telegram']:\n print('Field name not recognized')\n\n if evit == 'e-mail': contact['e-mail']=input('Enter e-mail: ')\n if evit == 'vk': contact['vk']=input('Enter vk: ')\n if evit == 'instagram': contact['instagram']=input('Enter instagram: ')\n if evit == 'linkedin': contact['linkedin']=input('Enter linkedin: ')\n if evit == 'telegram': contact['telegram']=input('Enter telegram: ')\n\n print('Contact %s successfully created!' 
% contact['name'])\n book.append(contact)\n \ndef write():\n f=open('book.txt', 'w')\n f.write(json.dumps(book))\n f.close()\n\nbook = []\nf=open('book.txt', 'r')\nbook=json.loads(f.read())\nf.close()\n\nwhile True:\n print('Choose an option:')\n print('1 - view contacts')\n print('2 - add new contact')\n print('3 - exit the application')\n ch1 = int(input('Your choice: ')) \n\n if ch1 == 1 : view_contacts()\n if ch1 == 2 : add_new_contact()\n if ch1 == 3 :\n write()\n break\n","sub_path":"2/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"483567974","text":"\nfrom rlpyt.utils.launching.affinity import encode_affinity\nfrom rlpyt.utils.launching.exp_launcher import run_experiments\nfrom rlpyt.utils.launching.variant import make_variants, VariantLevel\n\naffinity_code = encode_affinity(\n n_cpu_core=16,\n n_gpu=8,\n contexts_per_gpu=2,\n hyperthread_offset=24,\n n_socket=2,\n)\nruns_per_setting = 3\nvariant_levels = list()\n\nenv_ids = [\"Hopper-v3\", \"HalfCheetah-v3\",\n \"Walker2d-v3\", \"Ant-v3\", \"Humanoid-v3\"]\nvalues = list(zip(env_ids))\ndir_names = [\"env_{}\".format(*v) for v in values]\nkeys = [(\"env\", \"id\")]\nvariant_levels.append(VariantLevel(keys, values, dir_names))\n\nvariants, log_dirs = make_variants(*variant_levels)\n\ndefault_config_key = \"ddpg_from_td3_1M_serial\"\nscript = \"rlpyt/experiments/scripts/mujoco/qpg/train/mujoco_ddpg_serial.py\"\nexperiment_title = \"ddpg_mujoco\"\n\nrun_experiments(\n script=script,\n affinity_code=affinity_code,\n experiment_title=experiment_title,\n runs_per_setting=runs_per_setting,\n variants=variants,\n log_dirs=log_dirs,\n common_args=(default_config_key,),\n)\n\ndefault_config_key = \"td3_1M_serial\"\nscript = \"rlpyt/experiments/scripts/mujoco/qpg/train/mujoco_td3_serial.py\"\nexperiment_title = \"td3_mujoco\"\n\nrun_experiments(\n script=script,\n affinity_code=affinity_code,\n experiment_title=experiment_title,\n runs_per_setting=runs_per_setting,\n variants=variants,\n log_dirs=log_dirs,\n common_args=(default_config_key,),\n)\n\ndefault_config_key = \"sac_1M_serial\"\nscript = \"rlpyt/experiments/scripts/mujoco/qpg/train/mujoco_sac_serial.py\"\nexperiment_title = \"sac_mujoco\"\n\nrun_experiments(\n script=script,\n affinity_code=affinity_code,\n experiment_title=experiment_title,\n runs_per_setting=runs_per_setting,\n variants=variants,\n log_dirs=log_dirs,\n common_args=(default_config_key,),\n)\n","sub_path":"dependencies/rlpyt/rlpyt/experiments/scripts/mujoco/qpg/launch/pabti/launch_mujoco_ddpg_td3_sac_serial.py","file_name":"launch_mujoco_ddpg_td3_sac_serial.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"260784212","text":"import numpy as np\r\n\r\nprint('Use basic quadratic equation syntax: a*x^2 + b*x + c')\r\na = int(input('Input a: '))\r\nb = int(input('Input b: '))\r\nc = int(input('Input c: '))\r\n\r\n\r\ndef quadratic_formula(a, b, c):\r\n\r\n print('%s %dx^2 + %dx + %d' % ('Equation:', a, b, c))\r\n\r\n # Check the discriminant first so we never take the square root of a negative number\r\n discriminant = b**2 - 4*a*c\r\n\r\n if discriminant < 0:\r\n print('This quadratic equation has no x-intercept(s)')\r\n\r\n else:\r\n x_positive = (-b + np.sqrt(discriminant))/(2*a)\r\n x_negative = (-b - np.sqrt(discriminant))/(2*a)\r\n print('x-intercept low:', x_negative)\r\n print('x-intercept high:', x_positive)\r\n\r\n\r\nquadratic_formula(a, b, 
c)\r\n\r\n","sub_path":"Quadratic_Formula.py","file_name":"Quadratic_Formula.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"244696102","text":"import torch\r\nfrom torch.utils.data import Dataset, DataLoader\r\nimport torch.nn as nn\r\nfrom torchvision import transforms\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\nimport pims\r\nimport pathlib\r\nimport torch.optim as optim\r\nfrom torch.autograd import Variable\r\nimport skimage as skm\r\nimport glob\r\n\r\n\r\n### Defining the Unet\r\n\r\ndef double_conv(in_c, out_c):\r\n\r\n\tconv = nn.Sequential(\r\n\t\tnn.Conv2d(in_c, out_c, kernel_size = 3, padding = 1),\r\n\t\tnn.ReLU(inplace = True),\r\n\t\tnn.Conv2d(out_c, out_c, kernel_size = 3, padding = 1),\r\n\t\tnn.ReLU(inplace = True)\r\n\t)\r\n\r\n\treturn conv\r\n\r\ndef crop_img(tensor, target_tensor):\r\n\r\n\ttarget_size = target_tensor.size()[2]\r\n\ttensor_size = tensor.size()[2]\r\n\r\n\tdelta = tensor_size - target_size\r\n\tdelta = delta // 2\r\n\r\n\treturn tensor[:, :, delta:tensor_size - delta, delta:tensor_size - delta]\r\n\r\nclass UNet(nn.Module):\r\n\r\n\tdef __init__(self):\r\n\r\n\t\tsuper(UNet, self).__init__()\r\n\r\n\t\tself.max_pool_2x2 = nn.MaxPool2d(kernel_size = 2, stride = 2)\r\n\r\n\t\tself.down_conv_1 = double_conv(1, 64) # Only 1 channel at the moment\r\n\t\tself.down_conv_2 = double_conv(64, 128)\r\n\t\tself.down_conv_3 = double_conv(128, 256)\r\n\t\r\n\t\tself.up_trans_3 = nn.ConvTranspose2d(\r\n\t\t\tin_channels = 256,\r\n\t\t\tout_channels = 128,\r\n\t\t\tkernel_size = 2,\r\n\t\t\tstride = 2\r\n\t\t)\r\n\r\n\t\tself.up_conv_3 = double_conv(256, 128)\r\n\r\n\t\tself.up_trans_4 = nn.ConvTranspose2d(\r\n\t\t\tin_channels = 128,\r\n\t\t\tout_channels = 64,\r\n\t\t\tkernel_size = 2,\r\n\t\t\tstride = 2\r\n\t\t)\r\n\r\n\t\tself.up_conv_4 = double_conv(128, 64)\r\n\r\n\t\tself.out = nn.Conv2d(\r\n\t\t\tin_channels = 64,\r\n\t\t\tout_channels = 1,\r\n\t\t\tkernel_size = 1\r\n\t\t)\r\n\r\n\tdef forward(self, image):\r\n\r\n\t\t# bs, c, h, w\r\n\t\t# Encoder\r\n\t\tx1 = self.down_conv_1(image)#\r\n\t\tx3 = self.max_pool_2x2(x1)\r\n\r\n\t\tx3 = self.down_conv_2(x3)#\r\n\t\tx5 = self.max_pool_2x2(x3)\r\n\r\n\t\tx5 = self.down_conv_3(x5)#\r\n\r\n\t\t# Decoder\r\n\r\n\t\tx = self.up_trans_3(x5)\r\n\t\ty = crop_img(x3, x)\r\n\t\tx = self.up_conv_3(torch.cat([x, y], 1))\r\n\r\n\t\tx = self.up_trans_4(x)\r\n\t\ty = crop_img(x1, x)\r\n\t\tx = self.up_conv_4(torch.cat([x, y], 1))\r\n\t\r\n\t\tx1, x3, x5, y = None, None, None, None,\r\n\r\n\t\tx = self.out(x)\r\n\t\treturn x\r\n\r\n\r\n","sub_path":"dev/unet.py","file_name":"unet.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"} +{"seq_id":"347734502","text":"from dataclasses import dataclass\n\n\nshow_type_label = {\n 'movie': 'Pel·lícula',\n 'show': 'SERIE'\n}\n\n\n@dataclass(init=False)\nclass HookData:\n title: str\n summary: str\n poster_url: str\n plex_url: str\n imdb_id: str\n directors: str\n release_year: str\n genres: str\n show_type: str\n themoviedb_id: str\n duration: str\n season_count: str\n\n def __init__(self, datamap):\n self.title = datamap['title']\n self.summary = datamap['summary']\n self.poster_url = datamap['poster_url']\n self.plex_url = datamap['plex_url']\n self.imdb_id = datamap['imdb_id']\n self.directors = datamap['directors']\n self.release_year = datamap['release_year']\n 
self.genres = datamap['genres']\n self.show_type = datamap['type']\n self.themoviedb_id = datamap['themoviedb_id']\n self.duration = datamap['duration']\n self.season_count = datamap['season_count']\n","sub_path":"uepflix-notifier/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"191983028","text":"#!/usr/bin/env python\n'''\nSummarize the contributions of every author in a git repository\n\nUsage: python get_git_info.py git_repo_dir\n'''\n\nimport os\nimport sys\nimport subprocess\nfrom datetime import datetime\n\n\ndef statistic_info(info):\n result = {}\n for line in info:\n fields = line.rsplit(maxsplit=2)\n author = fields[0] + ' ' + fields[1]\n if author not in result:\n result[author] = {'begin': fields[2], 'end': fields[2], 'count': 1}\n continue\n if result[author]['begin'] > fields[2]:\n result[author]['begin'] = fields[2]\n if result[author]['end'] < fields[2]:\n result[author]['end'] = fields[2]\n result[author]['count'] += 1\n\n for author in result:\n begin = result[author]['begin']\n end = result[author]['end']\n delta = datetime.strptime(end, '%Y-%m-%d') - datetime.strptime(begin, '%Y-%m-%d')\n result[author]['days'] = delta.days\n\n return result\n\n\ndef get_git_info(git_repo_dir):\n os.chdir(git_repo_dir)\n p = subprocess.Popen('git log --date=short --format=\"%aN %ae %cd\"', shell=True, stdout=subprocess.PIPE)\n info = p.communicate()[0]\n info = info.decode().splitlines()\n return info\n\n\nif __name__ == '__main__':\n git_repo_dir = sys.argv[1]\n assert git_repo_dir\n info = get_git_info(git_repo_dir)\n result = statistic_info(info)\n\n for author, stat in sorted(result.items(), key=lambda v: v[1]['begin']):\n\n print('{author:50} commits:{count:<10} begin:{begin:15} end:{end:15} days:{days}'.format(\n author=author,\n count=stat['count'],\n begin=stat['begin'],\n end=stat['end'],\n days=stat['days']))\n","sub_path":"get_git_info.py","file_name":"get_git_info.py","file_ext":"py","file_size_in_byte":1700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"372665991","text":"import json\n\nfrom flask import Flask, render_template, Response, jsonify\n\nimport common\nimport settings\n\napp = Flask(__name__)\napp.config.update({\n 'APPLICATION_ROOT': '/website/',\n})\n\n\n@app.route('/')\ndef main_page():\n return render_template('index.html', params=common.params, gaode_key=settings.GAODE_KEY)\n\n\n@app.route('/job')\ndef job():\n \"\"\"\n Use server-sent events to push job info that has not been sent yet\n \"\"\"\n jobs = []\n while not common.job_queue.empty():\n job = common.job_queue.get().json()\n common.job_have_sent.append(job)\n jobs.append('data: {}\\n\\n'.format(json.dumps(job)))\n\n # All spiders have shut down, so no more data is coming\n if len(jobs) == 0 and len(common.job_spider_threadings) == 0:\n if common.job_to_much:\n return Response('event: finished\\ndata: to much\\n\\n', mimetype='text/event-stream')\n else:\n return Response('event: finished\\ndata: finished\\n\\n', mimetype='text/event-stream')\n\n return Response(jobs, mimetype='text/event-stream')\n\n\n@app.route('/jobs')\ndef jobs():\n \"\"\"\n Return the job infos already sent to the front end; used when the page is refreshed\n \"\"\"\n return jsonify(common.job_have_sent)\n\n\n@app.route('/house')\ndef house():\n \"\"\"\n Use server-sent events to push house info that has not been sent yet\n \"\"\"\n houses = []\n while not common.house_queue.empty():\n house = common.house_queue.get().json()\n common.house_have_sent.append(house)\n houses.append('data: {}\\n\\n'.format(json.dumps(house)))\n\n # All spiders have shut down, so no more data is coming\n if len(houses) == 0 and len(common.house_spider_threadings) == 0:\n if common.house_to_much:\n return Response('event: finished\\ndata: to much\\n\\n', mimetype='text/event-stream')\n else:\n return Response('event: finished\\ndata: finished\\n\\n', mimetype='text/event-stream')\n\n return Response(houses, mimetype='text/event-stream')\n\n\n@app.route('/houses')\ndef houses():\n \"\"\"\n Return the house infos already sent to the front end; used when the page is refreshed\n \"\"\"\n return jsonify(common.house_have_sent)\n\n\n@app.route('/location')\ndef location():\n return {\n 'city': common.params['城市'],\n 'district': common.params.get('区域')\n }\n","sub_path":"website/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"564845023","text":"from django.conf.urls import url\n\nfrom warehouse import views\n\nurlpatterns = [\n url(r'^warehouse$',\n views.WarehouseListView.as_view(),\n name='warehouse_index'),\n\n url(r'^warehouse/create$',\n views.WarehouseCreateView.as_view(success_url='/warehouse'),\n name='warehouse_create'),\n\n url(r'^warehouse/(?P<pk>\\d+)$',\n views.WarehouseDetailView.as_view(),\n name='warehouse_detail'),\n\n url(r'warehouse/(?P<pk>\\d+)/add_boardgame$',\n views.BoardGameContainerCreateView.as_view(),\n name='container_create')\n]\n","sub_path":"warehouse/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"300594176","text":"import argparse\nimport abc\nfrom collections import defaultdict\nimport math\nimport sys\nimport pdb\n\nfrom pyspark.ml.feature import \\\n Tokenizer, \\\n RegexTokenizer, \\\n StopWordsRemover, \\\n HashingTF, \\\n IDF\nfrom pyspark.ml.clustering import KMeans, BisectingKMeans, GaussianMixture\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import col, udf\nfrom pyspark.sql.types import IntegerType\nfrom pyspark.ml import Pipeline\n\n\ndef preprocess(spark_session, data_file):\n raw_data = spark_session.read.format('json').load(data_file)\n\n regexTokenizer = RegexTokenizer(inputCol='text',\n outputCol='words',\n pattern='\\\\w+',\n gaps=False,\n toLowercase=True)\n\n stopWordsRemover = StopWordsRemover(inputCol='words',\n outputCol='filtered_words')\n\n hashingTF = HashingTF(inputCol='filtered_words',\n outputCol='tf_features',\n numFeatures=20)\n\n idf = IDF(inputCol='tf_features', outputCol='features')\n\n pipeline = Pipeline(stages=[regexTokenizer, stopWordsRemover, hashingTF, idf])\n pipeline_model = pipeline.fit(raw_data)\n data = pipeline_model.transform(raw_data)\n\n return data\n\n\ndef train(alg, data, k, seed=0):\n # Use the caller-supplied seed instead of a hard-coded value\n alg = alg.setK(k).setSeed(seed)\n model = alg.fit(data)\n return model\n\n\ndef evaluate(data, model, alg, k):\n if alg != 'gmm':\n print_centers(model)\n wssse = model.computeCost(data)\n print('Within Set Sum of Squared Errors = {}'.format(wssse))\n\n predictions = model.transform(data)\n pred_and_label = [(row.prediction, row.category)\n for row in predictions.collect()]\n confusion = defaultdict(int)\n categories = list(set(list(zip(*pred_and_label))[1]))\n for r in pred_and_label:\n confusion[r] += 1\n\n print('Confusion Matrix:')\n for i in range(k):\n s = str(i)\n for c in categories:\n s += '\\t{}: {}'.format(c, confusion[(i, c)])\n s += '\\n'\n print(s)\n\n\ndef print_centers(model):\n print('Cluster centers:\\n')\n for c in model.clusterCenters():\n print(c)\n\n\ndef main():\n parser = 
argparse.ArgumentParser(description='Clustering with pyspark.')\n\n parser.add_argument('--data-file', type=str,\n default='../data/enwiki.json')\n parser.add_argument('--num-clusters', type=int, default=4)\n parser.add_argument('--seed', type=int, default=23)\n parser.add_argument('--algorithm', default='kmeans',\n choices=['kmeans', 'hier', 'gmm'])\n\n args = parser.parse_args()\n\n spark_session = SparkSession.builder.appName('clustering').getOrCreate()\n\n data = preprocess(spark_session, args.data_file)\n\n if args.algorithm == 'kmeans':\n alg = KMeans()\n elif args.algorithm == 'hier':\n alg = BisectingKMeans()\n elif args.algorithm == 'gmm':\n alg = GaussianMixture()\n\n model = train(alg, data, args.num_clusters, seed=args.seed)\n evaluate(data, model, args.algorithm, args.num_clusters)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"HW3/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":3065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"241508075","text":"\n\n\n\ndef triangle(N):\n \"\"\" Compute triangle number \"\"\"\n i = 0\n\n for j in range(1,N+1):\n i += j\n \n return i\n\n#first 10 triangle terms\ndef rangetr(rn_tr):\n \"\"\" Compute range of triangular numbers \"\"\"\n l1 = [triangle(num) for num in range(2,rn_tr+1)]\n \n return l1\n\ndef listdivisor(tr_num):\n \"\"\" List divisors of a triangular number \"\"\"\n\n divisors = [div for div in range(1, tr_num+1) if tr_num % div == 0 ]\n\n return divisors\n\ndef main():\n \"\"\" What is the value of the first triangle number to have over five hundred divisors \"\"\"\n\n i = 1\n while True:\n if len(listdivisor(triangle(i))) > 10:\n print(\"Triangular number {} (term {}) has more than ten divisors\".format(triangle(i), i))\n break\n i +=1\n\n\nif __name__ == '__main__':\n print(triangle(7))\n print(rangetr(10))\n print(listdivisor(28))\n main()\n","sub_path":"problem12.py","file_name":"problem12.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"496889427","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\no = cv2.imread('img22.jpg')\nimg = o\n\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\nishow = img.copy()\n\nret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\nkernel = np.ones((3,3), np.uint8)\nopening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations = 2)\nbg = cv2.dilate(opening, kernel, iterations = 3)\ndist = cv2.distanceTransform(opening, cv2.DIST_L2, 5)\nret, fore = cv2.threshold(dist, 0.7*dist.max(), 255, 0)\nfore = np.uint8(fore)\nun = cv2.subtract(bg, fore)\n\nplt.subplot(221)\nplt.imshow(ishow)\nplt.axis('off')\nplt.subplot(222)\nplt.imshow(bg)\nplt.axis('off')\nplt.subplot(223)\nplt.imshow(fore)\nplt.axis('off')\nplt.subplot(224)\nplt.imshow(un)\nplt.axis('off')\nplt.show()","sub_path":"ex17-3.py","file_name":"ex17-3.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}
+{"seq_id":"57585599","text":"\"\"\"Number Puzzle.\"\"\"\nfrom ortools.sat.python import cp_model\n\n\nclass VarArraySolutionPrinter(cp_model.CpSolverSolutionCallback):\n \"\"\"Print solutions.\"\"\"\n\n def __init__(self, variables):\n cp_model.CpSolverSolutionCallback.__init__(self)\n self.__variables = variables\n self.__solution_count = 0\n\n def 
on_solution_callback(self):\n self.__solution_count += 1\n # Print the 3x3 grid, one row per line (avoid shadowing the built-in vars())\n cells = list(self.__variables)\n for i, var in enumerate(cells):\n if (i + 1) % 3 == 0:\n print(f\"{self.Value(var)}\")\n else:\n print(f\"{self.Value(var)}\", end=' ')\n print()\n\n def solution_count(self):\n return self.__solution_count\n\n\nmodel = cp_model.CpModel()\n\n# Squares\n# 0 1 2\n# 3 4 5\n# 6 7 8\nsquares = [model.NewIntVar(0, 10, f\"n{str(i)}\") for i in range(9)]\n\nmodel.AddAllDifferent(squares)\n\n# Every row, column and diagonal must sum to 15\nmodel.Add(squares[0] + squares[1] + squares[2] == 15)\nmodel.Add(squares[3] + squares[4] + squares[5] == 15)\nmodel.Add(squares[6] + squares[7] + squares[8] == 15)\nmodel.Add(squares[0] + squares[3] + squares[6] == 15)\nmodel.Add(squares[1] + squares[4] + squares[7] == 15)\nmodel.Add(squares[2] + squares[5] + squares[8] == 15)\nmodel.Add(squares[0] + squares[4] + squares[8] == 15)\nmodel.Add(squares[2] + squares[4] + squares[6] == 15)\n\nfor i in range(9):\n model.Add(squares[i] != 0)\n\nmodel.Add(sum(squares) == 45)\n\nsolver = cp_model.CpSolver()\n\nsolution_printer = VarArraySolutionPrinter(squares)\nstatus = solver.SearchForAllSolutions(model, solution_printer)\nprint(f\"Status = {solver.StatusName(status)}\")\nprint(f\"Number of solutions found: {solution_printer.solution_count()}\")\n","sub_path":"number_puzzle.py","file_name":"number_puzzle.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"92"}